title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
DOC: fix PR09,PR08 docstring errors in pandas.plotting | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e4a44a89998e3..9467978f13d30 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2066,7 +2066,7 @@ def to_feather(self, fname):
Parameters
----------
fname : str
- string file path
+ String file path.
"""
from pandas.io.feather_format import to_feather
@@ -4772,6 +4772,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
+ Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
@@ -4806,10 +4807,10 @@ def duplicated(self, subset=None, keep="first"):
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- - ``first`` : Mark duplicates as ``True`` except for the
- first occurrence.
- - ``last`` : Mark duplicates as ``True`` except for the
- last occurrence.
+ Determines which duplicates (if any) to mark.
+
+ - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
+ - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
@@ -6233,8 +6234,8 @@ def unstack(self, level=-1, fill_value=None):
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
- fill_value : replace NaN with this value if the unstack produces
- missing values
+ fill_value : int, string or dict
+ Replace NaN with this value if the unstack produces missing values
Returns
-------
@@ -6665,6 +6666,8 @@ def apply(
by result_type='broadcast'.
raw : bool, default False
+ Determines if row or column is passed as a Series or ndarry object:
+
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
@@ -7357,6 +7360,8 @@ def corr(self, method="pearson", min_periods=1):
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
+ Method of correlation:
+
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
@@ -7556,10 +7561,13 @@ def corrwith(self, other, axis=0, drop=False, method="pearson"):
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
+ The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
+ row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
+ Method of correlation:
+
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
@@ -7939,7 +7947,7 @@ def idxmin(self, axis=0, skipna=True):
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -7976,7 +7984,7 @@ def idxmax(self, axis=0, skipna=True):
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a3b9bec494854..cb21588c8ba1a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2559,10 +2559,10 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
path : str, buffer-like, or None
Destination for the serialized object.
If None, return generated bytes
- append : bool whether to append to an existing msgpack
- (default is False)
- compress : type of compressor (zlib or blosc), default to None (no
- compression)
+ append : bool, default False
+ Whether to append to an existing msgpack.
+ compress : str, default None
+ Type of compressor (zlib, blosc or None).
Returns
-------
@@ -2797,10 +2797,10 @@ def to_clipboard(self, excel=True, sep=None, **kwargs):
Parameters
----------
excel : bool, default True
- - True, use the provided separator, writing in a csv format for
- allowing easy pasting into excel.
- - False, write a string representation of the object to the
- clipboard.
+ Produce output in a csv format for easy pasting into excel.
+
+ - True, use the provided separator for csv pasting.
+ - False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
@@ -5024,15 +5024,15 @@ def sample(
Parameters
----------
func : function
- function to apply to the %(klass)s.
+ Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
- positional arguments passed into ``func``.
+ Positional arguments passed into ``func``.
kwargs : mapping, optional
- a dictionary of keyword arguments passed into ``func``.
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 8724382d9ec55..966a18e11a620 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -514,6 +514,8 @@ class PlotAccessor(PandasObject):
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
+ The kind of plot to produce:
+
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
@@ -537,7 +539,7 @@ class PlotAccessor(PandasObject):
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
- matplotlib line style per column
+ The matplotlib line style per column
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis
.. versionchanged:: 0.25.0
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index a8e86d9dfa997..74ce60c6116a9 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -14,9 +14,9 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
----------
ax : Matplotlib axes object
data : DataFrame or Series
- data for table contents
- kwargs : keywords, optional
- keyword arguments which passed to matplotlib.table.table.
+ Data for table contents.
+ **kwargs
+ Keyword arguments to be passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
@@ -82,7 +82,7 @@ def scatter_matrix(
density_kwds=None,
hist_kwds=None,
range_padding=0.05,
- **kwds
+ **kwargs
):
"""
Draw a matrix of scatter plots.
@@ -91,28 +91,26 @@ def scatter_matrix(
----------
frame : DataFrame
alpha : float, optional
- amount of transparency applied
+ Amount of transparency applied.
figsize : (float,float), optional
- a tuple (width, height) in inches
+ A tuple (width, height) in inches.
ax : Matplotlib axis object, optional
grid : bool, optional
- setting this to True will show the grid
+ Setting this to True will show the grid.
diagonal : {'hist', 'kde'}
- pick between 'kde' and 'hist' for
- either Kernel Density Estimation or Histogram
- plot in the diagonal
+ Pick between 'kde' and 'hist' for either Kernel Density Estimation or
+ Histogram plot in the diagonal.
marker : str, optional
- Matplotlib marker type, default '.'
- hist_kwds : other plotting keyword arguments
- To be passed to hist function
- density_kwds : other plotting keyword arguments
- To be passed to kernel density estimate plot
- range_padding : float, optional
- relative extension of axis range in x and y
- with respect to (x_max - x_min) or (y_max - y_min),
- default 0.05
- kwds : other plotting keyword arguments
- To be passed to scatter function
+ Matplotlib marker type, default '.'.
+ density_kwds : keywords
+ Keyword arguments to be passed to kernel density estimate plot.
+ hist_kwds : keywords
+ Keyword arguments to be passed to hist function.
+ range_padding : float, default 0.05
+ Relative extension of axis range in x and y with respect to
+ (x_max - x_min) or (y_max - y_min).
+ **kwargs
+ Keyword arguments to be passed to scatter function.
Returns
-------
@@ -136,7 +134,7 @@ def scatter_matrix(
density_kwds=density_kwds,
hist_kwds=hist_kwds,
range_padding=range_padding,
- **kwds
+ **kwargs
)
@@ -215,7 +213,7 @@ def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
@deprecate_kwarg(old_arg_name="data", new_arg_name="frame")
def andrews_curves(
- frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds
+ frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwargs
):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
@@ -233,17 +231,17 @@ def andrews_curves(
Parameters
----------
frame : DataFrame
- Data to be plotted, preferably normalized to (0.0, 1.0)
+ Data to be plotted, preferably normalized to (0.0, 1.0).
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
- Colors to use for the different classes
+ Colors to use for the different classes.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
- kwds : keywords
- Options to pass to matplotlib plotting method
+ **kwargs
+ Options to pass to matplotlib plotting method.
Returns
-------
@@ -257,7 +255,7 @@ def andrews_curves(
samples=samples,
color=color,
colormap=colormap,
- **kwds
+ **kwargs
)
@@ -327,7 +325,7 @@ def parallel_coordinates(
axvlines=True,
axvlines_kwds=None,
sort_labels=False,
- **kwds
+ **kwargs
):
"""
Parallel coordinates plotting.
@@ -336,30 +334,29 @@ def parallel_coordinates(
----------
frame : DataFrame
class_column : str
- Column name containing class names
+ Column name containing class names.
cols : list, optional
- A list of column names to use
+ A list of column names to use.
ax : matplotlib.axis, optional
- matplotlib axis object
+ Matplotlib axis object.
color : list or tuple, optional
- Colors to use for the different classes
+ Colors to use for the different classes.
use_columns : bool, optional
- If true, columns will be used as xticks
+ If true, columns will be used as xticks.
xticks : list or tuple, optional
- A list of values to use for xticks
+ A list of values to use for xticks.
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
- If true, vertical lines will be added at each xtick
+ If true, vertical lines will be added at each xtick.
axvlines_kwds : keywords, optional
- Options to be passed to axvline method for vertical lines
- sort_labels : bool, False
- Sort class_column labels, useful when assigning colors
+ Options to be passed to axvline method for vertical lines.
+ sort_labels : bool, default False
+ Sort class_column labels, useful when assigning colors.
.. versionadded:: 0.20.0
-
- kwds : keywords
- Options to pass to matplotlib plotting method
+ **kwargs
+ Options to pass to matplotlib plotting method.
Returns
-------
@@ -388,7 +385,7 @@ def parallel_coordinates(
axvlines=axvlines,
axvlines_kwds=axvlines_kwds,
sort_labels=sort_labels,
- **kwds
+ **kwargs
)
@@ -411,7 +408,7 @@ def lag_plot(series, lag=1, ax=None, **kwds):
return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
-def autocorrelation_plot(series, ax=None, **kwds):
+def autocorrelation_plot(series, ax=None, **kwargs):
"""
Autocorrelation plot for time series.
@@ -419,15 +416,15 @@ def autocorrelation_plot(series, ax=None, **kwds):
----------
series : Time series
ax : Matplotlib axis object, optional
- kwds : keywords
- Options to pass to matplotlib plotting method
+ **kwargs
+ Options to pass to matplotlib plotting method.
Returns
-------
class:`matplotlib.axis.Axes`
"""
plot_backend = _get_plot_backend("matplotlib")
- return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwds)
+ return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)
def tsplot(series, plotf, ax=None, **kwargs):
| fixes the errors along with minor changes to standardize kwds -> **kwargs
closes #28687
verified this fixes PR09,PR08 errors with:
```
./scripts/validate_docstrings.py --errors=PR09,PR08 | grep "pandas.plotting"
```
- [x] closes #xxxx
- [x] tests added / passed | https://api.github.com/repos/pandas-dev/pandas/pulls/28689 | 2019-09-30T16:11:59Z | 2019-10-01T15:50:52Z | 2019-10-01T15:50:52Z | 2019-10-01T18:14:49Z |
DOC: fix formatting in the ExtensionArray docstrings | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 7a16c3f6a35b6..53755695c97e3 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -474,7 +474,7 @@ def fillna(self, value=None, method=None, limit=None):
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap
+ backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
@@ -485,7 +485,8 @@ def fillna(self, value=None, method=None, limit=None):
Returns
-------
- filled : ExtensionArray with NA/NaN filled
+ ExtensionArray
+ With NA/NaN filled.
"""
value, method = validate_fillna_kwargs(value, method)
@@ -539,13 +540,14 @@ def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArra
fill_value : object, optional
The scalar value to use for newly introduced missing values.
- The default is ``self.dtype.na_value``
+ The default is ``self.dtype.na_value``.
.. versionadded:: 0.24.0
Returns
-------
- shifted : ExtensionArray
+ ExtensionArray
+ Shifted.
Notes
-----
@@ -869,11 +871,12 @@ def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]:
Parameters
----------
dtype : str, np.dtype, or ExtensionDtype, optional
- Default None
+ Default None.
Returns
-------
ExtensionArray
+ A view of the :class:`ExtensionArray`.
"""
# NB:
# - This must return a *new* object referencing the same data, not self.
| - [x] closes #28685
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28686 | 2019-09-30T14:47:21Z | 2019-10-14T21:48:16Z | 2019-10-14T21:48:15Z | 2019-10-14T21:50:43Z |
Deprecate is copy in take | diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index 6912d15abf3d6..944ce9b4fb1f6 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1 +1,2 @@
custom: https://pandas.pydata.org/donate.html
+tidelift: pypi/pandas
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
new file mode 100644
index 0000000000000..f3b059a5d4f13
--- /dev/null
+++ b/.github/SECURITY.md
@@ -0,0 +1 @@
+To report a security vulnerability to pandas, please go to https://tidelift.com/security and see the instructions there.
diff --git a/.gitignore b/.gitignore
index 56828fa1d9331..6c3c275c48fb7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,6 +57,7 @@ dist
# wheel files
*.whl
**/wheelhouse/*
+pip-wheel-metadata
# coverage
.coverage
coverage.xml
@@ -66,6 +67,9 @@ coverage_html_report
# hypothesis test database
.hypothesis/
__pycache__
+# pytest-monkeytype
+monkeytype.sqlite3
+
# OS generated files #
######################
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5f7143ef518bb..3f98273a336cf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,16 +1,18 @@
repos:
- - repo: https://github.com/python/black
- rev: stable
- hooks:
- - id: black
- language_version: python3.7
- - repo: https://gitlab.com/pycqa/flake8
- rev: 3.7.7
- hooks:
- - id: flake8
- language: python_venv
- - repo: https://github.com/pre-commit/mirrors-isort
- rev: v4.3.20
- hooks:
- - id: isort
- language: python_venv
+- repo: https://github.com/python/black
+ rev: stable
+ hooks:
+ - id: black
+ language_version: python3.7
+- repo: https://gitlab.com/pycqa/flake8
+ rev: 3.7.7
+ hooks:
+ - id: flake8
+ language: python_venv
+ additional_dependencies: [flake8-comprehensions]
+- repo: https://github.com/pre-commit/mirrors-isort
+ rev: v4.3.20
+ hooks:
+ - id: isort
+ language: python_venv
+ exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$
diff --git a/.travis.yml b/.travis.yml
index 8335a6ee92bef..79fecc41bec0d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,3 @@
-sudo: false
language: python
python: 3.5
@@ -22,7 +21,7 @@ env:
git:
# for cloning
- depth: 2000
+ depth: false
matrix:
fast_finish: true
@@ -64,7 +63,7 @@ before_install:
- pwd
- uname -a
- git --version
- - git tag
+ - ./ci/check_git_tags.sh
# Because travis runs on Google Cloud and has a /etc/boto.cfg,
# it breaks moto import, see:
# https://github.com/spulec/moto/issues/1771
diff --git a/MANIFEST.in b/MANIFEST.in
index b417b8890fa24..adaad1dc1c864 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@ include LICENSE
include RELEASE.md
include README.md
include setup.py
+include pyproject.toml
graft doc
prune doc/build
@@ -14,6 +15,7 @@ graft pandas
global-exclude *.bz2
global-exclude *.csv
global-exclude *.dta
+global-exclude *.feather
global-exclude *.gz
global-exclude *.h5
global-exclude *.html
@@ -23,7 +25,10 @@ global-exclude *.pickle
global-exclude *.png
global-exclude *.pyc
global-exclude *.pyd
+global-exclude *.ods
+global-exclude *.odt
global-exclude *.sas7bdat
+global-exclude *.sav
global-exclude *.so
global-exclude *.xls
global-exclude *.xlsm
diff --git a/Makefile b/Makefile
index baceefe6d49ff..27a2c3682de9c 100644
--- a/Makefile
+++ b/Makefile
@@ -15,10 +15,10 @@ lint-diff:
git diff upstream/master --name-only -- "*.py" | xargs flake8
black:
- black . --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)'
+ black . --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|setup.py)'
develop: build
- python setup.py develop
+ python -m pip install --no-build-isolation -e .
doc:
-rm -rf doc/build doc/source/generated
diff --git a/README.md b/README.md
index aeeea1464e1fd..7786eeb0ec5c7 100644
--- a/README.md
+++ b/README.md
@@ -188,16 +188,17 @@ python setup.py install
or for installing in [development mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs):
+
```sh
-python setup.py develop
+python -m pip install --no-build-isolation -e .
```
-Alternatively, you can use `pip` if you want all the dependencies pulled
-in automatically (the `-e` option is for installing it in [development
-mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs)):
+If you have `make`, you can also use `make develop` to run the same command.
+
+or alternatively
```sh
-pip install -e .
+python setup.py develop
```
See the full instructions for [installing from source](https://pandas.pydata.org/pandas-docs/stable/install.html#installing-from-source).
@@ -224,7 +225,7 @@ Most development discussion is taking place on github in this repo. Further, the
All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome.
-A detailed overview on how to contribute can be found in the **[contributing guide](https://dev.pandas.io/contributing.html)**. There is also an [overview](.github/CONTRIBUTING.md) on GitHub.
+A detailed overview on how to contribute can be found in the **[contributing guide](https://dev.pandas.io/docs/contributing.html)**. There is also an [overview](.github/CONTRIBUTING.md) on GitHub.
If you are simply looking to start working with the pandas codebase, navigate to the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out.
@@ -233,3 +234,5 @@ You can also triage issues which may include reproducing bug reports, or asking
Or maybe through using pandas you have an idea of your own or are looking for something in the documentation and thinking ‘this can be improved’...you can do something about it!
Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas).
+
+As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/master/.github/CODE_OF_CONDUCT.md)
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 571ede1a21134..c04bbf53a86a6 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -50,12 +50,13 @@
"xlsxwriter": [],
"xlrd": [],
"xlwt": [],
+ "odfpy": [],
"pytest": [],
// If using Windows with python 2.7 and want to build using the
// mingw toolchain (rather than MSVC), uncomment the following line.
// "libpython": [],
},
-
+ "conda_channels": ["defaults", "conda-forge"],
// Combinations of libraries/python versions can be excluded/included
// from the set to test. Each entry is a dictionary containing additional
// key-value pairs to include/exclude.
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index c43e5dfd729aa..501e27b9078ec 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -1,4 +1,5 @@
import numpy as np
+
from pandas import DataFrame
try:
@@ -32,4 +33,4 @@ def time_cache_readonly(self):
self.obj.prop
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index fd3324b78f1c3..58e0db67d6025 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -1,4 +1,5 @@
import numpy as np
+
from pandas import DataFrame, Series, date_range
from pandas.core.algorithms import checked_add_with_arr
@@ -155,4 +156,4 @@ def time_add_overflow_both_arg_nan(self):
)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 8097118a79d20..559aa7050a640 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -1,7 +1,9 @@
+import warnings
+
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
-import warnings
try:
from pandas.api.types import union_categoricals
@@ -280,4 +282,4 @@ def time_sort_values(self):
self.index.sort_values(ascending=False)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 654075292cdf6..ec3dd7a48a89f 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -1,6 +1,7 @@
import numpy as np
+
+from pandas import DatetimeIndex, Index, MultiIndex, Series, Timestamp
import pandas.util.testing as tm
-from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex
def no_change(arr):
@@ -113,4 +114,4 @@ def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py
index 60800b1f9cae7..24cc1c6f9fa70 100644
--- a/asv_bench/benchmarks/dtypes.py
+++ b/asv_bench/benchmarks/dtypes.py
@@ -1,14 +1,14 @@
+import numpy as np
+
from pandas.api.types import pandas_dtype
-import numpy as np
from .pandas_vb_common import (
- numeric_dtypes,
datetime_dtypes,
- string_dtypes,
extension_dtypes,
+ numeric_dtypes,
+ string_dtypes,
)
-
_numpy_dtypes = [
np.dtype(dtype) for dtype in (numeric_dtypes + datetime_dtypes + string_dtypes)
]
@@ -40,4 +40,4 @@ def time_pandas_dtype_invalid(self, dtype):
pass
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 84e94315cc28b..06a181875aaa8 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -1,4 +1,5 @@
import numpy as np
+
import pandas as pd
try:
@@ -62,4 +63,4 @@ def time_query_with_boolean_selection(self):
self.df.query("(a >= @self.min_val) & (a <= @self.max_val)")
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index acfb26bcf5d7c..3944e0bc523d8 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -1,6 +1,7 @@
import numpy as np
+
+from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range
import pandas.util.testing as tm
-from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
from pandas.tseries.offsets import Nano, Hour
@@ -104,4 +105,4 @@ def time_frame_from_lists(self):
self.df = DataFrame(self.data)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index e2f6764c76eef..eb9a0e83271f1 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -1,5 +1,5 @@
-import warnings
import string
+import warnings
import numpy as np
@@ -609,4 +609,15 @@ def time_dataframe_describe(self):
self.df.describe()
-from .pandas_vb_common import setup # noqa: F401
+class SelectDtypes:
+ params = [100, 1000]
+ param_names = ["n"]
+
+ def setup(self, n):
+ self.df = DataFrame(np.random.randn(10, n))
+
+ def time_select_dtypes(self, n):
+ self.df.select_dtypes(include="int")
+
+
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 0d0b75561d057..d57492dd37268 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -1,7 +1,8 @@
import numpy as np
-import pandas.util.testing as tm
-from pandas import DataFrame, Series, read_csv, factorize, date_range
+
+from pandas import DataFrame, Series, date_range, factorize, read_csv
from pandas.core.algorithms import take_1d
+import pandas.util.testing as tm
try:
from pandas import (
@@ -36,7 +37,7 @@ def wrapper(fname):
return wrapper
-from .pandas_vb_common import BaseIO
+from .pandas_vb_common import BaseIO # noqa: E402 isort:skip
class ParallelGroupbyMethods:
@@ -301,4 +302,4 @@ def time_loop(self, threads):
self.loop()
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 39b07d4734399..d51c53e2264f1 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -15,7 +15,6 @@
)
import pandas.util.testing as tm
-
method_blacklist = {
"object": {
"median",
@@ -626,4 +625,4 @@ def time_first(self):
self.df_nans.groupby("key").transform("first")
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index 6541ddcb0397d..a94960d494707 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -1,14 +1,17 @@
+import gc
+
import numpy as np
-import pandas.util.testing as tm
+
from pandas import (
- Series,
- date_range,
DatetimeIndex,
- Index,
- RangeIndex,
Float64Index,
+ Index,
IntervalIndex,
+ RangeIndex,
+ Series,
+ date_range,
)
+import pandas.util.testing as tm
class SetOperations:
@@ -225,4 +228,21 @@ def time_intersection_both_duplicate(self, N):
self.intv.intersection(self.intv2)
-from .pandas_vb_common import setup # noqa: F401
+class GC:
+ params = [1, 2, 5]
+
+ def create_use_drop(self):
+ idx = Index(list(range(1000 * 1000)))
+ idx._engine
+
+ def peakmem_gc_instances(self, N):
+ try:
+ gc.disable()
+
+ for _ in range(N):
+ self.create_use_drop()
+ finally:
+ gc.enable()
+
+
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 720bd0245be41..ac35139c1954a 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -1,22 +1,23 @@
import warnings
import numpy as np
-import pandas.util.testing as tm
+
from pandas import (
- Series,
+ CategoricalIndex,
DataFrame,
- MultiIndex,
- Int64Index,
- UInt64Index,
Float64Index,
- IntervalIndex,
- CategoricalIndex,
IndexSlice,
+ Int64Index,
+ IntervalIndex,
+ MultiIndex,
+ Series,
+ UInt64Index,
concat,
date_range,
option_context,
period_range,
)
+import pandas.util.testing as tm
class NumericSeriesIndexing:
@@ -129,10 +130,6 @@ def time_getitem_label_slice(self, index, index_structure):
def time_getitem_pos_slice(self, index, index_structure):
self.s[:80000]
- def time_get_value(self, index, index_structure):
- with warnings.catch_warnings(record=True):
- self.s.get_value(self.lbl)
-
def time_getitem_scalar(self, index, index_structure):
self.s[self.lbl]
@@ -151,10 +148,6 @@ def setup(self):
self.bool_indexer = self.df[self.col_scalar] > 0
self.bool_obj_indexer = self.bool_indexer.astype(object)
- def time_get_value(self):
- with warnings.catch_warnings(record=True):
- self.df.get_value(self.idx_scalar, self.col_scalar)
-
def time_ix(self):
with warnings.catch_warnings(record=True):
self.df.ix[self.idx_scalar, self.col_scalar]
@@ -379,4 +372,4 @@ def time_chained_indexing(self, mode):
df2["C"] = 1.0
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 66ef4f2aec380..e85b3bd2c7687 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -1,8 +1,9 @@
import numpy as np
-import pandas.util.testing as tm
+
from pandas import DataFrame, Series, to_numeric
+import pandas.util.testing as tm
-from .pandas_vb_common import numeric_dtypes, lib
+from .pandas_vb_common import lib, numeric_dtypes
class NumericInferOps:
@@ -120,4 +121,4 @@ def time_convert(self, data):
lib.maybe_convert_numeric(data, set(), coerce_numeric=False)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index 4525e504fc4dd..9b8599b0a1b64 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -1,10 +1,11 @@
+from io import StringIO
import random
import string
import numpy as np
+
+from pandas import Categorical, DataFrame, date_range, read_csv, to_datetime
import pandas.util.testing as tm
-from pandas import DataFrame, Categorical, date_range, read_csv, to_datetime
-from io import StringIO
from ..pandas_vb_common import BaseIO
@@ -406,4 +407,4 @@ def time_to_datetime_format_DD_MM_YYYY(self, cache_dates):
to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y")
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 12e70f84e5203..c97cf768e27d9 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -1,38 +1,72 @@
from io import BytesIO
+
import numpy as np
-from pandas import DataFrame, date_range, ExcelWriter, read_excel
+from odf.opendocument import OpenDocumentSpreadsheet
+from odf.table import Table, TableCell, TableRow
+from odf.text import P
+
+from pandas import DataFrame, ExcelWriter, date_range, read_excel
import pandas.util.testing as tm
-class Excel:
+def _generate_dataframe():
+ N = 2000
+ C = 5
+ df = DataFrame(
+ np.random.randn(N, C),
+ columns=["float{}".format(i) for i in range(C)],
+ index=date_range("20000101", periods=N, freq="H"),
+ )
+ df["object"] = tm.makeStringIndex(N)
+ return df
+
+
+class WriteExcel:
params = ["openpyxl", "xlsxwriter", "xlwt"]
param_names = ["engine"]
def setup(self, engine):
- N = 2000
- C = 5
- self.df = DataFrame(
- np.random.randn(N, C),
- columns=["float{}".format(i) for i in range(C)],
- index=date_range("20000101", periods=N, freq="H"),
- )
- self.df["object"] = tm.makeStringIndex(N)
- self.bio_read = BytesIO()
- self.writer_read = ExcelWriter(self.bio_read, engine=engine)
- self.df.to_excel(self.writer_read, sheet_name="Sheet1")
- self.writer_read.save()
- self.bio_read.seek(0)
-
- def time_read_excel(self, engine):
- read_excel(self.bio_read)
+ self.df = _generate_dataframe()
def time_write_excel(self, engine):
- bio_write = BytesIO()
- bio_write.seek(0)
- writer_write = ExcelWriter(bio_write, engine=engine)
- self.df.to_excel(writer_write, sheet_name="Sheet1")
- writer_write.save()
+ bio = BytesIO()
+ bio.seek(0)
+ writer = ExcelWriter(bio, engine=engine)
+ self.df.to_excel(writer, sheet_name="Sheet1")
+ writer.save()
+
+
+class ReadExcel:
+
+ params = ["xlrd", "openpyxl", "odf"]
+ param_names = ["engine"]
+ fname_excel = "spreadsheet.xlsx"
+ fname_odf = "spreadsheet.ods"
+
+ def _create_odf(self):
+ doc = OpenDocumentSpreadsheet()
+ table = Table(name="Table1")
+ for row in self.df.values:
+ tr = TableRow()
+ for val in row:
+ tc = TableCell(valuetype="string")
+ tc.addElement(P(text=val))
+ tr.addElement(tc)
+ table.addElement(tr)
+
+ doc.spreadsheet.addElement(table)
+ doc.save(self.fname_odf)
+
+ def setup_cache(self):
+ self.df = _generate_dataframe()
+
+ self.df.to_excel(self.fname_excel, sheet_name="Sheet1")
+ self._create_odf()
+
+ def time_read_excel(self, engine):
+ fname = self.fname_odf if engine == "odf" else self.fname_excel
+ read_excel(fname, engine=engine)
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index 2874a7889156b..8ec04a2087f1b 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -1,5 +1,6 @@
import numpy as np
-from pandas import DataFrame, date_range, HDFStore, read_hdf
+
+from pandas import DataFrame, HDFStore, date_range, read_hdf
import pandas.util.testing as tm
from ..pandas_vb_common import BaseIO
@@ -127,4 +128,4 @@ def time_write_hdf(self, format):
self.df.to_hdf(self.fname, "df", format=format)
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py
index 0ce42856fb14a..5c1d39776b91c 100644
--- a/asv_bench/benchmarks/io/json.py
+++ b/asv_bench/benchmarks/io/json.py
@@ -1,6 +1,7 @@
import numpy as np
+
+from pandas import DataFrame, concat, date_range, read_json, timedelta_range
import pandas.util.testing as tm
-from pandas import DataFrame, date_range, timedelta_range, concat, read_json
from ..pandas_vb_common import BaseIO
@@ -63,10 +64,13 @@ def peakmem_read_json_lines_concat(self, index):
class ToJSON(BaseIO):
fname = "__test__.json"
- params = ["split", "columns", "index"]
- param_names = ["orient"]
+ params = [
+ ["split", "columns", "index", "values", "records"],
+ ["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"],
+ ]
+ param_names = ["orient", "frame"]
- def setup(self, lines_orient):
+ def setup(self, orient, frame):
N = 10 ** 5
ncols = 5
index = date_range("20000101", periods=N, freq="H")
@@ -111,34 +115,85 @@ def setup(self, lines_orient):
index=index,
)
- def time_floats_with_int_index(self, orient):
- self.df.to_json(self.fname, orient=orient)
+ def time_to_json(self, orient, frame):
+ getattr(self, frame).to_json(self.fname, orient=orient)
+
+ def peakmem_to_json(self, orient, frame):
+ getattr(self, frame).to_json(self.fname, orient=orient)
- def time_floats_with_dt_index(self, orient):
- self.df_date_idx.to_json(self.fname, orient=orient)
+ def time_to_json_wide(self, orient, frame):
+ base_df = getattr(self, frame).copy()
+ df = concat([base_df.iloc[:100]] * 1000, ignore_index=True, axis=1)
+ df.to_json(self.fname, orient=orient)
- def time_delta_int_tstamp(self, orient):
- self.df_td_int_ts.to_json(self.fname, orient=orient)
+ def peakmem_to_json_wide(self, orient, frame):
+ base_df = getattr(self, frame).copy()
+ df = concat([base_df.iloc[:100]] * 1000, ignore_index=True, axis=1)
+ df.to_json(self.fname, orient=orient)
- def time_float_int(self, orient):
- self.df_int_floats.to_json(self.fname, orient=orient)
- def time_float_int_str(self, orient):
- self.df_int_float_str.to_json(self.fname, orient=orient)
+class ToJSONLines(BaseIO):
+
+ fname = "__test__.json"
+
+ def setup(self):
+ N = 10 ** 5
+ ncols = 5
+ index = date_range("20000101", periods=N, freq="H")
+ timedeltas = timedelta_range(start=1, periods=N, freq="s")
+ datetimes = date_range(start=1, periods=N, freq="s")
+ ints = np.random.randint(100000000, size=N)
+ floats = np.random.randn(N)
+ strings = tm.makeStringIndex(N)
+ self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
+ self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
+ self.df_td_int_ts = DataFrame(
+ {
+ "td_1": timedeltas,
+ "td_2": timedeltas,
+ "int_1": ints,
+ "int_2": ints,
+ "ts_1": datetimes,
+ "ts_2": datetimes,
+ },
+ index=index,
+ )
+ self.df_int_floats = DataFrame(
+ {
+ "int_1": ints,
+ "int_2": ints,
+ "int_3": ints,
+ "float_1": floats,
+ "float_2": floats,
+ "float_3": floats,
+ },
+ index=index,
+ )
+ self.df_int_float_str = DataFrame(
+ {
+ "int_1": ints,
+ "int_2": ints,
+ "float_1": floats,
+ "float_2": floats,
+ "str_1": strings,
+ "str_2": strings,
+ },
+ index=index,
+ )
- def time_floats_with_int_idex_lines(self, orient):
+ def time_floats_with_int_idex_lines(self):
self.df.to_json(self.fname, orient="records", lines=True)
- def time_floats_with_dt_index_lines(self, orient):
+ def time_floats_with_dt_index_lines(self):
self.df_date_idx.to_json(self.fname, orient="records", lines=True)
- def time_delta_int_tstamp_lines(self, orient):
+ def time_delta_int_tstamp_lines(self):
self.df_td_int_ts.to_json(self.fname, orient="records", lines=True)
- def time_float_int_lines(self, orient):
+ def time_float_int_lines(self):
self.df_int_floats.to_json(self.fname, orient="records", lines=True)
- def time_float_int_str_lines(self, orient):
+ def time_float_int_str_lines(self):
self.df_int_float_str.to_json(self.fname, orient="records", lines=True)
@@ -160,4 +215,4 @@ def peakmem_float(self, frames):
df.to_json()
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py
index d97b4ae13f0bd..f5038602539ab 100644
--- a/asv_bench/benchmarks/io/msgpack.py
+++ b/asv_bench/benchmarks/io/msgpack.py
@@ -1,5 +1,7 @@
import warnings
+
import numpy as np
+
from pandas import DataFrame, date_range, read_msgpack
import pandas.util.testing as tm
@@ -27,4 +29,4 @@ def time_write_msgpack(self):
self.df.to_msgpack(self.fname)
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py
index 286ac767c02e7..647e9d27dec9d 100644
--- a/asv_bench/benchmarks/io/pickle.py
+++ b/asv_bench/benchmarks/io/pickle.py
@@ -1,4 +1,5 @@
import numpy as np
+
from pandas import DataFrame, date_range, read_pickle
import pandas.util.testing as tm
@@ -25,4 +26,4 @@ def time_write_pickle(self):
self.df.to_pickle(self.fname)
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py
index b80872b17a9e4..fe84c869717e3 100644
--- a/asv_bench/benchmarks/io/sql.py
+++ b/asv_bench/benchmarks/io/sql.py
@@ -1,10 +1,11 @@
import sqlite3
import numpy as np
-import pandas.util.testing as tm
-from pandas import DataFrame, date_range, read_sql_query, read_sql_table
from sqlalchemy import create_engine
+from pandas import DataFrame, date_range, read_sql_query, read_sql_table
+import pandas.util.testing as tm
+
class SQL:
@@ -141,4 +142,4 @@ def time_read_sql_table_column(self, dtype):
read_sql_table(self.table_name, self.con, columns=[dtype])
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py
index b3ed71af47dc8..28829785d72e9 100644
--- a/asv_bench/benchmarks/io/stata.py
+++ b/asv_bench/benchmarks/io/stata.py
@@ -1,4 +1,5 @@
import numpy as np
+
from pandas import DataFrame, date_range, read_stata
import pandas.util.testing as tm
@@ -50,4 +51,4 @@ def setup(self, convert_dates):
self.df.to_stata(self.fname, self.convert_dates)
-from ..pandas_vb_common import setup # noqa: F401
+from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 7c899e3dc6ac8..6aa82a43a4d6a 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -1,8 +1,9 @@
import string
import numpy as np
+
+from pandas import DataFrame, MultiIndex, Series, concat, date_range, merge, merge_asof
import pandas.util.testing as tm
-from pandas import DataFrame, Series, MultiIndex, date_range, concat, merge, merge_asof
try:
from pandas import merge_ordered
@@ -348,4 +349,4 @@ def time_series_align_left_monotonic(self):
self.ts1.align(self.ts2, join="left")
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index eda059a68e8a5..3f4fd7ad911c1 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -1,8 +1,9 @@
import string
import numpy as np
+
+from pandas import DataFrame, MultiIndex, date_range
import pandas.util.testing as tm
-from pandas import date_range, MultiIndex, DataFrame
class GetLoc:
@@ -146,4 +147,4 @@ def time_categorical_level(self):
self.df.set_index(["a", "b"])
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py
index 31c3b6fb6cb60..d822646e712ae 100644
--- a/asv_bench/benchmarks/offset.py
+++ b/asv_bench/benchmarks/offset.py
@@ -1,7 +1,8 @@
-import warnings
from datetime import datetime
+import warnings
import numpy as np
+
import pandas as pd
try:
diff --git a/asv_bench/benchmarks/package.py b/asv_bench/benchmarks/package.py
new file mode 100644
index 0000000000000..8ca33db361fa0
--- /dev/null
+++ b/asv_bench/benchmarks/package.py
@@ -0,0 +1,25 @@
+"""
+Benchmarks for pandas at the package-level.
+"""
+import subprocess
+import sys
+
+from pandas.compat import PY37
+
+
+class TimeImport:
+ def time_import(self):
+ if PY37:
+ # on py37+ we the "-X importtime" usage gives us a more precise
+ # measurement of the import time we actually care about,
+ # without the subprocess or interpreter overhead
+ cmd = [sys.executable, "-X", "importtime", "-c", "import pandas as pd"]
+ p = subprocess.run(cmd, stderr=subprocess.PIPE)
+
+ line = p.stderr.splitlines()[-1]
+ field = line.split(b"|")[-2].strip()
+ total = int(field) # microseconds
+ return total
+
+ cmd = [sys.executable, "-c", "import pandas as pd"]
+ subprocess.run(cmd, stderr=subprocess.PIPE)
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index fdc8207021c0f..1faf13329110d 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -1,7 +1,8 @@
-import os
from importlib import import_module
+import os
import numpy as np
+
import pandas as pd
# Compatibility import for lib
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index 2f8ae0650ab75..7303240a25f29 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -1,4 +1,5 @@
from pandas import DataFrame, Period, PeriodIndex, Series, date_range, period_range
+
from pandas.tseries.frequencies import to_offset
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py
index 4fb0876f05a0a..5c718516360ed 100644
--- a/asv_bench/benchmarks/plotting.py
+++ b/asv_bench/benchmarks/plotting.py
@@ -1,11 +1,12 @@
+import matplotlib
import numpy as np
-from pandas import DataFrame, Series, DatetimeIndex, date_range
+
+from pandas import DataFrame, DatetimeIndex, Series, date_range
try:
from pandas.plotting import andrews_curves
except ImportError:
from pandas.tools.plotting import andrews_curves
-import matplotlib
matplotlib.use("Agg")
@@ -93,4 +94,4 @@ def time_plot_andrews_curves(self):
andrews_curves(self.df, "Name")
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 8d4c9ebaf3e89..cd450f801c805 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -1,6 +1,8 @@
import numpy as np
+
+from pandas import DataFrame, Index, MultiIndex, Series, date_range, period_range
import pandas.util.testing as tm
-from pandas import DataFrame, Series, MultiIndex, Index, date_range, period_range
+
from .pandas_vb_common import lib
@@ -159,4 +161,4 @@ def time_lib_fast_zip(self):
lib.fast_zip(self.col_array_list)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 6137e944e6b9e..2a115fb0b4fe3 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -1,4 +1,5 @@
import numpy as np
+
import pandas as pd
@@ -36,6 +37,23 @@ def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
+class ReplaceList:
+ # GH#28099
+
+ params = [(True, False)]
+ param_names = ["inplace"]
+
+ def setup(self, inplace):
+ self.df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10 ** 7))
+
+ def time_replace_list(self, inplace):
+ self.df.replace([np.inf, -np.inf], np.nan, inplace=inplace)
+
+ def time_replace_list_one_match(self, inplace):
+ # the 1 can be held in self._df.blocks[0], while the inf and -inf cant
+ self.df.replace([np.inf, -np.inf, 1], np.nan, inplace=inplace)
+
+
class Convert:
params = (["DataFrame", "Series"], ["Timestamp", "Timedelta"])
@@ -56,4 +74,4 @@ def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 1aed756b841a5..441f4b380656e 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -1,9 +1,10 @@
-import string
from itertools import product
+import string
import numpy as np
-from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
+
import pandas as pd
+from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
class Melt:
@@ -214,6 +215,7 @@ def setup(self, bins):
self.datetime_series = pd.Series(
np.random.randint(N, size=N), dtype="datetime64[ns]"
)
+ self.interval_bins = pd.IntervalIndex.from_breaks(np.linspace(0, N, bins))
def time_cut_int(self, bins):
pd.cut(self.int_series, bins)
@@ -239,6 +241,14 @@ def time_qcut_timedelta(self, bins):
def time_qcut_datetime(self, bins):
pd.qcut(self.datetime_series, bins)
+ def time_cut_interval(self, bins):
+ # GH 27668
+ pd.cut(self.int_series, self.interval_bins)
+
+ def peakmem_cut_interval(self, bins):
+ # GH 27668
+ pd.cut(self.int_series, self.interval_bins)
+
class Explode:
param_names = ["n_rows", "max_list_length"]
@@ -253,4 +263,4 @@ def time_explode(self, n_rows, max_list_length):
self.series.explode()
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index a70977fcf539f..493f96d46d5e7 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -1,6 +1,7 @@
-import pandas as pd
import numpy as np
+import pandas as pd
+
class Methods:
@@ -20,6 +21,28 @@ def setup(self, constructor, window, dtype, method):
def time_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
+ def peakmem_rolling(self, constructor, window, dtype, method):
+ getattr(self.roll, method)()
+
+
+class Apply:
+ params = (
+ ["DataFrame", "Series"],
+ [10, 1000],
+ ["int", "float"],
+ [sum, np.sum, lambda x: np.sum(x) + 5],
+ [True, False],
+ )
+ param_names = ["contructor", "window", "dtype", "function", "raw"]
+
+ def setup(self, constructor, window, dtype, function, raw):
+ N = 10 ** 5
+ arr = (100 * np.random.random(N)).astype(dtype)
+ self.roll = getattr(pd, constructor)(arr).rolling(window)
+
+ def time_rolling(self, constructor, window, dtype, function, raw):
+ self.roll.apply(function, raw=raw)
+
class ExpandingMethods:
@@ -121,4 +144,4 @@ def peakmem_fixed(self):
self.roll.max()
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 6038a2ab4bd9f..a3f1d92545c3f 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -1,8 +1,9 @@
from datetime import datetime
import numpy as np
+
+from pandas import NaT, Series, date_range
import pandas.util.testing as tm
-from pandas import Series, date_range, NaT
class SeriesConstructor:
@@ -275,4 +276,4 @@ def time_func(self, func, N, dtype):
self.func()
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index 19d08c086a508..ac78ca53679fd 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -136,4 +136,4 @@ def time_division(self, fill_value):
self.arr1 / self.arr2
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index 620a6de0f5f34..ed5ebfa61594e 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -1,6 +1,6 @@
import numpy as np
-import pandas as pd
+import pandas as pd
ops = ["mean", "sum", "median", "std", "skew", "kurt", "mad", "prod", "sem", "var"]
@@ -113,12 +113,23 @@ def setup(self, method, use_bottleneck):
nanops._USE_BOTTLENECK = use_bottleneck
self.df = pd.DataFrame(np.random.randn(1000, 30))
self.df2 = pd.DataFrame(np.random.randn(1000, 30))
+ self.df_wide = pd.DataFrame(np.random.randn(1000, 200))
+ self.df_wide_nans = self.df_wide.where(np.random.random((1000, 200)) < 0.9)
self.s = pd.Series(np.random.randn(1000))
self.s2 = pd.Series(np.random.randn(1000))
def time_corr(self, method, use_bottleneck):
self.df.corr(method=method)
+ def time_corr_wide(self, method, use_bottleneck):
+ self.df_wide.corr(method=method)
+
+ def time_corr_wide_nans(self, method, use_bottleneck):
+ self.df_wide_nans.corr(method=method)
+
+ def peakmem_corr_wide(self, method, use_bottleneck):
+ self.df_wide.corr(method=method)
+
def time_corr_series(self, method, use_bottleneck):
self.s.corr(self.s2, method=method)
@@ -148,4 +159,4 @@ def time_cov_series(self, use_bottleneck):
self.s.cov(self.s2)
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index 6be2fa92d9eac..f30b2482615bd 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -1,7 +1,8 @@
import warnings
import numpy as np
-from pandas import Series, DataFrame
+
+from pandas import DataFrame, Series
import pandas.util.testing as tm
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 1020b773f8acb..498774034d642 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -2,7 +2,9 @@
import dateutil
import numpy as np
-from pandas import to_datetime, date_range, Series, DataFrame, period_range
+
+from pandas import DataFrame, Series, date_range, period_range, to_datetime
+
from pandas.tseries.frequencies import infer_freq
try:
@@ -426,4 +428,4 @@ def time_dt_accessor_year(self, tz):
self.series.dt.year
-from .pandas_vb_common import setup # noqa: F401
+from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index cfd7f6546833d..62c46b6970969 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -22,22 +22,17 @@ jobs:
timeoutInMinutes: 90
steps:
- script: |
- # XXX next command should avoid redefining the path in every step, but
- # made the process crash as it couldn't find deactivate
- #echo '##vso[task.prependpath]$HOME/miniconda3/bin'
+ echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
echo '##vso[task.setvariable variable=ENV_FILE]environment.yml'
echo '##vso[task.setvariable variable=AZURE]true'
displayName: 'Setting environment variables'
# Do not require a conda environment
- - script: |
- export PATH=$HOME/miniconda3/bin:$PATH
- ci/code_checks.sh patterns
+ - script: ci/code_checks.sh patterns
displayName: 'Looking for unwanted patterns'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
sudo apt-get install -y libc6-dev-i386
ci/setup_env.sh
displayName: 'Setup environment and build pandas'
@@ -45,14 +40,12 @@ jobs:
# Do not require pandas
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh lint
displayName: 'Linting'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh dependencies
displayName: 'Dependencies consistency'
@@ -60,42 +53,36 @@ jobs:
# Require pandas
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh code
displayName: 'Checks on imported code'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh doctests
displayName: 'Running doctests'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh docstrings
displayName: 'Docstring validation'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh typing
displayName: 'Typing validation'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
pytest --capture=no --strict scripts
- displayName: 'Testing docstring validaton script'
+ displayName: 'Testing docstring validation script'
condition: true
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
cd asv_bench
asv check -E existing
@@ -117,23 +104,27 @@ jobs:
displayName: 'Running benchmarks'
condition: true
-- job: 'Docs'
+- job: 'Web_and_Docs'
pool:
vmImage: ubuntu-16.04
timeoutInMinutes: 90
steps:
- script: |
echo '##vso[task.setvariable variable=ENV_FILE]environment.yml'
+ echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
displayName: 'Setting environment variables'
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
sudo apt-get install -y libc6-dev-i386
ci/setup_env.sh
displayName: 'Setup environment and build pandas'
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
+ source activate pandas-dev
+ python web/pandas_web.py web/pandas --target-path=web/build
+ displayName: 'Build website'
+
+ - script: |
source activate pandas-dev
# Next we should simply have `doc/make.py --warnings-are-errors`, everything else is required because the ipython directive doesn't fail the build on errors (https://github.com/ipython/ipython/issues/11547)
doc/make.py --warnings-are-errors | tee sphinx.log ; SPHINX_RET=${PIPESTATUS[0]}
@@ -142,15 +133,21 @@ jobs:
displayName: 'Build documentation'
- script: |
- cd doc/build/html
+ mkdir -p to_deploy/docs
+ cp -r web/build/* to_deploy/
+ cp -r doc/build/html/* to_deploy/docs/
+ displayName: 'Merge website and docs'
+
+ - script: |
+ cd to_deploy
git init
touch .nojekyll
echo "dev.pandas.io" > CNAME
printf "User-agent: *\nDisallow: /" > robots.txt
git add --all .
git config user.email "pandas-dev@python.org"
- git config user.name "pandas-docs-bot"
- git commit -m "pandas documentation in master"
+ git config user.name "pandas-bot"
+ git commit -m "pandas web and documentation in master"
displayName: 'Create git repo for docs build'
condition : |
and(not(eq(variables['Build.Reason'], 'PullRequest')),
@@ -174,10 +171,10 @@ jobs:
eq(variables['Build.SourceBranch'], 'refs/heads/master'))
- script: |
- cd doc/build/html
+ cd to_deploy
git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git
git push -f origin master
- displayName: 'Publish docs to GitHub pages'
+ displayName: 'Publish web and docs to GitHub pages'
condition : |
and(not(eq(variables['Build.Reason'], 'PullRequest')),
eq(variables['Build.SourceBranch'], 'refs/heads/master'))
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index 39f862290e720..281107559a38c 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -56,21 +56,25 @@ jobs:
steps:
- script: |
if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386 $EXTRA_APT; fi
+ echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
echo "Creating Environment"
ci/setup_env.sh
displayName: 'Setup environment and build pandas'
+
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/run_tests.sh
displayName: 'Test'
- - script: |
- export PATH=$HOME/miniconda3/bin:$PATH
- source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
+
+ - script: source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
+ displayName: 'Build versions'
+
- task: PublishTestResults@2
inputs:
testResultsFiles: 'test-data-*.xml'
testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
+ displayName: 'Publish test results'
+
- powershell: |
$junitXml = "test-data-single.xml"
$(Get-Content $junitXml | Out-String) -match 'failures="(.*?)"'
@@ -96,8 +100,8 @@ jobs:
Write-Error "$($matches[1]) tests failed"
}
displayName: 'Check for test failures'
+
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
python ci/print_skipped.py
displayName: 'Print skipped tests'
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
index 20cad1bb4af96..dfa82819b9826 100644
--- a/ci/azure/windows.yml
+++ b/ci/azure/windows.yml
@@ -17,7 +17,9 @@ jobs:
CONDA_PY: "37"
steps:
- - powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
+ - powershell: |
+ Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
+ Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
displayName: 'Add conda to PATH'
- script: conda update -q -n base conda
displayName: Update conda
@@ -52,7 +54,6 @@ jobs:
}
displayName: 'Check for test failures'
- script: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
python ci/print_skipped.py
displayName: 'Print skipped tests'
diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh
new file mode 100755
index 0000000000000..9dbcd4f98683e
--- /dev/null
+++ b/ci/check_git_tags.sh
@@ -0,0 +1,28 @@
+set -e
+
+if [[ ! $(git tag) ]]; then
+ echo "No git tags in clone, please sync your git tags with upstream using:"
+ echo " git fetch --tags upstream"
+ echo " git push --tags origin"
+ echo ""
+ echo "If the issue persists, the clone depth needs to be increased in .travis.yml"
+ exit 1
+fi
+
+# This will error if there are no tags and we omit --always
+DESCRIPTION=$(git describe --long --tags)
+echo "$DESCRIPTION"
+
+if [[ "$DESCRIPTION" == *"untagged"* ]]; then
+ echo "Unable to determine most recent tag, aborting build"
+ exit 1
+else
+ if [[ "$DESCRIPTION" != *"g"* ]]; then
+ # A good description will have the hash prefixed by g, a bad one will be
+ # just the hash
+ echo "Unable to determine most recent tag, aborting build"
+ exit 1
+ else
+ echo "$(git tag)"
+ fi
+fi
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 96a8440d85694..b03c4f2238445 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -56,7 +56,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
black --version
MSG='Checking black formatting' ; echo $MSG
- black . --check --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)'
+ black . --check --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|setup.py)'
RET=$(($RET + $?)) ; echo $MSG "DONE"
# `setup.cfg` contains the list of error codes that are being ignored in flake8
@@ -188,9 +188,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
set -o pipefail
if [[ "$AZURE" == "true" ]]; then
# we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files
- ! grep -n '--exclude=*.'{svg,c,cpp,html} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}'
+ ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}'
else
- ! grep -n '--exclude=*.'{svg,c,cpp,html} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}'
+ ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}'
fi
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
@@ -203,10 +203,14 @@ if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then
import sys
import pandas
-blacklist = {'bs4', 'gcsfs', 'html5lib', 'ipython', 'jinja2' 'hypothesis',
+blacklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis',
'lxml', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
- 'tables', 'xlrd', 'xlsxwriter', 'xlwt'}
-mods = blacklist & set(m.split('.')[0] for m in sys.modules)
+ 'tables', 'urllib.request', 'xlrd', 'xlsxwriter', 'xlwt'}
+
+# GH#28227 for some of these check for top-level modules, while others are
+# more specific (e.g. urllib.request)
+import_mods = set(m.split('.')[0] for m in sys.modules) | set(sys.modules)
+mods = blacklist & import_mods
if mods:
sys.stderr.write('err: pandas should not import: {}\n'.format(', '.join(mods)))
sys.exit(len(mods))
@@ -263,8 +267,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05
+ MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/ci/deps/azure-35-compat.yaml b/ci/deps/azure-35-compat.yaml
index 97c45b2be27d7..dd54001984ec7 100644
--- a/ci/deps/azure-35-compat.yaml
+++ b/ci/deps/azure-35-compat.yaml
@@ -18,13 +18,13 @@ dependencies:
- xlsxwriter=0.9.8
- xlwt=1.2.0
# universal
- - cython=0.28.2
- hypothesis>=3.58.0
- pytest-xdist
- pytest-mock
- pytest-azurepipelines
- pip
- pip:
- # for python 3.5, pytest>=4.0.2 is not available in conda
+ # for python 3.5, pytest>=4.0.2, cython>=0.29.13 is not available in conda
+ - cython>=0.29.13
- pytest==4.5.0
- html5lib==1.0b2
diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml
index 43bf0ecdd6c3e..321cc203961d5 100644
--- a/ci/deps/azure-36-32bit.yaml
+++ b/ci/deps/azure-36-32bit.yaml
@@ -6,7 +6,6 @@ dependencies:
- gcc_linux-32
- gcc_linux-32
- gxx_linux-32
- - cython=0.28.2
- numpy=1.14.*
- python-dateutil
- python=3.6.*
@@ -18,3 +17,6 @@ dependencies:
- pytest-azurepipelines
- hypothesis>=3.58.0
- pip
+ - pip:
+ # Anaconda doesn't build a new enough Cython
+ - cython>=0.29.13
diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index 8f8273f57c3fe..76868f598f11b 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -5,7 +5,7 @@ channels:
dependencies:
- beautifulsoup4==4.6.0
- bottleneck=1.2.*
- - cython=0.28.2
+ - cython=0.29.13
- lxml
- matplotlib=2.2.2
- numpy=1.14.*
@@ -20,8 +20,8 @@ dependencies:
- xlsxwriter=0.9.8
- xlwt=1.2.0
# universal
- - pytest>=4.0.2,<5.0.0
- - pytest-xdist
+ - pytest>=5.0.0
+ - pytest-xdist>=1.29.0
- pytest-mock
- pytest-azurepipelines
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 2bf2bd74795d2..21205375204dc 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -4,7 +4,7 @@ channels:
- conda-forge
dependencies:
- beautifulsoup4
- - cython>=0.28.2
+ - cython>=0.29.13
- gcsfs
- html5lib
- ipython
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 05adbf0c924dc..24464adb74f5b 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -4,7 +4,7 @@ channels:
- conda-forge
dependencies:
- beautifulsoup4
- - cython>=0.28.2
+ - cython>=0.29.13
- html5lib
- ipython
- jinja2
@@ -26,8 +26,8 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest>=4.0.2
- - pytest-xdist
+ - pytest>=5.0.1
+ - pytest-xdist>=1.29.0
- pytest-mock
- pytest-azurepipelines
- pip
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 5cf897c98da10..0fb06fd43724c 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -4,9 +4,10 @@ channels:
dependencies:
- python=3.7.*
- pytz
- - Cython>=0.28.2
+ - Cython>=0.29.13
# universal
- - pytest>=4.0.2
+ # pytest < 5 until defaults has pytest-xdist>=1.29.0
+ - pytest>=4.0.2,<5.0
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml
index 98859b596ab2a..4e0f09904b695 100644
--- a/ci/deps/azure-macos-35.yaml
+++ b/ci/deps/azure-macos-35.yaml
@@ -4,7 +4,6 @@ channels:
dependencies:
- beautifulsoup4
- bottleneck
- - cython>=0.28.2
- html5lib
- jinja2
- lxml
@@ -22,13 +21,15 @@ dependencies:
- xlrd
- xlsxwriter
- xlwt
+ - pip
- pip:
+ # Anaconda / conda-forge don't build for 3.5
+ - cython>=0.29.13
- pyreadstat
# universal
- - pytest==4.5.0
- - pytest-xdist
+ - pytest>=5.0.1
+ - pytest-xdist>=1.29.0
- pytest-mock
- hypothesis>=3.58.0
# https://github.com/pandas-dev/pandas/issues/27421
- pytest-azurepipelines<1.0.0
-
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index b0f3f5389ac85..88b38aaef237c 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -1,17 +1,15 @@
name: pandas-dev
channels:
- - defaults
- conda-forge
+ - defaults
dependencies:
- blosc
- bottleneck
- - boost-cpp<1.67
- fastparquet>=0.2.1
- matplotlib=3.0.2
- numexpr
- numpy=1.15.*
- openpyxl
- - parquet-cpp
- pyarrow
- pytables
- python-dateutil
@@ -22,9 +20,9 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - cython>=0.28.2
- - pytest>=4.0.2
- - pytest-xdist
+ - cython>=0.29.13
+ - pytest>=5.0.1
+ - pytest-xdist>=1.29.0
- pytest-mock
- pytest-azurepipelines
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml
index 08208d1e2d59a..7680ed9fd9c92 100644
--- a/ci/deps/azure-windows-37.yaml
+++ b/ci/deps/azure-windows-37.yaml
@@ -25,9 +25,9 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - cython>=0.28.2
- - pytest>=4.0.2
- - pytest-xdist
+ - cython>=0.29.13
+ - pytest>=5.0.0
+ - pytest-xdist>=1.29.0
- pytest-mock
- pytest-azurepipelines
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index 6f85c32b9a915..e4e917d13990c 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -5,7 +5,7 @@ channels:
dependencies:
- beautifulsoup4
- botocore>=1.11
- - cython>=0.28.2
+ - cython>=0.29.13
- dask
- fastparquet>=0.2.1
- gcsfs
@@ -29,7 +29,7 @@ dependencies:
- python-snappy
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scikit-learn
- scipy
- sqlalchemy
@@ -39,8 +39,8 @@ dependencies:
- xlsxwriter
- xlwt
# universal
- - pytest>=4.0.2
- - pytest-xdist
+ - pytest>=5.0.1
+ - pytest-xdist>=1.29.0
- pytest-cov
- pytest-mock
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 0d9a760914dab..44795766d7c31 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -6,7 +6,7 @@ dependencies:
- beautifulsoup4
- blosc=1.14.3
- python-blosc
- - cython>=0.28.2
+ - cython>=0.29.13
- fastparquet=0.2.1
- gcsfs=0.2.2
- html5lib
@@ -29,13 +29,13 @@ dependencies:
- s3fs=0.0.8
- scipy
- sqlalchemy=1.1.4
- - xarray=0.8.2
+ - xarray=0.10
- xlrd
- xlsxwriter
- xlwt
# universal
- - pytest>=4.0.2
- - pytest-xdist
+ - pytest>=5.0.1
+ - pytest-xdist>=1.29.0
- pytest-mock
- pip
- pip:
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index 538a82f66e4c8..d54708d48a65e 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -4,7 +4,7 @@ channels:
- conda-forge
dependencies:
- beautifulsoup4
- - cython>=0.28.2
+ - cython>=0.29.13
- html5lib
- lxml
- matplotlib
@@ -18,15 +18,15 @@ dependencies:
- python-dateutil
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scipy
- sqlalchemy
- xlrd
- xlsxwriter
- xlwt
# universal
- - pytest>=4.0.2,<5.0.0
- - pytest-xdist
+ - pytest>=5.0.0
+ - pytest-xdist>=1.29.0
- pytest-mock
- moto
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index c9a8c274fb144..440ca6c480b87 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -6,18 +6,18 @@ channels:
dependencies:
- python=3.7.*
- botocore>=1.11
- - cython>=0.28.2
+ - cython>=0.29.13
- numpy
- python-dateutil
- nomkl
- pyarrow
- pytz
# universal
- - pytest>=4.0.2
- - pytest-xdist
+ - pytest>=5.0.0
+ - pytest-xdist>=1.29.0
- pytest-mock
- hypothesis>=3.58.0
- - s3fs
+ - s3fs<0.3
- pip
- pyreadstat
- pip:
diff --git a/ci/incremental/build.cmd b/ci/incremental/build.cmd
index 2cce38c03f406..b61b59e287299 100644
--- a/ci/incremental/build.cmd
+++ b/ci/incremental/build.cmd
@@ -1,9 +1,9 @@
@rem https://github.com/numba/numba/blob/master/buildscripts/incremental/build.cmd
-@rem Build numba extensions without silencing compile errors
-python setup.py build_ext -q --inplace
+@rem Build extensions
+python setup.py build_ext -q -i
-@rem Install pandas locally
-python -m pip install -e .
+@rem Install pandas
+python -m pip install --no-build-isolation -e .
if %errorlevel% neq 0 exit /b %errorlevel%
diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index a44281044e11d..e99e789a71fe8 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -1,52 +1,40 @@
#!/usr/bin/env python
-
import os
-import sys
-import math
import xml.etree.ElementTree as et
-def parse_results(filename):
+def main(filename):
+ if not os.path.isfile(filename):
+ return
+
tree = et.parse(filename)
root = tree.getroot()
- skipped = []
-
current_class = ""
- i = 1
- assert i - 1 == len(skipped)
for el in root.findall("testcase"):
cn = el.attrib["classname"]
for sk in el.findall("skipped"):
old_class = current_class
current_class = cn
- name = "{classname}.{name}".format(
- classname=current_class, name=el.attrib["name"]
- )
- msg = sk.attrib["message"]
- out = ""
if old_class != current_class:
- ndigits = int(math.log(i, 10) + 1)
-
- # 4 for : + space + # + space
- out += "-" * (len(name + msg) + 4 + ndigits) + "\n"
- out += "#{i} {name}: {msg}".format(i=i, name=name, msg=msg)
- skipped.append(out)
- i += 1
- assert i - 1 == len(skipped)
- assert i - 1 == len(skipped)
- # assert len(skipped) == int(root.attrib['skip'])
- return "\n".join(skipped)
-
-
-def main():
- test_files = ["test-data-single.xml", "test-data-multiple.xml", "test-data.xml"]
-
- print("SKIPPED TESTS:")
- for fn in test_files:
- if os.path.isfile(fn):
- print(parse_results(fn))
- return 0
+ yield None
+ yield {
+ "class_name": current_class,
+ "test_name": el.attrib["name"],
+ "message": sk.attrib["message"],
+ }
if __name__ == "__main__":
- sys.exit(main())
+ print("SKIPPED TESTS:")
+ i = 1
+ for file_type in ("-single", "-multiple", ""):
+ for test_data in main("test-data{}.xml".format(file_type)):
+ if test_data is None:
+ print("-" * 80)
+ else:
+ print(
+ "#{i} {class_name}.{test_name}: {message}".format(
+ **dict(test_data, i=i)
+ )
+ )
+ i += 1
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index ee46da9f52eab..57f1ecf1e56f7 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -1,13 +1,6 @@
-#!/bin/bash
+#!/bin/bash -e
-set -e
-
-if [ "$DOC" ]; then
- echo "We are not running pytest as this is a doc-build"
- exit 0
-fi
-
-# Workaround for pytest-xdist flaky collection order
+# Workaround for pytest-xdist (it collects different tests in the workers if PYTHONHASHSEED is not set)
# https://github.com/pytest-dev/pytest/issues/920
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
@@ -16,7 +9,7 @@ if [ -n "$LOCALE_OVERRIDE" ]; then
export LC_ALL="$LOCALE_OVERRIDE"
export LANG="$LOCALE_OVERRIDE"
PANDAS_LOCALE=`python -c 'import pandas; pandas.get_option("display.encoding")'`
- if [[ "$LOCALE_OVERIDE" != "$PANDAS_LOCALE" ]]; then
+ if [[ "$LOCALE_OVERRIDE" != "$PANDAS_LOCALE" ]]; then
echo "pandas could not detect the locale. System locale: $LOCALE_OVERRIDE, pandas detected: $PANDAS_LOCALE"
# TODO Not really aborting the tests until https://github.com/pandas-dev/pandas/issues/23923 is fixed
# exit 1
@@ -50,9 +43,10 @@ do
# if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code
sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret"
- if [[ "$COVERAGE" && $? == 0 ]]; then
- echo "uploading coverage for $TYPE tests"
- echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME"
- bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME
- fi
+ # 2019-08-21 disabling because this is hitting HTTP 400 errors GH#27602
+ # if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then
+ # echo "uploading coverage for $TYPE tests"
+ # echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME"
+ # bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME
+ # fi
done
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 88742e0483c7e..382491a947488 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -115,9 +115,20 @@ conda list pandas
# Make sure any error below is reported as such
-echo "Build extensions and install pandas"
-python setup.py build_ext -q --inplace
-python -m pip install -e .
+echo "[Build extensions]"
+python setup.py build_ext -q -i
+
+# XXX: Some of our environments end up with old versions of pip (10.x)
+# Adding a new enough version of pip to the requirements explodes the
+# solve time. Just using pip to update itself.
+# - py35_macos
+# - py35_compat
+# - py36_32bit
+echo "[Updating pip]"
+python -m pip install --no-deps -U pip wheel setuptools
+
+echo "[Install pandas]"
+python -m pip install --no-build-isolation -e .
echo
echo "conda list"
diff --git a/doc/logo/pandas_logo.py b/doc/logo/pandas_logo.py
index 5a07b094e6ad3..89410e3847bef 100644
--- a/doc/logo/pandas_logo.py
+++ b/doc/logo/pandas_logo.py
@@ -1,7 +1,6 @@
# script to generate the pandas logo
-from matplotlib import pyplot as plt
-from matplotlib import rcParams
+from matplotlib import pyplot as plt, rcParams
import numpy as np
rcParams["mathtext.fontset"] = "cm"
diff --git a/doc/make.py b/doc/make.py
index 48febef20fbe6..cbb1fa6a5324a 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -11,18 +11,18 @@
$ python make.py html
$ python make.py latex
"""
+import argparse
+import csv
import importlib
-import sys
import os
import shutil
-import csv
import subprocess
-import argparse
+import sys
import webbrowser
+
import docutils
import docutils.parsers.rst
-
DOC_PATH = os.path.dirname(os.path.abspath(__file__))
SOURCE_PATH = os.path.join(DOC_PATH, "source")
BUILD_PATH = os.path.join(DOC_PATH, "build")
diff --git a/doc/redirects.csv b/doc/redirects.csv
index a7886779c97d5..7171496b8cff3 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -503,7 +503,6 @@ generated/pandas.DataFrame.to_parquet,../reference/api/pandas.DataFrame.to_parqu
generated/pandas.DataFrame.to_period,../reference/api/pandas.DataFrame.to_period
generated/pandas.DataFrame.to_pickle,../reference/api/pandas.DataFrame.to_pickle
generated/pandas.DataFrame.to_records,../reference/api/pandas.DataFrame.to_records
-generated/pandas.DataFrame.to_sparse,../reference/api/pandas.DataFrame.to_sparse
generated/pandas.DataFrame.to_sql,../reference/api/pandas.DataFrame.to_sql
generated/pandas.DataFrame.to_stata,../reference/api/pandas.DataFrame.to_stata
generated/pandas.DataFrame.to_string,../reference/api/pandas.DataFrame.to_string
@@ -1432,7 +1431,6 @@ generated/pandas.Series.to_msgpack,../reference/api/pandas.Series.to_msgpack
generated/pandas.Series.to_numpy,../reference/api/pandas.Series.to_numpy
generated/pandas.Series.to_period,../reference/api/pandas.Series.to_period
generated/pandas.Series.to_pickle,../reference/api/pandas.Series.to_pickle
-generated/pandas.Series.to_sparse,../reference/api/pandas.Series.to_sparse
generated/pandas.Series.to_sql,../reference/api/pandas.Series.to_sql
generated/pandas.Series.to_string,../reference/api/pandas.Series.to_string
generated/pandas.Series.to_timestamp,../reference/api/pandas.Series.to_timestamp
@@ -1579,3 +1577,6 @@ generated/pandas.unique,../reference/api/pandas.unique
generated/pandas.util.hash_array,../reference/api/pandas.util.hash_array
generated/pandas.util.hash_pandas_object,../reference/api/pandas.util.hash_pandas_object
generated/pandas.wide_to_long,../reference/api/pandas.wide_to_long
+
+# Cached searches
+reference/api/pandas.DataFrame.from_csv,pandas.read_csv
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3ebc5d8b6333b..1da1948e45268 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -10,15 +10,15 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
-import os
-import inspect
import importlib
+import inspect
import logging
+import os
+import sys
+
import jinja2
-from sphinx.ext.autosummary import _import_by_name
from numpydoc.docscrape import NumpyDocString
-
+from sphinx.ext.autosummary import _import_by_name
logger = logging.getLogger(__name__)
@@ -141,7 +141,7 @@
# built documents.
#
# The short X.Y version.
-import pandas
+import pandas # noqa: E402 isort:skip
# version = '%s r%s' % (pandas.__version__, svn_version())
version = str(pandas.__version__)
@@ -315,7 +315,6 @@
import numpy as np
import pandas as pd
- randn = np.random.randn
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
pd.options.display.max_rows = 15
@@ -433,10 +432,14 @@
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
-import sphinx
-from sphinx.util import rpartition
-from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter
-from sphinx.ext.autosummary import Autosummary
+import sphinx # noqa: E402 isort:skip
+from sphinx.util import rpartition # noqa: E402 isort:skip
+from sphinx.ext.autodoc import ( # noqa: E402 isort:skip
+ AttributeDocumenter,
+ Documenter,
+ MethodDocumenter,
+)
+from sphinx.ext.autosummary import Autosummary # noqa: E402 isort:skip
class AccessorDocumenter(MethodDocumenter):
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 80dc8b0d8782b..3cdf9b83e96f3 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -133,22 +133,46 @@ Installing a C compiler
Pandas uses C extensions (mostly written using Cython) to speed up certain
operations. To install pandas from source, you need to compile these C
extensions, which means you need a C compiler. This process depends on which
-platform you're using. Follow the `CPython contributing guide
-<https://devguide.python.org/setup/#compile-and-build>`_ for getting a
-compiler installed. You don't need to do any of the ``./configure`` or ``make``
-steps; you only need to install the compiler.
-
-For Windows developers, when using Python 3.5 and later, it is sufficient to
-install `Visual Studio 2017 <https://visualstudio.com/>`_ with the
-**Python development workload** and the **Python native development tools**
-option. Otherwise, the following links may be helpful.
-
-* https://blogs.msdn.microsoft.com/pythonengineering/2017/03/07/python-support-in-vs2017/
-* https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/
-* https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit
-* https://cowboyprogrammer.org/building-python-wheels-for-windows/
-* https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/
-* https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy
+platform you're using.
+
+**Windows**
+
+You will need `Build Tools for Visual Studio 2019
+<https://visualstudio.microsoft.com/downloads/>`_.
+
+.. warning::
+ You DO NOT need to install Visual Studio 2019.
+ You only need "Build Tools for Visual Studio 2019" found by
+ scrolling down to "All downloads" -> "Tools for Visual Studio 2019".
+
+**Mac OS**
+
+Information about compiler installation can be found here:
+https://devguide.python.org/setup/#macos
+
+**Unix**
+
+Some Linux distributions will come with a pre-installed C compiler. To find out
+which compilers (and versions) are installed on your system::
+
+ # for Debian/Ubuntu:
+ dpkg --list | grep compiler
+ # for Red Hat/RHEL/CentOS/Fedora:
+ yum list installed | grep -i --color compiler
+
+`GCC (GNU Compiler Collection) <https://gcc.gnu.org/>`_, is a widely used
+compiler, which supports C and a number of other languages. If GCC is listed
+as an installed compiler nothing more is required. If no C compiler is
+installed (or you wish to install a newer version) you can install a compiler
+(GCC in the example code below) with::
+
+ # for recent Debian/Ubuntu:
+ sudo apt install build-essential
+ # for Red Hat/RHEL/CentOS/Fedora
+ yum groupinstall "Development Tools"
+
+For other Linux distributions, consult your favourite search engine for
+compiler installation instructions.
Let us know if you have any difficulties by opening an issue or reaching out on
`Gitter`_.
@@ -184,7 +208,7 @@ We'll now kick off a three-step process:
# Build and install pandas
python setup.py build_ext --inplace -j 4
- python -m pip install -e .
+ python -m pip install -e . --no-build-isolation
At this point you should be able to import pandas from your locally built version::
@@ -228,7 +252,7 @@ You'll need to have at least python3.5 installed on your system.
# Build and install pandas
python setup.py build_ext --inplace -j 4
- python -m pip install -e .
+ python -m pip install -e . --no-build-isolation
Creating a branch
-----------------
@@ -710,6 +734,136 @@ You'll also need to
See :ref:`contributing.warnings` for more.
+.. _contributing.type_hints:
+
+Type Hints
+----------
+
+*pandas* strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well!
+
+Style Guidelines
+~~~~~~~~~~~~~~~~
+
+Types imports should follow the ``from typing import ...`` convention. So rather than
+
+.. code-block:: python
+
+ import typing
+
+ primes = [] # type: typing.List[int]
+
+You should write
+
+.. code-block:: python
+
+ from typing import List, Optional, Union
+
+ primes = [] # type: List[int]
+
+``Optional`` should be used where applicable, so instead of
+
+.. code-block:: python
+
+ maybe_primes = [] # type: List[Union[int, None]]
+
+You should write
+
+.. code-block:: python
+
+ maybe_primes = [] # type: List[Optional[int]]
+
+In some cases in the code base classes may define class variables that shadow builtins. This causes an issue as described in `Mypy 1775 <https://github.com/python/mypy/issues/1775#issuecomment-310969854>`_. The defensive solution here is to create an unambiguous alias of the builtin and use that without your annotation. For example, if you come across a definition like
+
+.. code-block:: python
+
+ class SomeClass1:
+ str = None
+
+The appropriate way to annotate this would be as follows
+
+.. code-block:: python
+
+ str_type = str
+
+ class SomeClass2:
+ str = None # type: str_type
+
+In some cases you may be tempted to use ``cast`` from the typing module when you know better than the analyzer. This occurs particularly when using custom inference functions. For example
+
+.. code-block:: python
+
+ from typing import cast
+
+ from pandas.core.dtypes.common import is_number
+
+ def cannot_infer_bad(obj: Union[str, int, float]):
+
+ if is_number(obj):
+ ...
+ else: # Reasonably only str objects would reach this but...
+ obj = cast(str, obj) # Mypy complains without this!
+ return obj.upper()
+
+The limitation here is that while a human can reasonably understand that ``is_number`` would catch the ``int`` and ``float`` types mypy cannot make that same inference just yet (see `mypy #5206 <https://github.com/python/mypy/issues/5206>`_. While the above works, the use of ``cast`` is **strongly discouraged**. Where applicable a refactor of the code to appease static analysis is preferable
+
+.. code-block:: python
+
+ def cannot_infer_good(obj: Union[str, int, float]):
+
+ if isinstance(obj, str):
+ return obj.upper()
+ else:
+ ...
+
+With custom types and inference this is not always possible so exceptions are made, but every effort should be exhausted to avoid ``cast`` before going down such paths.
+
+Syntax Requirements
+~~~~~~~~~~~~~~~~~~~
+
+Because *pandas* still supports Python 3.5, :pep:`526` does not apply and variables **must** be annotated with type comments. Specifically, this is a valid annotation within pandas:
+
+.. code-block:: python
+
+ primes = [] # type: List[int]
+
+Whereas this is **NOT** allowed:
+
+.. code-block:: python
+
+ primes: List[int] = [] # not supported in Python 3.5!
+
+Note that function signatures can always be annotated per :pep:`3107`:
+
+.. code-block:: python
+
+ def sum_of_primes(primes: List[int] = []) -> int:
+ ...
+
+
+Pandas-specific Types
+~~~~~~~~~~~~~~~~~~~~~
+
+Commonly used types specific to *pandas* will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas.
+
+For example, quite a few functions in *pandas* accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module
+
+.. code-block:: python
+
+ from pandas._typing import Dtype
+
+ def as_type(dtype: Dtype) -> ...:
+ ...
+
+This module will ultimately house types for repeatedly used concepts like "path-like", "array-like", "numeric", etc... and can also hold aliases for commonly appearing parameters like `axis`. Development of this module is active so be sure to refer to the source for the most up to date list of available types.
+
+Validating Type Hints
+~~~~~~~~~~~~~~~~~~~~~
+
+*pandas* uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running
+
+.. code-block:: shell
+
+ mypy pandas
.. _contributing.ci:
diff --git a/doc/source/development/developer.rst b/doc/source/development/developer.rst
index a283920ae4377..923ef005d5926 100644
--- a/doc/source/development/developer.rst
+++ b/doc/source/development/developer.rst
@@ -37,12 +37,19 @@ So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a
.. code-block:: text
- {'index_columns': ['__index_level_0__', '__index_level_1__', ...],
+ {'index_columns': [<descr0>, <descr1>, ...],
'column_indexes': [<ci0>, <ci1>, ..., <ciN>],
'columns': [<c0>, <c1>, ...],
- 'pandas_version': $VERSION}
+ 'pandas_version': $VERSION,
+ 'creator': {
+ 'library': $LIBRARY,
+ 'version': $LIBRARY_VERSION
+ }}
-Here, ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata
+The "descriptor" values ``<descr0>`` in the ``'index_columns'`` field are
+strings (referring to a column) or dictionaries with values as described below.
+
+The ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata
for each column, *including the index columns*. This has JSON form:
.. code-block:: text
@@ -53,26 +60,37 @@ for each column, *including the index columns*. This has JSON form:
'numpy_type': numpy_type,
'metadata': metadata}
-.. note::
+See below for the detailed specification for these.
+
+Index Metadata Descriptors
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``RangeIndex`` can be stored as metadata only, not requiring serialization. The
+descriptor format for these is as follows:
- Every index column is stored with a name matching the pattern
- ``__index_level_\d+__`` and its corresponding column information is can be
- found with the following code snippet.
+.. code-block:: python
- Following this naming convention isn't strictly necessary, but strongly
- suggested for compatibility with Arrow.
+ index = pd.RangeIndex(0, 10, 2)
+ {'kind': 'range',
+ 'name': index.name,
+ 'start': index.start,
+ 'stop': index.stop,
+ 'step': index.step}
- Here's an example of how the index metadata is structured in pyarrow:
+Other index types must be serialized as data columns along with the other
+DataFrame columns. The metadata for these is a string indicating the name of
+the field in the data columns, for example ``'__index_level_0__'``.
- .. code-block:: python
+If an index has a non-None ``name`` attribute, and there is no other column
+with a name matching that value, then the ``index.name`` value can be used as
+the descriptor. Otherwise (for unnamed indexes and ones with names colliding
+with other column names) a disambiguating name with pattern matching
+``__index_level_\d+__`` should be used. In cases of named indexes as data
+columns, ``name`` attribute is always stored in the column descriptors as
+above.
- # assuming there's at least 3 levels in the index
- index_columns = metadata['index_columns'] # noqa: F821
- columns = metadata['columns'] # noqa: F821
- ith_index = 2
- assert index_columns[ith_index] == '__index_level_2__'
- ith_index_info = columns[-len(index_columns):][ith_index]
- ith_index_level_name = ith_index_info['name']
+Column Metadata
+~~~~~~~~~~~~~~~
``pandas_type`` is the logical type of the column, and is one of:
@@ -161,4 +179,8 @@ As an example of fully-formed metadata:
'numpy_type': 'int64',
'metadata': None}
],
- 'pandas_version': '0.20.0'}
+ 'pandas_version': '0.20.0',
+ 'creator': {
+ 'library': 'pyarrow',
+ 'version': '0.13.0'
+ }}
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index b492a4edd70a4..e341dcb8318bc 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -441,5 +441,22 @@ This would be more or less equivalent to:
The backend module can then use other visualization tools (Bokeh, Altair,...)
to generate the plots.
+Libraries implementing the plotting backend should use `entry points <https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`__
+to make their backend discoverable to pandas. The key is ``"pandas_plotting_backends"``. For example, pandas
+registers the default "matplotlib" backend as follows.
+
+.. code-block:: python
+
+ # in setup.py
+ setup( # noqa: F821
+ ...,
+ entry_points={
+ "pandas_plotting_backends": [
+ "matplotlib = pandas:plotting._matplotlib",
+ ],
+ },
+ )
+
+
More information on how to implement a third-party plotting backend can be found at
https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1.
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
index a149f31118ed5..1228f00667f3a 100644
--- a/doc/source/development/index.rst
+++ b/doc/source/development/index.rst
@@ -16,3 +16,5 @@ Development
internals
extending
developer
+ policies
+ roadmap
diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
new file mode 100644
index 0000000000000..2083a30af09c3
--- /dev/null
+++ b/doc/source/development/policies.rst
@@ -0,0 +1,57 @@
+.. _develop.policies:
+
+********
+Policies
+********
+
+.. _policies.version:
+
+Version Policy
+~~~~~~~~~~~~~~
+
+.. versionchanged:: 1.0.0
+
+Pandas uses a loose variant of semantic versioning (`SemVer`_) to govern
+deprecations, API compatibility, and version numbering.
+
+A pandas release number is made up of ``MAJOR.MINOR.PATCH``.
+
+API breaking changes should only occur in **major** releases. These changes
+will be documented, with clear guidance on what is changing, why it's changing,
+and how to migrate existing code to the new behavior.
+
+Whenever possible, a deprecation path will be provided rather than an outright
+breaking change.
+
+Pandas will introduce deprecations in **minor** releases. These deprecations
+will preserve the existing behavior while emitting a warning that provides
+guidance on:
+
+* How to achieve similar behavior if an alternative is available
+* The pandas version in which the deprecation will be enforced.
+
+We will not introduce new deprecations in patch releases.
+
+Deprecations will only be enforced in **major** releases. For example, if a
+behavior is deprecated in pandas 1.2.0, it will continue to work, with a
+warning, for all releases in the 1.x series. The behavior will change and the
+deprecation removed in the next major release (2.0.0).
+
+.. note::
+
+ Pandas will sometimes make *behavior changing* bug fixes, as part of
+ minor or patch releases. Whether or not a change is a bug fix or an
+ API-breaking change is a judgement call. We'll do our best, and we
+ invite you to participate in development discussion on the issue
+ tracker or mailing list.
+
+These policies do not apply to features marked as **experimental** in the documentation.
+Pandas may change the behavior of experimental features at any time.
+
+Python Support
+~~~~~~~~~~~~~~
+
+Pandas will only drop support for specific Python versions (e.g. 3.5.x, 3.6.x) in
+pandas **major** releases.
+
+.. _SemVer: https://semver.org
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst
new file mode 100644
index 0000000000000..00598830e2fe9
--- /dev/null
+++ b/doc/source/development/roadmap.rst
@@ -0,0 +1,193 @@
+.. _roadmap:
+
+=======
+Roadmap
+=======
+
+This page provides an overview of the major themes in pandas' development. Each of
+these items requires a relatively large amount of effort to implement. These may
+be achieved more quickly with dedicated funding or interest from contributors.
+
+An item being on the roadmap does not mean that it will *necessarily* happen, even
+with unlimited funding. During the implementation period we may discover issues
+preventing the adoption of the feature.
+
+Additionally, an item *not* being on the roadmap does not exclude it from inclusion
+in pandas. The roadmap is intended for larger, fundamental changes to the project that
+are likely to take months or years of developer time. Smaller-scoped items will continue
+to be tracked on our `issue tracker <https://github.com/pandas-dev/pandas/issues>`__.
+
+See :ref:`roadmap.evolution` for proposing changes to this document.
+
+Extensibility
+-------------
+
+Pandas :ref:`extending.extension-types` allow for extending NumPy types with custom
+data types and array storage. Pandas uses extension types internally, and provides
+an interface for 3rd-party libraries to define their own custom data types.
+
+Many parts of pandas still unintentionally convert data to a NumPy array.
+These problems are especially pronounced for nested data.
+
+We'd like to improve the handling of extension arrays throughout the library,
+making their behavior more consistent with the handling of NumPy arrays. We'll do this
+by cleaning up pandas' internals and adding new methods to the extension array interface.
+
+String data type
+----------------
+
+Currently, pandas stores text data in an ``object`` -dtype NumPy array.
+The current implementation has two primary drawbacks: First, ``object`` -dtype
+is not specific to strings: any Python object can be stored in an ``object`` -dtype
+array, not just strings. Second: this is not efficient. The NumPy memory model
+isn't especially well-suited to variable width text data.
+
+To solve the first issue, we propose a new extension type for string data. This
+will initially be opt-in, with users explicitly requesting ``dtype="string"``.
+The array backing this string dtype may initially be the current implementation:
+an ``object`` -dtype NumPy array of Python strings.
+
+To solve the second issue (performance), we'll explore alternative in-memory
+array libraries (for example, Apache Arrow). As part of the work, we may
+need to implement certain operations expected by pandas users (for example
+the algorithm used in, ``Series.str.upper``). That work may be done outside of
+pandas.
+
+Apache Arrow interoperability
+-----------------------------
+
+`Apache Arrow <https://arrow.apache.org>`__ is a cross-language development
+platform for in-memory data. The Arrow logical types are closely aligned with
+typical pandas use cases.
+
+We'd like to provide better-integrated support for Arrow memory and data types
+within pandas. This will let us take advantage of its I/O capabilities and
+provide for better interoperability with other languages and libraries
+using Arrow.
+
+Block manager rewrite
+---------------------
+
+We'd like to replace pandas current internal data structures (a collection of
+1 or 2-D arrays) with a simpler collection of 1-D arrays.
+
+Pandas internal data model is quite complex. A DataFrame is made up of
+one or more 2-dimensional "blocks", with one or more blocks per dtype. This
+collection of 2-D arrays is managed by the BlockManager.
+
+The primary benefit of the BlockManager is improved performance on certain
+operations (construction from a 2D array, binary operations, reductions across the columns),
+especially for wide DataFrames. However, the BlockManager substantially increases the
+complexity and maintenance burden of pandas.
+
+By replacing the BlockManager we hope to achieve
+
+* Substantially simpler code
+* Easier extensibility with new logical types
+* Better user control over memory use and layout
+* Improved micro-performance
+* Option to provide a C / Cython API to pandas' internals
+
+See `these design documents <https://dev.pandas.io/pandas2/internal-architecture.html#removal-of-blockmanager-new-dataframe-internals>`__
+for more.
+
+Decoupling of indexing and internals
+------------------------------------
+
+The code for getting and setting values in pandas' data structures needs refactoring.
+In particular, we must clearly separate code that converts keys (e.g., the argument
+to ``DataFrame.loc``) to positions from code that uses these positions to get
+or set values. This is related to the proposed BlockManager rewrite. Currently, the
+BlockManager sometimes uses label-based, rather than position-based, indexing.
+We propose that it should only work with positional indexing, and the translation of keys
+to positions should be entirely done at a higher level.
+
+Indexing is a complicated API with many subtleties. This refactor will require care
+and attention. More details are discussed at
+https://github.com/pandas-dev/pandas/wiki/(Tentative)-rules-for-restructuring-indexing-code
+
+Numba-accelerated operations
+----------------------------
+
+`Numba <https://numba.pydata.org>`__ is a JIT compiler for Python code. We'd like to provide
+ways for users to apply their own Numba-jitted functions where pandas accepts user-defined functions
+(for example, :meth:`Series.apply`, :meth:`DataFrame.apply`, :meth:`DataFrame.applymap`,
+and in groupby and window contexts). This will improve the performance of
+user-defined-functions in these operations by staying within compiled code.
+
+
+Documentation improvements
+--------------------------
+
+We'd like to improve the content, structure, and presentation of the pandas documentation.
+Some specific goals include
+
+* Overhaul the HTML theme with a modern, responsive design (:issue:`15556`)
+* Improve the "Getting Started" documentation, designing and writing learning paths
+  for users from different backgrounds (e.g. brand new to programming, familiar with
+ other languages like R, already familiar with Python).
+* Improve the overall organization of the documentation and specific subsections
+ of the documentation to make navigation and finding content easier.
+
+Package docstring validation
+----------------------------
+
+To improve the quality and consistency of pandas docstrings, we've developed
+tooling to check docstrings in a variety of ways.
+https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py
+contains the checks.
+
+Like many other projects, pandas uses the
+`numpydoc <https://numpydoc.readthedocs.io/en/latest/>`__ style for writing
+docstrings. With the collaboration of the numpydoc maintainers, we'd like to
+move the checks to a package other than pandas so that other projects can easily
+use them as well.
+
+Performance monitoring
+----------------------
+
+Pandas uses `airspeed velocity <https://asv.readthedocs.io/en/stable/>`__ to
+monitor for performance regressions. ASV itself is a fabulous tool, but requires
+some additional work to be integrated into an open source project's workflow.
+
+The `asv-runner <https://github.com/asv-runner>`__ organization, currently made up
+of pandas maintainers, provides tools built on top of ASV. We have a physical
+machine for running a number of projects' benchmarks, and tools for managing the
+benchmark runs and reporting on results.
+
+We'd like to fund improvements and maintenance of these tools to
+
+* Be more stable. Currently, they're maintained on the nights and weekends when
+ a maintainer has free time.
+* Tune the system for benchmarks to improve stability, following
+ https://pyperf.readthedocs.io/en/latest/system.html
+* Build a GitHub bot to request ASV runs *before* a PR is merged. Currently, the
+ benchmarks are only run nightly.
+
+.. _roadmap.evolution:
+
+Roadmap Evolution
+-----------------
+
+Pandas continues to evolve. The direction is primarily determined by community
+interest. Everyone is welcome to review existing items on the roadmap and
+to propose a new item.
+
+Each item on the roadmap should be a short summary of a larger design proposal.
+The proposal should include
+
+1. Short summary of the changes, which would be appropriate for inclusion in
+ the roadmap if accepted.
+2. Motivation for the changes.
+3. An explanation of why the change is in scope for pandas.
+4. Detailed design: Preferably with example-usage (even if not implemented yet)
+ and API documentation
+5. API Change: Any API changes that may result from the proposal.
+
+That proposal may then be submitted as a GitHub issue, where the pandas maintainers
+can review and comment on the design. The `pandas mailing list <https://mail.python.org/mailman/listinfo/pandas-dev>`__
+should be notified of the proposal.
+
+When there's agreement that an implementation
+would be welcome, the roadmap should be updated to include the summary and a
+link to the discussion issue.
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index b76dd3e0ff8e6..aaf2040156a45 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -23,6 +23,21 @@ or `search pypi for pandas <https://pypi.org/search/?q=pandas>`_.
We'd like to make it easier for users to find these projects, if you know of other
substantial projects that you feel should be on this list, please let us know.
+.. _ecosystem.data_cleaning_and_validation:
+
+Data cleaning and validation
+----------------------------
+
+`pyjanitor <https://github.com/ericmjl/pyjanitor/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Pyjanitor provides a clean API for cleaning data, using method chaining.
+
+`Engarde <https://engarde.readthedocs.io/en/latest/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Engarde is a lightweight library used to explicitly state assumptions about your datasets
+and check that they're *actually* true.
.. _ecosystem.stats:
@@ -72,6 +87,17 @@ the latest web technologies. Its goal is to provide elegant, concise constructio
graphics in the style of Protovis/D3, while delivering high-performance interactivity over
large data to thin clients.
+`Pandas-Bokeh <https://github.com/PatrikHlobil/Pandas-Bokeh>`__ provides a high level API
+for Bokeh that can be loaded as a native Pandas plotting backend via
+
+.. code:: python
+
+ pd.set_option("plotting.backend", "pandas_bokeh")
+
+It is very similar to the matplotlib plotting backend, but provides interactive
+web-based charts and maps.
+
+
`seaborn <https://seaborn.pydata.org>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -318,18 +344,6 @@ Increasingly, packages are being built on top of pandas to address specific need
* vaex.from_pandas
* vaex.to_pandas_df
-
-.. _ecosystem.data_validation:
-
-Data validation
----------------
-
-`Engarde <https://engarde.readthedocs.io/en/latest/>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Engarde is a lightweight library used to explicitly state your assumptions about your datasets
-and check that they're *actually* true.
-
.. _ecosystem.extensions:
Extension data types
diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst
index 510c7ef97aa98..41520795bde62 100644
--- a/doc/source/getting_started/10min.rst
+++ b/doc/source/getting_started/10min.rst
@@ -278,7 +278,7 @@ Using a single column's values to select data.
.. ipython:: python
- df[df.A > 0]
+ df[df['A'] > 0]
Selecting values from a DataFrame where a boolean condition is met.
@@ -468,6 +468,13 @@ Concatenating pandas objects together with :func:`concat`:
pd.concat(pieces)
+.. note::
+ Adding a column to a ``DataFrame`` is relatively fast. However, adding
+ a row requires a copy, and may be expensive. We recommend passing a
+ pre-built list of records to the ``DataFrame`` constructor instead
+ of building a ``DataFrame`` by iteratively appending records to it.
+ See :ref:`Appending to dataframe <merging.concatenation>` for more.
+
Join
~~~~
@@ -491,21 +498,6 @@ Another example that can be given is:
right
pd.merge(left, right, on='key')
-
-Append
-~~~~~~
-
-Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>`
-section.
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
- df
- s = df.iloc[3]
- df.append(s, ignore_index=True)
-
-
Grouping
--------
diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index bc3b7b4c70fd1..802ffadf2a81e 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -926,7 +926,7 @@ Single aggregations on a ``Series`` this will return a scalar value:
.. ipython:: python
- tsdf.A.agg('sum')
+ tsdf['A'].agg('sum')
Aggregating with multiple functions
@@ -950,13 +950,13 @@ On a ``Series``, multiple functions return a ``Series``, indexed by the function
.. ipython:: python
- tsdf.A.agg(['sum', 'mean'])
+ tsdf['A'].agg(['sum', 'mean'])
Passing a ``lambda`` function will yield a ``<lambda>`` named row:
.. ipython:: python
- tsdf.A.agg(['sum', lambda x: x.mean()])
+ tsdf['A'].agg(['sum', lambda x: x.mean()])
Passing a named function will yield that name for the row:
@@ -965,7 +965,7 @@ Passing a named function will yield that name for the row:
def mymean(x):
return x.mean()
- tsdf.A.agg(['sum', mymean])
+ tsdf['A'].agg(['sum', mymean])
Aggregating with a dict
+++++++++++++++++++++++
@@ -1065,7 +1065,7 @@ Passing a single function to ``.transform()`` with a ``Series`` will yield a sin
.. ipython:: python
- tsdf.A.transform(np.abs)
+ tsdf['A'].transform(np.abs)
Transform with multiple functions
@@ -1084,7 +1084,7 @@ resulting column names will be the transforming functions.
.. ipython:: python
- tsdf.A.transform([np.abs, lambda x: x + 1])
+ tsdf['A'].transform([np.abs, lambda x: x + 1])
Transforming with a dict
@@ -1422,8 +1422,6 @@ The :meth:`~DataFrame.rename` method also provides an ``inplace`` named
parameter that is by default ``False`` and copies the underlying data. Pass
``inplace=True`` to rename the data in place.
-.. versionadded:: 0.18.0
-
Finally, :meth:`~Series.rename` also accepts a scalar or list-like
for altering the ``Series.name`` attribute.
@@ -2063,8 +2061,6 @@ Convert a subset of columns to a specified type using :meth:`~DataFrame.astype`.
dft
dft.dtypes
-.. versionadded:: 0.19.0
-
Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFrame.astype`.
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst
index 444e886bc951d..f67f46fc2b29b 100644
--- a/doc/source/getting_started/comparison/comparison_with_r.rst
+++ b/doc/source/getting_started/comparison/comparison_with_r.rst
@@ -81,7 +81,7 @@ R pandas
=========================================== ===========================================
``select(df, col_one = col1)`` ``df.rename(columns={'col1': 'col_one'})['col_one']``
``rename(df, col_one = col1)`` ``df.rename(columns={'col1': 'col_one'})``
-``mutate(df, c=a-b)`` ``df.assign(c=df.a-df.b)``
+``mutate(df, c=a-b)`` ``df.assign(c=df['a']-df['b'])``
=========================================== ===========================================
@@ -258,8 +258,8 @@ index/slice as well as standard boolean indexing:
df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)})
df.query('a <= b')
- df[df.a <= df.b]
- df.loc[df.a <= df.b]
+ df[df['a'] <= df['b']]
+ df.loc[df['a'] <= df['b']]
For more details and examples see :ref:`the query documentation
<indexing.query>`.
@@ -284,7 +284,7 @@ In ``pandas`` the equivalent expression, using the
df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)})
df.eval('a + b')
- df.a + df.b # same as the previous expression
+ df['a'] + df['b'] # same as the previous expression
In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than
evaluation in pure Python. For more details and examples see :ref:`the eval
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 366fdd546f58b..6a03c06de3699 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -49,6 +49,20 @@ With pandas, column selection is done by passing a list of column names to your
Calling the DataFrame without the list of column names would display all columns (akin to SQL's
``*``).
+In SQL, you can add a calculated column:
+
+.. code-block:: sql
+
+ SELECT *, tip/total_bill as tip_rate
+ FROM tips
+ LIMIT 5;
+
+With pandas, you can use the :meth:`DataFrame.assign` method of a DataFrame to append a new column:
+
+.. ipython:: python
+
+ tips.assign(tip_rate=tips['tip'] / tips['total_bill']).head(5)
+
WHERE
-----
Filtering in SQL is done via a WHERE clause.
diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index 2fb0b163642c5..9e18951fe3f4c 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -251,8 +251,6 @@ Series can also have a ``name`` attribute:
The Series ``name`` will be assigned automatically in many cases, in particular
when taking 1D slices of DataFrame as you will see below.
-.. versionadded:: 0.18.0
-
You can rename a Series with the :meth:`pandas.Series.rename` method.
.. ipython:: python
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index b57ce83cfc33c..f5669626aa2b3 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -39,7 +39,7 @@ See the :ref:`overview` for more detail about what's in the library.
:hidden:
{% endif %}
{% if not single_doc %}
- What's New in 0.25.0 <whatsnew/v0.25.0>
+ What's New in 1.0.0 <whatsnew/v1.0.0>
install
getting_started/index
user_guide/index
@@ -53,7 +53,7 @@ See the :ref:`overview` for more detail about what's in the library.
whatsnew/index
{% endif %}
-* :doc:`whatsnew/v0.25.0`
+* :doc:`whatsnew/v1.0.0`
* :doc:`install`
* :doc:`getting_started/index`
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 352b56ebd3020..fc99b458fa0af 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -15,35 +15,10 @@ Instructions for installing from source,
`PyPI <https://pypi.org/project/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a
`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
-.. _install.dropping-27:
-
-Plan for dropping Python 2.7
-----------------------------
-
-The Python core team plans to stop supporting Python 2.7 on January 1st, 2020.
-In line with `NumPy's plans`_, all pandas releases through December 31, 2018
-will support Python 2.
-
-The 0.24.x feature release will be the last release to
-support Python 2. The released package will continue to be available on
-PyPI and through conda.
-
- Starting **January 1, 2019**, all new feature releases (> 0.24) will be Python 3 only.
-
-If there are people interested in continued support for Python 2.7 past December
-31, 2018 (either backporting bug fixes or funding) please reach out to the
-maintainers on the issue tracker.
-
-For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_.
-
-.. _NumPy's plans: https://github.com/numpy/numpy/blob/master/doc/neps/nep-0014-dropping-python2.7-proposal.rst#plan-for-dropping-python-27-support
-.. _Python 3 statement: http://python3statement.org/
-.. _Porting to Python 3 guide: https://docs.python.org/3/howto/pyporting.html
-
Python version support
----------------------
-Officially Python 2.7, 3.5, 3.6, and 3.7.
+Officially Python 3.5.3 and above, 3.6, and 3.7.
Installing pandas
-----------------
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst
index 407aab4bb1f1b..4b1a99da7cd4c 100644
--- a/doc/source/reference/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -34,7 +34,6 @@ objects.
api.extensions.ExtensionArray._concat_same_type
api.extensions.ExtensionArray._formatter
- api.extensions.ExtensionArray._formatting_values
api.extensions.ExtensionArray._from_factorized
api.extensions.ExtensionArray._from_sequence
api.extensions.ExtensionArray._from_sequence_of_strings
@@ -45,6 +44,7 @@ objects.
api.extensions.ExtensionArray.argsort
api.extensions.ExtensionArray.astype
api.extensions.ExtensionArray.copy
+ api.extensions.ExtensionArray.view
api.extensions.ExtensionArray.dropna
api.extensions.ExtensionArray.factorize
api.extensions.ExtensionArray.fillna
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index b1c6172fb1261..4982edeb7f85b 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -356,15 +356,7 @@ Serialization / IO / conversion
DataFrame.to_msgpack
DataFrame.to_gbq
DataFrame.to_records
- DataFrame.to_sparse
DataFrame.to_dense
DataFrame.to_string
DataFrame.to_clipboard
DataFrame.style
-
-Sparse
-~~~~~~
-.. autosummary::
- :toctree: api/
-
- SparseDataFrame.to_coo
diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst
index 666220d390cdc..91f4942d03b0d 100644
--- a/doc/source/reference/io.rst
+++ b/doc/source/reference/io.rst
@@ -105,6 +105,13 @@ SAS
read_sas
+SPSS
+~~~~
+.. autosummary::
+ :toctree: api/
+
+ read_spss
+
SQL
~~~
.. autosummary::
diff --git a/doc/source/reference/plotting.rst b/doc/source/reference/plotting.rst
index 7615e1d20f5e2..95657dfa5fde5 100644
--- a/doc/source/reference/plotting.rst
+++ b/doc/source/reference/plotting.rst
@@ -13,10 +13,14 @@ The following functions are contained in the `pandas.plotting` module.
:toctree: api/
andrews_curves
+ autocorrelation_plot
bootstrap_plot
+ boxplot
deregister_matplotlib_converters
lag_plot
parallel_coordinates
+ plot_params
radviz
register_matplotlib_converters
scatter_matrix
+ table
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 7ba625c141f24..5d825c8092efc 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -576,18 +576,7 @@ Serialization / IO / conversion
Series.to_sql
Series.to_msgpack
Series.to_json
- Series.to_sparse
Series.to_dense
Series.to_string
Series.to_clipboard
Series.to_latex
-
-
-Sparse
-------
-
-.. autosummary::
- :toctree: api/
-
- SparseSeries.to_coo
- SparseSeries.from_coo
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst
index 9e1374a3bd8e4..2f6addf607877 100644
--- a/doc/source/reference/window.rst
+++ b/doc/source/reference/window.rst
@@ -5,7 +5,6 @@
======
Window
======
-.. currentmodule:: pandas.core.window
Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc.
Expanding objects are returned by ``.expanding`` calls: :func:`pandas.DataFrame.expanding`, :func:`pandas.Series.expanding`, etc.
@@ -13,6 +12,8 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func:
Standard moving window functions
--------------------------------
+.. currentmodule:: pandas.core.window.rolling
+
.. autosummary::
:toctree: api/
@@ -38,6 +39,8 @@ Standard moving window functions
Standard expanding window functions
-----------------------------------
+.. currentmodule:: pandas.core.window.expanding
+
.. autosummary::
:toctree: api/
@@ -59,6 +62,8 @@ Standard expanding window functions
Exponentially-weighted moving window functions
----------------------------------------------
+.. currentmodule:: pandas.core.window.ewm
+
.. autosummary::
:toctree: api/
diff --git a/doc/source/themes/nature_with_gtoc/layout.html b/doc/source/themes/nature_with_gtoc/layout.html
index b3f13f99f44d4..6e7d8ece35133 100644
--- a/doc/source/themes/nature_with_gtoc/layout.html
+++ b/doc/source/themes/nature_with_gtoc/layout.html
@@ -94,15 +94,15 @@ <h3 style="margin-top: 1.5em;">{{ _('Search') }}</h3>
});
});
</script>
-<script type="text/javascript">
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-27880019-2']);
- _gaq.push(['_trackPageview']);
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
+<!-- Google Analytics -->
+<script>
+window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
+ga('create', 'UA-27880019-2', 'auto');
+ga('set', 'anonymizeIp', true);
+ga('send', 'pageview');
</script>
+<script async src='https://www.google-analytics.com/analytics.js'></script>
+<!-- End Google Analytics -->
+
{% endblock %}
diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst
index a42ab4f0255bd..62a9b6396404a 100644
--- a/doc/source/user_guide/advanced.rst
+++ b/doc/source/user_guide/advanced.rst
@@ -738,7 +738,7 @@ and allows efficient indexing and storage of an index with a large number of dup
df['B'] = df['B'].astype(CategoricalDtype(list('cab')))
df
df.dtypes
- df.B.cat.categories
+ df['B'].cat.categories
Setting the index will create a ``CategoricalIndex``.
@@ -810,15 +810,10 @@ values **not** in the categories, similarly to how you can reindex **any** panda
Int64Index and RangeIndex
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. warning::
-
- Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`.
-
-:class:`Int64Index` is a fundamental basic index in pandas.
-This is an immutable array implementing an ordered, sliceable set.
-Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects.
+:class:`Int64Index` is a fundamental basic index in pandas. This is an immutable array
+implementing an ordered, sliceable set.
-:class:`RangeIndex` is a sub-class of ``Int64Index`` added in version 0.18.0, now providing the default index for all ``NDFrame`` objects.
+:class:`RangeIndex` is a sub-class of ``Int64Index`` that provides the default index for all ``NDFrame`` objects.
``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analogous to Python `range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`__.
.. _indexing.float64index:
@@ -880,16 +875,6 @@ In non-float indexes, slicing using floats will raise a ``TypeError``.
In [1]: pd.Series(range(5))[3.5:4.5]
TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index)
-.. warning::
-
- Using a scalar float indexer for ``.iloc`` has been removed in 0.18.0, so the following will raise a ``TypeError``:
-
- .. code-block:: ipython
-
- In [3]: pd.Series(range(5)).iloc[3.0]
- TypeError: cannot do positional indexing on <class 'pandas.indexes.range.RangeIndex'> with these indexers [3.0] of <type 'float'>
-
-
Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
irregular timedelta-like indexing scheme, but the data is recorded as floats. This could, for
example, be millisecond offsets.
diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index 7dca34385c1ee..8ca96ba0daa5e 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -834,8 +834,6 @@ See also the section on :ref:`merge dtypes<merging.dtypes>` for notes about pres
Unioning
~~~~~~~~
-.. versionadded:: 0.19.0
-
If you want to combine categoricals that do not necessarily have the same
categories, the :func:`~pandas.api.types.union_categoricals` function will
combine a list-like of categoricals. The new categories will be the union of
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 4f44fcaab63d4..4beac5e035efc 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -182,7 +182,7 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.np.random.randn(5), index=list('abcde'))
+ s = pd.Series(np.random.randn(5), index=list('abcde'))
s['d'] = s['b'] # so there's a tie
s.rank()
@@ -192,7 +192,7 @@ ranking.
.. ipython:: python
- df = pd.DataFrame(np.random.np.random.randn(10, 6))
+ df = pd.DataFrame(np.random.randn(10, 6))
df[4] = df[2][:5] # some ties
df
df.rank(1)
@@ -408,9 +408,7 @@ For some windowing functions, additional parameters must be specified:
Time-aware rolling
~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.19.0
-
-New in version 0.19.0 are the ability to pass an offset (or convertible) to a ``.rolling()`` method and have it produce
+It is possible to pass an offset (or convertible) to a ``.rolling()`` method and have it produce
variable sized windows based on the passed time window. For each time point, this includes all preceding values occurring
within the indicated time delta.
@@ -893,10 +891,9 @@ Therefore, there is an assumption that :math:`x_0` is not an ordinary value
but rather an exponentially weighted moment of the infinite series up to that
point.
-One must have :math:`0 < \alpha \leq 1`, and while since version 0.18.0
-it has been possible to pass :math:`\alpha` directly, it's often easier
-to think about either the **span**, **center of mass (com)** or **half-life**
-of an EW moment:
+One must have :math:`0 < \alpha \leq 1`, and while it is possible to pass
+:math:`\alpha` directly, it's often easier to think about either the
+**span**, **center of mass (com)** or **half-life** of an EW moment:
.. math::
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 15af5208a4f1f..c9d3bc3a28c70 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -592,8 +592,8 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=['A'])
- df.A.groupby((df.A != df.A.shift()).cumsum()).groups
- df.A.groupby((df.A != df.A.shift()).cumsum()).cumsum()
+ df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).groups
+ df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).cumsum()
Expanding data
**************
@@ -719,7 +719,7 @@ Rolling Apply to multiple columns where function calculates a Series before a Sc
df
def gm(df, const):
- v = ((((df.A + df.B) + 1).cumprod()) - 1) * const
+ v = ((((df['A'] + df['B']) + 1).cumprod()) - 1) * const
return v.iloc[-1]
s = pd.Series({df.index[i]: gm(df.iloc[i:min(i + 51, len(df) - 1)], 5)
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index c15991fabfd3b..2df5b9d82dcc3 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -243,9 +243,9 @@ We've gotten another big improvement. Let's check again where the time is spent:
.. ipython:: python
- %prun -l 4 apply_integrate_f(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
+ %%prun -l 4 apply_integrate_f(df['a'].to_numpy(),
+ df['b'].to_numpy(),
+ df['N'].to_numpy())
As one might expect, the majority of the time is now spent in ``apply_integrate_f``,
so if we wanted to make anymore efficiencies we must continue to concentrate our
@@ -393,15 +393,15 @@ Consider the following toy example of doubling each observation:
.. code-block:: ipython
# Custom function without numba
- In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba) # noqa E501
+ In [5]: %timeit df['col1_doubled'] = df['a'].apply(double_every_value_nonumba) # noqa E501
1000 loops, best of 3: 797 us per loop
# Standard implementation (faster than a custom function)
- In [6]: %timeit df['col1_doubled'] = df.a * 2
+ In [6]: %timeit df['col1_doubled'] = df['a'] * 2
1000 loops, best of 3: 233 us per loop
# Custom function with numba
- In [7]: %timeit (df['col1_doubled'] = double_every_value_withnumba(df.a.to_numpy())
+ In [7]: %timeit (df['col1_doubled'] = double_every_value_withnumba(df['a'].to_numpy())
1000 loops, best of 3: 145 us per loop
Caveats
@@ -601,8 +601,6 @@ This allows for *formulaic evaluation*. The assignment target can be a
new column name or an existing column name, and it must be a valid Python
identifier.
-.. versionadded:: 0.18.0
-
The ``inplace`` keyword determines whether this assignment will performed
on the original ``DataFrame`` or return a copy with the new column.
@@ -630,8 +628,6 @@ new or modified columns is returned and the original frame is unchanged.
df.eval('e = a - c', inplace=False)
df
-.. versionadded:: 0.18.0
-
As a convenience, multiple assignments can be performed by using a
multi-line string.
@@ -647,14 +643,12 @@ The equivalent in standard Python would be
.. ipython:: python
df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
- df['c'] = df.a + df.b
- df['d'] = df.a + df.b + df.c
+ df['c'] = df['a'] + df['b']
+ df['d'] = df['a'] + df['b'] + df['c']
df['a'] = 1
df
-.. versionadded:: 0.18.0
-
-The ``query`` method gained the ``inplace`` keyword which determines
+The ``query`` method has an ``inplace`` keyword which determines
whether the query modifies the original frame.
.. ipython:: python
@@ -694,7 +688,7 @@ name in an expression.
a = np.random.randn()
df.query('@a < a')
- df.loc[a < df.a] # same as the previous expression
+ df.loc[a < df['a']] # same as the previous expression
With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it
isn't defined in that context. ``pandas`` will let you know this if you try to
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 147f07e36efb8..141d1708d882d 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -827,13 +827,10 @@ and that the transformed data contains no NAs.
.. _groupby.transform.window_resample:
-New syntax to window and resample operations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.18.1
+Window and resample operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Working with the resample, expanding or rolling operations on the groupby
-level used to require the application of helper functions. However,
-now it is possible to use ``resample()``, ``expanding()`` and
+It is possible to use ``resample()``, ``expanding()`` and
``rolling()`` as methods on groupbys.
The example below will apply the ``rolling()`` method on the samples of
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 888266c3cfa55..cf55ce0c9a6d4 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -36,10 +36,6 @@ this area.
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`.
-.. warning::
-
- Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`.
-
See the :ref:`MultiIndex / Advanced Indexing <advanced>` for ``MultiIndex`` and more advanced indexing documentation.
See the :ref:`cookbook<cookbook.selection>` for some advanced strategies.
@@ -67,8 +63,6 @@ of multi-axis indexing.
* A ``callable`` function with one argument (the calling Series or DataFrame) and
that returns valid output for indexing (one of the above).
- .. versionadded:: 0.18.1
-
See more at :ref:`Selection by Label <indexing.label>`.
* ``.iloc`` is primarily integer position based (from ``0`` to
@@ -85,8 +79,6 @@ of multi-axis indexing.
* A ``callable`` function with one argument (the calling Series or DataFrame) and
that returns valid output for indexing (one of the above).
- .. versionadded:: 0.18.1
-
See more at :ref:`Selection by Position <indexing.integer>`,
:ref:`Advanced Indexing <advanced>` and :ref:`Advanced
Hierarchical <advanced.advanced_hierarchical>`.
@@ -218,7 +210,7 @@ as an attribute:
See `here for an explanation of valid identifiers
<https://docs.python.org/3/reference/lexical_analysis.html#identifiers>`__.
- - The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed.
+ - The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed, but ``s['min']`` is possible.
- Similarly, the attribute will not be available if it conflicts with any of the following list: ``index``,
``major_axis``, ``minor_axis``, ``items``.
@@ -538,8 +530,6 @@ A list of indexers where any element is out of bounds will raise an
Selection by callable
---------------------
-.. versionadded:: 0.18.1
-
``.loc``, ``.iloc``, and also ``[]`` indexing can accept a ``callable`` as indexer.
The ``callable`` must be a function with one argument (the calling Series or DataFrame) that returns valid output for indexing.
@@ -550,7 +540,7 @@ The ``callable`` must be a function with one argument (the calling Series or Dat
columns=list('ABCD'))
df1
- df1.loc[lambda df: df.A > 0, :]
+ df1.loc[lambda df: df['A'] > 0, :]
df1.loc[:, lambda df: ['A', 'B']]
df1.iloc[:, lambda df: [0, 1]]
@@ -562,7 +552,7 @@ You can use callable indexing in ``Series``.
.. ipython:: python
- df1.A.loc[lambda s: s > 0]
+ df1['A'].loc[lambda s: s > 0]
Using these methods / indexers, you can chain data selection operations
without using a temporary variable.
@@ -571,7 +561,7 @@ without using a temporary variable.
bb = pd.read_csv('data/baseball.csv', index_col='id')
(bb.groupby(['year', 'team']).sum()
- .loc[lambda df: df.r > 100])
+ .loc[lambda df: df['r'] > 100])
.. _indexing.deprecate_ix:
@@ -881,9 +871,9 @@ Boolean indexing
Another common operation is the use of boolean vectors to filter the data.
The operators are: ``|`` for ``or``, ``&`` for ``and``, and ``~`` for ``not``.
These **must** be grouped by using parentheses, since by default Python will
-evaluate an expression such as ``df.A > 2 & df.B < 3`` as
-``df.A > (2 & df.B) < 3``, while the desired evaluation order is
-``(df.A > 2) & (df.B < 3)``.
+evaluate an expression such as ``df['A'] > 2 & df['B'] < 3`` as
+``df['A'] > (2 & df['B']) < 3``, while the desired evaluation order is
+``(df['A'] > 2) & (df['B'] < 3)``.
Using a boolean vector to index a Series works exactly as in a NumPy ndarray:
@@ -1105,9 +1095,7 @@ This is equivalent to (but faster than) the following.
df2 = df.copy()
df.apply(lambda x, y: x.where(x > 0, y), y=df['A'])
-.. versionadded:: 0.18.1
-
-Where can accept a callable as condition and ``other`` arguments. The function must
+``where`` can accept a callable as condition and ``other`` arguments. The function must
be with one argument (the calling Series or DataFrame) and that returns valid output
as condition and ``other`` argument.
@@ -1146,7 +1134,7 @@ between the values of columns ``a`` and ``c``. For example:
df
# pure python
- df[(df.a < df.b) & (df.b < df.c)]
+ df[(df['a'] < df['b']) & (df['b'] < df['c'])]
# query
df.query('(a < b) & (b < c)')
@@ -1253,7 +1241,7 @@ Full numpy-like syntax:
df = pd.DataFrame(np.random.randint(n, size=(n, 3)), columns=list('abc'))
df
df.query('(a < b) & (b < c)')
- df[(df.a < df.b) & (df.b < df.c)]
+ df[(df['a'] < df['b']) & (df['b'] < df['c'])]
Slightly nicer by removing the parentheses (by binding making comparison
operators bind tighter than ``&`` and ``|``).
@@ -1291,12 +1279,12 @@ The ``in`` and ``not in`` operators
df.query('a in b')
# How you'd do it in pure Python
- df[df.a.isin(df.b)]
+ df[df['a'].isin(df['b'])]
df.query('a not in b')
# pure Python
- df[~df.a.isin(df.b)]
+ df[~df['a'].isin(df['b'])]
You can combine this with other expressions for very succinct queries:
@@ -1309,7 +1297,7 @@ You can combine this with other expressions for very succinct queries:
df.query('a in b and c < d')
# pure Python
- df[df.b.isin(df.a) & (df.c < df.d)]
+ df[df['b'].isin(df['a']) & (df['c'] < df['d'])]
.. note::
@@ -1338,7 +1326,7 @@ to ``in``/``not in``.
df.query('b == ["a", "b", "c"]')
# pure Python
- df[df.b.isin(["a", "b", "c"])]
+ df[df['b'].isin(["a", "b", "c"])]
df.query('c == [1, 2]')
@@ -1350,7 +1338,7 @@ to ``in``/``not in``.
df.query('[1, 2] not in c')
# pure Python
- df[df.c.isin([1, 2])]
+ df[df['c'].isin([1, 2])]
Boolean operators
@@ -1364,7 +1352,7 @@ You can negate boolean expressions with the word ``not`` or the ``~`` operator.
df['bools'] = np.random.rand(len(df)) > 0.5
df.query('~bools')
df.query('not bools')
- df.query('not bools') == df[~df.bools]
+ df.query('not bools') == df[~df['bools']]
Of course, expressions can be arbitrarily complex too:
@@ -1374,7 +1362,10 @@ Of course, expressions can be arbitrarily complex too:
shorter = df.query('a < b < c and (not bools) or bools > 2')
# equivalent in pure Python
- longer = df[(df.a < df.b) & (df.b < df.c) & (~df.bools) | (df.bools > 2)]
+ longer = df[(df['a'] < df['b'])
+ & (df['b'] < df['c'])
+ & (~df['bools'])
+ | (df['bools'] > 2)]
shorter
longer
@@ -1847,14 +1838,14 @@ chained indexing expression, you can set the :ref:`option <options>`
# This will show the SettingWithCopyWarning
# but the frame values will be set
- dfb['c'][dfb.a.str.startswith('o')] = 42
+ dfb['c'][dfb['a'].str.startswith('o')] = 42
This however is operating on a copy and will not work.
::
>>> pd.set_option('mode.chained_assignment','warn')
- >>> dfb[dfb.a.str.startswith('o')]['c'] = 42
+ >>> dfb[dfb['a'].str.startswith('o')]['c'] = 42
Traceback (most recent call last)
...
SettingWithCopyWarning:
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index eac86dda31507..2c8f66dd99e72 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -28,6 +28,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
:delim: ;
text;`CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`__;:ref:`read_csv<io.read_csv_table>`;:ref:`to_csv<io.store_in_csv>`
+ text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>`
text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>`
text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>`
text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
@@ -39,6 +40,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
binary;`Msgpack <https://msgpack.org/index.html>`__;:ref:`read_msgpack<io.msgpack>`;:ref:`to_msgpack<io.msgpack>`
binary;`Stata <https://en.wikipedia.org/wiki/Stata>`__;:ref:`read_stata<io.stata_reader>`;:ref:`to_stata<io.stata_writer>`
binary;`SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__;:ref:`read_sas<io.sas_reader>`;
+ binary;`SPSS <https://en.wikipedia.org/wiki/SPSS>`__;:ref:`read_spss<io.spss_reader>`;
binary;`Python Pickle Format <https://docs.python.org/3/library/pickle.html>`__;:ref:`read_pickle<io.pickle>`;:ref:`to_pickle<io.pickle>`
SQL;`SQL <https://en.wikipedia.org/wiki/SQL>`__;:ref:`read_sql<io.sql>`;:ref:`to_sql<io.sql>`
SQL;`Google Big Query <https://en.wikipedia.org/wiki/BigQuery>`__;:ref:`read_gbq<io.bigquery>`;:ref:`to_gbq<io.bigquery>`
@@ -87,8 +89,6 @@ delim_whitespace : boolean, default False
If this option is set to ``True``, nothing should be passed in for the
``delimiter`` parameter.
- .. versionadded:: 0.18.1 support for the Python parser.
-
Column and index locations and names
++++++++++++++++++++++++++++++++++++
@@ -298,7 +298,6 @@ compression : {``'infer'``, ``'gzip'``, ``'bz2'``, ``'zip'``, ``'xz'``, ``None``
the ZIP file must contain only one data file to be read in.
Set to ``None`` for no decompression.
- .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
.. versionchanged:: 0.24.0 'infer' option added and set to default.
thousands : str, default ``None``
Thousands separator.
@@ -456,8 +455,6 @@ worth trying.
Specifying categorical dtype
''''''''''''''''''''''''''''
-.. versionadded:: 0.19.0
-
``Categorical`` columns can be parsed directly by specifying ``dtype='category'`` or
``dtype=CategoricalDtype(categories, ordered)``.
@@ -1376,6 +1373,7 @@ should pass the ``escapechar`` option:
print(data)
pd.read_csv(StringIO(data), escapechar='\\')
+.. _io.fwf_reader:
.. _io.fwf:
Files with fixed width columns
@@ -2195,8 +2193,6 @@ With max_level=1 the following snippet normalizes until 1st nesting level of the
Line delimited json
'''''''''''''''''''
-.. versionadded:: 0.19.0
-
pandas is able to read and write line-delimited json files that are common in data processing pipelines
using Hadoop or Spark.
@@ -2494,16 +2490,12 @@ Specify values that should be converted to NaN:
dfs = pd.read_html(url, na_values=['No Acquirer'])
-.. versionadded:: 0.19
-
Specify whether to keep the default set of NaN values:
.. code-block:: python
dfs = pd.read_html(url, keep_default_na=False)
-.. versionadded:: 0.19
-
Specify converters for columns. This is useful for numerical text data that has
leading zeros. By default columns that are numerical are cast to numeric
types and the leading zeros are lost. To avoid this, we can convert these
@@ -2515,8 +2507,6 @@ columns to strings.
dfs = pd.read_html(url_mcc, match='Telekom Albania', header=0,
converters={'MNC': str})
-.. versionadded:: 0.19
-
Use some combination of the above:
.. code-block:: python
@@ -3216,7 +3206,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
writer = pd.ExcelWriter('path_to_file.xlsx', engine='xlsxwriter')
# Or via pandas configuration.
- from pandas import options # noqa: E402
+ from pandas import options # noqa: E402
options.io.excel.xlsx.writer = 'xlsxwriter'
df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
@@ -3584,7 +3574,7 @@ Closing a Store and using a context manager:
Read/write API
''''''''''''''
-``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing,
+``HDFStore`` supports a top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing,
similar to how ``read_csv`` and ``to_csv`` work.
.. ipython:: python
@@ -3699,7 +3689,7 @@ Hierarchical keys
Keys to a store can be specified as a string. These can be in a
hierarchical path-name like format (e.g. ``foo/bar/bah``), which will
generate a hierarchy of sub-stores (or ``Groups`` in PyTables
-parlance). Keys can be specified with out the leading '/' and are **always**
+parlance). Keys can be specified without the leading '/' and are **always**
absolute (e.g. 'foo' refers to '/foo'). Removal operations can remove
everything in the sub-store and **below**, so be *careful*.
@@ -3837,7 +3827,7 @@ data.
A query is specified using the ``Term`` class under the hood, as a boolean expression.
-* ``index`` and ``columns`` are supported indexers of a ``DataFrames``.
+* ``index`` and ``columns`` are supported indexers of ``DataFrames``.
* if ``data_columns`` are specified, these can be used as additional indexers.
Valid comparison operators are:
@@ -3929,7 +3919,7 @@ Use boolean expressions, with in-line function evaluation.
store.select('dfq', "index>pd.Timestamp('20130104') & columns=['A', 'B']")
-Use and inline column reference
+Use inline column reference.
.. ipython:: python
@@ -4605,8 +4595,8 @@ Performance
write chunksize (default is 50000). This will significantly lower
your memory usage on writing.
* You can pass ``expectedrows=<int>`` to the first ``append``,
- to set the TOTAL number of expected rows that ``PyTables`` will
- expected. This will optimize read/write performance.
+ to set the TOTAL number of rows that ``PyTables`` will expect.
+ This will optimize read/write performance.
* Duplicate rows can be written to tables, but are filtered out in
selection (with the last items being selected; thus a table is
unique on major, minor pairs)
@@ -4651,6 +4641,14 @@ Several caveats.
See the `Full Documentation <https://github.com/wesm/feather>`__.
+.. ipython:: python
+ :suppress:
+
+ import warnings
+ # This can be removed once building with pyarrow >=0.15.0
+ warnings.filterwarnings("ignore", "The Sparse", FutureWarning)
+
+
.. ipython:: python
df = pd.DataFrame({'a': list('abc'),
@@ -4855,7 +4853,7 @@ The above example creates a partitioned dataset that may look like:
from shutil import rmtree
try:
rmtree('test')
- except Exception:
+ except OSError:
pass
.. _io.sql:
@@ -5057,6 +5055,17 @@ Example of a callable using PostgreSQL `COPY clause
from io import StringIO
def psql_insert_copy(table, conn, keys, data_iter):
+ """
+ Execute SQL statement inserting data
+
+ Parameters
+ ----------
+ table : pandas.io.sql.SQLTable
+ conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
+ keys : list of str
+ Column names
+ data_iter : Iterable that iterates the values to be inserted
+ """
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
@@ -5090,6 +5099,18 @@ table name and optionally a subset of columns to read.
pd.read_sql_table('data', engine)
+.. note::
+
+ Note that pandas infers column dtypes from query outputs, and not by looking
+ up data types in the physical database schema. For example, assume ``userid``
+ is an integer column in a table. Then, intuitively, ``select userid ...`` will
+ return integer-valued series, while ``select cast(userid as text) ...`` will
+ return object-valued (str) series. Accordingly, if the query output is empty,
+ then all resulting columns will be returned as object-valued (since they are
+ most general). If you foresee that your query will sometimes generate an empty
+ result, you may want to explicitly typecast afterwards to ensure dtype
+ integrity.
+
You can also specify the name of the column as the ``DataFrame`` index,
and specify a subset of columns to be read.
@@ -5490,6 +5511,43 @@ web site.
No official documentation is available for the SAS7BDAT format.
+.. _io.spss:
+
+.. _io.spss_reader:
+
+SPSS formats
+------------
+
+.. versionadded:: 0.25.0
+
+The top-level function :func:`read_spss` can read (but not write) SPSS
+`sav` (.sav) and `zsav` (.zsav) format files.
+
+SPSS files contain column names. By default the
+whole file is read, categorical columns are converted into ``pd.Categorical``,
+and a ``DataFrame`` with all columns is returned.
+
+Specify the ``usecols`` parameter to obtain a subset of columns. Specify ``convert_categoricals=False``
+to avoid converting categorical columns into ``pd.Categorical``.
+
+Read an SPSS file:
+
+.. code-block:: python
+
+ df = pd.read_spss('spss_data.sav')
+
+Extract a subset of columns contained in ``usecols`` from an SPSS file and
+avoid converting categorical columns into ``pd.Categorical``:
+
+.. code-block:: python
+
+ df = pd.read_spss('spss_data.sav', usecols=['foo', 'bar'],
+ convert_categoricals=False)
+
+More information about the `sav` and `zsav` file formats is available here_.
+
+.. _here: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm
+
.. _io.other:
Other file formats
diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index 6e63e672bb968..4c0d3b75a4f79 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -819,8 +819,6 @@ The ``indicator`` argument will also accept string arguments, in which case the
Merge dtypes
~~~~~~~~~~~~
-.. versionadded:: 0.19.0
-
Merging will preserve the dtype of the join keys.
.. ipython:: python
@@ -1386,8 +1384,6 @@ fill/interpolate missing data:
Merging asof
~~~~~~~~~~~~
-.. versionadded:: 0.19.0
-
A :func:`merge_asof` is similar to an ordered left-join except that we match on
nearest key rather than equal keys. For each row in the ``left`` ``DataFrame``,
we select the last row in the ``right`` ``DataFrame`` whose ``on`` key is less
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst
index f32a8adfd4d33..a6491c6645613 100644
--- a/doc/source/user_guide/options.rst
+++ b/doc/source/user_guide/options.rst
@@ -163,7 +163,7 @@ determines how many rows are shown in the truncated repr.
.. ipython:: python
pd.set_option('max_rows', 8)
- pd.set_option('max_rows', 4)
+ pd.set_option('min_rows', 4)
# below max_rows -> all rows shown
df = pd.DataFrame(np.random.randn(7, 2))
df
@@ -353,7 +353,7 @@ display.max_colwidth 50 The maximum width in charac
a column in the repr of a pandas
data structure. When the column overflows,
a "..." placeholder is embedded in
- the output.
+ the output. 'None' value means unlimited.
display.max_info_columns 100 max_info_columns is used in DataFrame.info
method to decide if per column information
will be printed.
diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst
index 0470a6c0c2f42..dd6d3062a8f0a 100644
--- a/doc/source/user_guide/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -254,8 +254,6 @@ values will be set to ``NaN``.
df3
df3.unstack()
-.. versionadded:: 0.18.0
-
Alternatively, unstack takes an optional ``fill_value`` argument, for specifying
the value of missing data.
@@ -471,7 +469,7 @@ If ``crosstab`` receives only two Series, it will provide a frequency table.
'C': [1, 1, np.nan, 1, 1]})
df
- pd.crosstab(df.A, df.B)
+ pd.crosstab(df['A'], df['B'])
Any input passed containing ``Categorical`` data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
@@ -486,20 +484,18 @@ not contain any instances of a particular category.
Normalization
~~~~~~~~~~~~~
-.. versionadded:: 0.18.1
-
Frequency tables can also be normalized to show percentages rather than counts
using the ``normalize`` argument:
.. ipython:: python
- pd.crosstab(df.A, df.B, normalize=True)
+ pd.crosstab(df['A'], df['B'], normalize=True)
``normalize`` can also normalize values within each row or within each column:
.. ipython:: python
- pd.crosstab(df.A, df.B, normalize='columns')
+ pd.crosstab(df['A'], df['B'], normalize='columns')
``crosstab`` can also be passed a third ``Series`` and an aggregation function
(``aggfunc``) that will be applied to the values of the third ``Series`` within
@@ -507,7 +503,7 @@ each group defined by the first two ``Series``:
.. ipython:: python
- pd.crosstab(df.A, df.B, values=df.C, aggfunc=np.sum)
+ pd.crosstab(df['A'], df['B'], values=df['C'], aggfunc=np.sum)
Adding margins
~~~~~~~~~~~~~~
@@ -516,7 +512,7 @@ Finally, one can also add margins or normalize this output.
.. ipython:: python
- pd.crosstab(df.A, df.B, values=df.C, aggfunc=np.sum, normalize=True,
+ pd.crosstab(df['A'], df['B'], values=df['C'], aggfunc=np.sum, normalize=True,
margins=True)
.. _reshaping.tile:
@@ -630,8 +626,6 @@ the prefix separator. You can specify ``prefix`` and ``prefix_sep`` in 3 ways:
from_dict = pd.get_dummies(df, prefix={'B': 'from_B', 'A': 'from_A'})
from_dict
-.. versionadded:: 0.18.0
-
Sometimes it will be useful to only keep k-1 levels of a categorical
variable to avoid collinearity when feeding the result to statistical models.
You can switch to this mode by turn on ``drop_first``.
diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst
index 98fd30f67d05b..c258a8840b714 100644
--- a/doc/source/user_guide/sparse.rst
+++ b/doc/source/user_guide/sparse.rst
@@ -6,12 +6,6 @@
Sparse data structures
**********************
-.. note::
-
- ``SparseSeries`` and ``SparseDataFrame`` have been deprecated. Their purpose
- is served equally well by a :class:`Series` or :class:`DataFrame` with
- sparse values. See :ref:`sparse.migration` for tips on migrating.
-
Pandas provides data structures for efficiently storing sparse data.
These are not necessarily sparse in the typical "mostly 0". Rather, you can view these
objects as being "compressed" where any data matching a specific value (``NaN`` / missing value, though any value
@@ -168,6 +162,11 @@ the correct dense result.
Migrating
---------
+.. note::
+
+ ``SparseSeries`` and ``SparseDataFrame`` were removed in pandas 1.0.0. This migration
+ guide is present to aid in migrating from previous versions.
+
In older versions of pandas, the ``SparseSeries`` and ``SparseDataFrame`` classes (documented below)
were the preferred way to work with sparse data. With the advent of extension arrays, these subclasses
are no longer needed. Their purpose is better served by using a regular Series or DataFrame with
@@ -366,12 +365,3 @@ row and columns coordinates of the matrix. Note that this will consume a signifi
ss_dense = pd.Series.sparse.from_coo(A, dense_index=True)
ss_dense
-
-
-.. _sparse.subclasses:
-
-Sparse subclasses
------------------
-
-The :class:`SparseSeries` and :class:`SparseDataFrame` classes are deprecated. Visit their
-API pages for usage.
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 8aa1f63ecf22a..006f928c037bd 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -6,10 +6,6 @@
"source": [
"# Styling\n",
"\n",
- "*New in version 0.17.1*\n",
- "\n",
- "<span style=\"color: red\">*Provisional: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.*</span>\n",
- "\n",
"This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](http://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/style.ipynb).\n",
"\n",
"You can apply **conditional formatting**, the visual styling of a DataFrame\n",
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index 4f1fcdeb62f14..acb5810e5252a 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -366,13 +366,12 @@ Extract first match in each subject (extract)
.. warning::
- In version 0.18.0, ``extract`` gained the ``expand`` argument. When
- ``expand=False`` it returns a ``Series``, ``Index``, or
+ Before version 0.23, argument ``expand`` of the ``extract`` method defaulted to
+ ``False``. When ``expand=False``, ``expand`` returns a ``Series``, ``Index``, or
``DataFrame``, depending on the subject and regular expression
- pattern (same behavior as pre-0.18.0). When ``expand=True`` it
- always returns a ``DataFrame``, which is more consistent and less
- confusing from the perspective of a user. ``expand=True`` is the
- default since version 0.23.0.
+ pattern. When ``expand=True``, it always returns a ``DataFrame``,
+ which is more consistent and less confusing from the perspective of a user.
+ ``expand=True`` has been the default since version 0.23.0.
The ``extract`` method accepts a `regular expression
<https://docs.python.org/3/library/re.html>`__ with at least one
@@ -468,8 +467,6 @@ Extract all matches in each subject (extractall)
.. _text.extractall:
-.. versionadded:: 0.18.0
-
Unlike ``extract`` (which returns only the first match),
.. ipython:: python
@@ -509,8 +506,6 @@ then ``extractall(pat).xs(0, level='match')`` gives the same result as
``Index`` also supports ``.str.extractall``. It returns a ``DataFrame`` which has the
same result as a ``Series.str.extractall`` with a default index (starts from 0).
-.. versionadded:: 0.19.0
-
.. ipython:: python
pd.Index(["a1a2", "b1", "c1"]).str.extractall(two_groups)
@@ -560,8 +555,6 @@ For example if they are separated by a ``'|'``:
String ``Index`` also supports ``get_dummies`` which returns a ``MultiIndex``.
-.. versionadded:: 0.18.1
-
.. ipython:: python
idx = pd.Index(['a', 'a|b', np.nan, 'a|c'])
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index ce02059cd421f..0894edd69c2ae 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -255,8 +255,6 @@ option, see the Python `datetime documentation`_.
Assembling datetime from multiple DataFrame columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.18.1
-
You can also pass a ``DataFrame`` of integer or string columns to assemble into a ``Series`` of ``Timestamps``.
.. ipython:: python
@@ -609,8 +607,6 @@ We are stopping on the included end-point as it is part of the index:
dft['2013-1-15':'2013-1-15 12:30:00']
-.. versionadded:: 0.18.0
-
``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``:
.. ipython:: python
@@ -1165,8 +1161,6 @@ following subsection.
Custom business hour
~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.18.1
-
The ``CustomBusinessHour`` is a mixture of ``BusinessHour`` and ``CustomBusinessDay`` which
allows you to specify arbitrary holidays. ``CustomBusinessHour`` works as the same
as ``BusinessHour`` except that it skips specified custom holidays.
@@ -1518,11 +1512,6 @@ Converting to Python datetimes
Resampling
----------
-.. warning::
-
- The interface to ``.resample`` has changed in 0.18.0 to be more groupby-like and hence more flexible.
- See the :ref:`whatsnew docs <whatsnew_0180.breaking.resample>` for a comparison with prior versions.
-
Pandas has a simple, powerful, and efficient functionality for performing
resampling operations during frequency conversion (e.g., converting secondly
data into 5-minutely data). This is extremely common in, but not limited to,
@@ -1532,8 +1521,8 @@ financial applications.
on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for
some advanced strategies.
-Starting in version 0.18.1, the ``resample()`` function can be used directly from
-``DataFrameGroupBy`` objects, see the :ref:`groupby docs <groupby.transform.window_resample>`.
+The ``resample()`` method can be used directly from ``DataFrameGroupBy`` objects,
+see the :ref:`groupby docs <groupby.transform.window_resample>`.
.. note::
@@ -1932,8 +1921,6 @@ objects:
Period dtypes
~~~~~~~~~~~~~
-.. versionadded:: 0.19.0
-
``PeriodIndex`` has a custom ``period`` dtype. This is a pandas extension
dtype similar to the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``).
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 6589900c8491c..fa16b2f216610 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -438,10 +438,6 @@ columns:
.. _visualization.box.return:
-.. warning::
-
- The default changed from ``'dict'`` to ``'axes'`` in version 0.19.0.
-
In ``boxplot``, the return type can be controlled by the ``return_type``, keyword. The valid choices are ``{"axes", "dict", "both", None}``.
Faceting, created by ``DataFrame.boxplot`` with the ``by``
keyword, will affect the output type as well:
@@ -1152,10 +1148,10 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword:
.. ipython:: python
- df.A.plot()
+ df['A'].plot()
@savefig series_plot_secondary_y.png
- df.B.plot(secondary_y=True, style='g')
+ df['B'].plot(secondary_y=True, style='g')
.. ipython:: python
:suppress:
@@ -1209,7 +1205,7 @@ Here is the default behavior, notice how the x-axis tick labeling is performed:
plt.figure()
@savefig ser_plot_suppress.png
- df.A.plot()
+ df['A'].plot()
.. ipython:: python
:suppress:
@@ -1223,7 +1219,7 @@ Using the ``x_compat`` parameter, you can suppress this behavior:
plt.figure()
@savefig ser_plot_suppress_parm.png
- df.A.plot(x_compat=True)
+ df['A'].plot(x_compat=True)
.. ipython:: python
:suppress:
@@ -1239,9 +1235,9 @@ in ``pandas.plotting.plot_params`` can be used in a `with statement`:
@savefig ser_plot_suppress_context.png
with pd.plotting.plot_params.use('x_compat', True):
- df.A.plot(color='r')
- df.B.plot(color='g')
- df.C.plot(color='b')
+ df['A'].plot(color='r')
+ df['B'].plot(color='g')
+ df['C'].plot(color='b')
.. ipython:: python
:suppress:
@@ -1632,18 +1628,3 @@ when plotting a large number of points.
:suppress:
plt.close('all')
-
-
-.. _rplot:
-
-
-Trellis plotting interface
---------------------------
-
-.. warning::
-
- The ``rplot`` trellis plotting interface has been **removed**. Please use
- external packages like `seaborn <https://github.com/mwaskom/seaborn>`_ for
- similar but more refined functionality and refer to our 0.18.1 documentation
- `here <http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html>`__
- for how to convert to using it.
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 592b4748126c1..fe80cc8bb959a 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -10,12 +10,22 @@ This is the list of changes to pandas between each release. For full details,
see the commit logs at http://github.com/pandas-dev/pandas. For install and
upgrade instructions, see :ref:`install`.
+Version 1.0
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ v1.0.0
+
Version 0.25
------------
.. toctree::
:maxdepth: 2
+ v0.25.2
+ v0.25.1
v0.25.0
Version 0.24
diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index 59ea6b9776232..2e0442364b2f3 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -498,7 +498,7 @@ Here is a taste of what to expect.
.. code-block:: ipython
- In [58]: p4d = Panel4D(randn(2, 2, 5, 4),
+ In [58]: p4d = Panel4D(np.random.randn(2, 2, 5, 4),
....: labels=['Label1','Label2'],
....: items=['Item1', 'Item2'],
....: major_axis=date_range('1/1/2000', periods=5),
diff --git a/doc/source/whatsnew/v0.16.0.rst b/doc/source/whatsnew/v0.16.0.rst
index b903c4dae4c5a..fc638e35ed88b 100644
--- a/doc/source/whatsnew/v0.16.0.rst
+++ b/doc/source/whatsnew/v0.16.0.rst
@@ -91,8 +91,7 @@ Interaction with scipy.sparse
Added :meth:`SparseSeries.to_coo` and :meth:`SparseSeries.from_coo` methods (:issue:`8048`) for converting to and from ``scipy.sparse.coo_matrix`` instances (see :ref:`here <sparse.scipysparse>`). For example, given a SparseSeries with MultiIndex we can convert to a `scipy.sparse.coo_matrix` by specifying the row and column labels as index levels:
-.. ipython:: python
- :okwarning:
+.. code-block:: python
s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
@@ -121,8 +120,7 @@ Added :meth:`SparseSeries.to_coo` and :meth:`SparseSeries.from_coo` methods (:is
The from_coo method is a convenience method for creating a ``SparseSeries``
from a ``scipy.sparse.coo_matrix``:
-.. ipython:: python
- :okwarning:
+.. code-block:: python
from scipy import sparse
A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
@@ -530,7 +528,7 @@ Deprecations
`seaborn <http://stanford.edu/~mwaskom/software/seaborn/>`_ for similar
but more refined functionality (:issue:`3445`).
The documentation includes some examples how to convert your existing code
- using ``rplot`` to seaborn: :ref:`rplot docs <rplot>`.
+ from ``rplot`` to seaborn `here <http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html#trellis-plotting-interface>`__.
- The ``pandas.sandbox.qtpandas`` interface is deprecated and will be removed in a future version.
We refer users to the external package `pandas-qt <https://github.com/datalyze-solutions/pandas-qt>`_. (:issue:`9615`)
diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst
index 7e06e5050c5f0..f786ce513f6fe 100644
--- a/doc/source/whatsnew/v0.18.1.rst
+++ b/doc/source/whatsnew/v0.18.1.rst
@@ -393,8 +393,7 @@ used in the ``pandas`` implementation (:issue:`12644`, :issue:`12638`, :issue:`1
An example of this signature augmentation is illustrated below:
-.. ipython:: python
- :okwarning:
+.. code-block:: python
sp = pd.SparseDataFrame([1, 2, 3])
sp
@@ -409,8 +408,7 @@ Previous behaviour:
New behaviour:
-.. ipython:: python
- :okwarning:
+.. code-block:: python
np.cumsum(sp, axis=0)
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 1dad8769a6b39..61a65415f6b57 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -1235,8 +1235,7 @@ Operators now preserve dtypes
- Sparse data structure now can preserve ``dtype`` after arithmetic ops (:issue:`13848`)
-.. ipython:: python
- :okwarning:
+.. code-block:: python
s = pd.SparseSeries([0, 2, 0, 1], fill_value=0, dtype=np.int64)
s.dtype
@@ -1245,8 +1244,7 @@ Operators now preserve dtypes
- Sparse data structure now support ``astype`` to convert internal ``dtype`` (:issue:`13900`)
-.. ipython:: python
- :okwarning:
+.. code-block:: python
s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
s
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index ef6108ae3ec90..c7278d5a47ba6 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -338,8 +338,7 @@ See the :ref:`documentation <sparse.scipysparse>` for more information. (:issue:
All sparse formats are supported, but matrices that are not in :mod:`COOrdinate <scipy.sparse>` format will be converted, copying data as needed.
-.. ipython:: python
- :okwarning:
+.. code-block:: python
from scipy.sparse import csr_matrix
arr = np.random.random(size=(1000, 5))
@@ -351,7 +350,7 @@ All sparse formats are supported, but matrices that are not in :mod:`COOrdinate
To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you can use:
-.. ipython:: python
+.. code-block:: python
sdf.to_coo()
@@ -495,7 +494,7 @@ Other enhancements
- :func:`pandas.util.hash_pandas_object` has gained the ability to hash a ``MultiIndex`` (:issue:`15224`)
- ``Series/DataFrame.squeeze()`` have gained the ``axis`` parameter. (:issue:`15339`)
- ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`)
-- ``pd.read_html()`` will parse multiple header rows, creating a MutliIndex header. (:issue:`13434`).
+- ``pd.read_html()`` will parse multiple header rows, creating a MultiIndex header. (:issue:`13434`).
- HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`)
- :class:`pandas.io.formats.style.Styler` template now has blocks for easier extension, see the :ref:`example notebook </user_guide/style.ipynb#Subclassing>` (:issue:`15649`)
- :meth:`Styler.render() <pandas.io.formats.style.Styler.render>` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`)
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index 62cf977d8c8ac..f4c283ea742f7 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -31,7 +31,7 @@ Check the :ref:`API Changes <whatsnew_0230.api_breaking>` and :ref:`deprecations
.. warning::
Starting January 1, 2019, pandas feature releases will support Python 3 only.
- See :ref:`install.dropping-27` for more.
+ See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
.. contents:: What's new in v0.23.0
:local:
diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst
index d730a57a01a60..03b7d9db6bc63 100644
--- a/doc/source/whatsnew/v0.23.1.rst
+++ b/doc/source/whatsnew/v0.23.1.rst
@@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version.
.. warning::
Starting January 1, 2019, pandas feature releases will support Python 3 only.
- See :ref:`install.dropping-27` for more.
+ See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
.. contents:: What's new in v0.23.1
:local:
diff --git a/doc/source/whatsnew/v0.23.2.rst b/doc/source/whatsnew/v0.23.2.rst
index df8cc12e3385e..9f24092d1d4ae 100644
--- a/doc/source/whatsnew/v0.23.2.rst
+++ b/doc/source/whatsnew/v0.23.2.rst
@@ -17,7 +17,7 @@ and bug fixes. We recommend that all users upgrade to this version.
.. warning::
Starting January 1, 2019, pandas feature releases will support Python 3 only.
- See :ref:`install.dropping-27` for more.
+ See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
.. contents:: What's new in v0.23.2
:local:
diff --git a/doc/source/whatsnew/v0.23.4.rst b/doc/source/whatsnew/v0.23.4.rst
index 060d1fc8eba34..eadac6f569926 100644
--- a/doc/source/whatsnew/v0.23.4.rst
+++ b/doc/source/whatsnew/v0.23.4.rst
@@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version.
.. warning::
Starting January 1, 2019, pandas feature releases will support Python 3 only.
- See :ref:`install.dropping-27` for more.
+ See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
.. contents:: What's new in v0.23.4
:local:
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index a66056f661de3..d9f41d2a75116 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -6,7 +6,7 @@ What's new in 0.24.0 (January 25, 2019)
.. warning::
The 0.24.x series of releases will be the last to support Python 2. Future feature
- releases will support Python 3 only. See :ref:`install.dropping-27` for more
+ releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more
details.
{{ header }}
diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 1b0232cad7476..aead8c48eb9b7 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -6,7 +6,7 @@ Whats new in 0.24.1 (February 3, 2019)
.. warning::
The 0.24.x series of releases will be the last to support Python 2. Future feature
- releases will support Python 3 only. See :ref:`install.dropping-27` for more.
+ releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
{{ header }}
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index da8064893e8a8..d1a893f99cff4 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -6,7 +6,7 @@ Whats new in 0.24.2 (March 12, 2019)
.. warning::
The 0.24.x series of releases will be the last to support Python 2. Future feature
- releases will support Python 3 only. See :ref:`install.dropping-27` for more.
+ releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more.
{{ header }}
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 42e756635e739..503f9b6bfb1f0 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -6,7 +6,7 @@ What's new in 0.25.0 (July 18, 2019)
.. warning::
Starting with the 0.25.x series of releases, pandas only supports Python 3.5.3 and higher.
- See :ref:`install.dropping-27` for more details.
+ See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more details.
.. warning::
@@ -828,7 +828,7 @@ If installed, we now require:
| pytest (dev) | 4.0.2 | |
+-----------------+-----------------+----------+
-For `optional libraries <https://dev.pandas.io/install.html#dependencies>`_ the general recommendation is to use the latest version.
+For `optional libraries <https://dev.pandas.io/docs/install.html#dependencies>`_ the general recommendation is to use the latest version.
The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
Optional libraries below the lowest tested version may still work, but are not considered supported.
@@ -902,8 +902,7 @@ by a ``Series`` or ``DataFrame`` with sparse values.
**Previous way**
-.. ipython:: python
- :okwarning:
+.. code-block:: python
df = pd.SparseDataFrame({"A": [0, 0, 1, 2]})
df.dtypes
@@ -974,6 +973,7 @@ Removal of prior version deprecations/changes
- Removed the previously deprecated ``cdate_range`` (:issue:`17691`)
- Removed the previously deprecated ``True`` option for the ``dropna`` keyword argument in :func:`SeriesGroupBy.nth` (:issue:`17493`)
- Removed the previously deprecated ``convert`` keyword argument in :meth:`Series.take` and :meth:`DataFrame.take` (:issue:`17352`)
+- Removed the previously deprecated behavior of arithmetic operations with ``datetime.date`` objects (:issue:`21152`)
.. _whatsnew_0250.performance:
@@ -1267,4 +1267,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: 0.24.x..HEAD
+.. contributors:: v0.24.x..HEAD
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst
index 6234bc0f7bd35..63dd56f4a3793 100644
--- a/doc/source/whatsnew/v0.25.1.rst
+++ b/doc/source/whatsnew/v0.25.1.rst
@@ -1,170 +1,119 @@
-:orphan:
-
-.. TODO. Remove the orphan tag.
-
.. _whatsnew_0251:
-What's new in 0.25.1 (July XX, 2019)
-------------------------------------
-
-Enhancements
-~~~~~~~~~~~~
-
+What's new in 0.25.1 (August 21, 2019)
+--------------------------------------
-.. _whatsnew_0251.enhancements.other:
+These are the changes in pandas 0.25.1. See :ref:`release` for a full changelog
+including other versions of pandas.
-Other enhancements
-^^^^^^^^^^^^^^^^^^
+I/O and LZMA
+~~~~~~~~~~~~
--
--
--
+Some users may unknowingly have an incomplete Python installation lacking the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue:`27575`).
+Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`.
+A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python.
+For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python.
.. _whatsnew_0251.bug_fixes:
Bug fixes
~~~~~~~~~
-
Categorical
^^^^^^^^^^^
--
--
--
+- Bug in :meth:`Categorical.fillna` that would replace all values, not just those that are ``NaN`` (:issue:`26215`)
Datetimelike
^^^^^^^^^^^^
--
--
--
-
-Timedelta
-^^^^^^^^^
-
--
--
--
+- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`)
+- Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`)
+- Bug in iterating over :class:`DatetimeIndex` when the underlying data is read-only (:issue:`28055`)
Timezones
^^^^^^^^^
--
--
--
+- Bug in :class:`Index` where a numpy object array with a timezone aware :class:`Timestamp` and ``np.nan`` would not return a :class:`DatetimeIndex` (:issue:`27011`)
Numeric
^^^^^^^
--
--
--
+
+- Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`)
+- Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`)
+- Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`)
+- Bug in :class:`DataFrame` arithmetic where missing values in results were incorrectly masked with ``NaN`` instead of ``Inf`` (:issue:`27464`)
Conversion
^^^^^^^^^^
--
--
--
-
-Strings
-^^^^^^^
-
--
--
--
-
+- Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`)
Interval
^^^^^^^^
--
--
--
+- Bug in :class:`IntervalIndex` where `dir(obj)` would raise ``ValueError`` (:issue:`27571`)
Indexing
^^^^^^^^
--
--
--
+- Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`)
+- Break reference cycle involving :class:`Index` and other index classes to allow garbage collection of index objects without running the GC. (:issue:`27585`, :issue:`27840`)
+- Fix regression in assigning values to a single column of a DataFrame with a ``MultiIndex`` columns (:issue:`27841`).
+- Fix regression in ``.ix`` fallback with an ``IntervalIndex`` (:issue:`27865`).
Missing
^^^^^^^
--
--
--
-
-MultiIndex
-^^^^^^^^^^
-
--
--
--
+- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. ``type(pandas.Series())`` (:issue:`27482`)
I/O
^^^
--
--
--
+- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`)
+- Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`)
+- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the HTML repr in the notebook (:issue:`27991`).
Plotting
^^^^^^^^
--
--
--
+- Added a ``pandas_plotting_backends`` entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`).
+- Fixed the re-instatement of Matplotlib datetime converters after calling
+ :meth:`pandas.plotting.deregister_matplotlib_converters` (:issue:`27481`).
+- Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`).
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
--
--
--
+- Fixed regression in :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` raising when multiple quantiles are given (:issue:`27526`)
+- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`)
+- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`)
+- Bug in windowing over read-only arrays (:issue:`27766`)
+- Fixed segfault in `pandas.core.groupby.DataFrameGroupBy.quantile` when an invalid quantile was passed (:issue:`27470`)
Reshaping
^^^^^^^^^
--
--
--
+- A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`)
+- Bug in :meth:`merge_asof` could not merge :class:`Timedelta` objects when passing `tolerance` kwarg (:issue:`27642`)
+- Bug in :meth:`DataFrame.crosstab` when ``margins`` set to ``True`` and ``normalize`` is not ``False``, an error is raised. (:issue:`27500`)
+- :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`)
+- Bug in :meth:`DataFrame.join` raising with readonly arrays (:issue:`27943`)
Sparse
^^^^^^
--
--
--
-
-
-Build Changes
-^^^^^^^^^^^^^
-
--
--
--
-
-ExtensionArray
-^^^^^^^^^^^^^^
-
--
--
--
+- Bug in reductions for :class:`Series` with Sparse dtypes (:issue:`27080`)
Other
^^^^^
--
--
--
+- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`)
+- Bug in :meth:`Series.rename` when using a custom type indexer. Now any value that isn't callable or dict-like is treated as a scalar. (:issue:`27814`)
.. _whatsnew_0.251.contributors:
Contributors
~~~~~~~~~~~~
-.. TODO. Change to v0.25.0..HEAD
-
-.. contributors:: HEAD..HEAD
+.. contributors:: v0.25.0..HEAD
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
new file mode 100644
index 0000000000000..14682b706f924
--- /dev/null
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -0,0 +1,111 @@
+.. _whatsnew_0252:
+
+What's new in 0.25.2 (October XX, 2019)
+---------------------------------------
+
+These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+.. _whatsnew_0252.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+Categorical
+^^^^^^^^^^^
+
+-
+
+Datetimelike
+^^^^^^^^^^^^
+
+-
+-
+-
+
+Timezones
+^^^^^^^^^
+
+-
+
+Numeric
+^^^^^^^
+
+-
+-
+-
+-
+
+Conversion
+^^^^^^^^^^
+
+-
+
+Interval
+^^^^^^^^
+
+-
+
+Indexing
+^^^^^^^^
+
+-
+-
+-
+
+Missing
+^^^^^^^
+
+-
+
+I/O
+^^^
+
+- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`).
+- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
+-
+-
+
+Plotting
+^^^^^^^^
+
+-
+-
+-
+
+Groupby/resample/rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`).
+- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`)
+-
+-
+-
+
+Reshaping
+^^^^^^^^^
+
+-
+-
+-
+-
+-
+
+Sparse
+^^^^^^
+
+-
+
+Other
+^^^^^
+
+- Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`)
+- Fix to ensure that tab-completion in an IPython console does not raise
+ warnings for deprecated attributes (:issue:`27900`).
+
+.. _whatsnew_0.252.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v0.25.1..HEAD
diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst
index a8697f60d7467..020cf3bdc2d59 100644
--- a/doc/source/whatsnew/v0.7.3.rst
+++ b/doc/source/whatsnew/v0.7.3.rst
@@ -25,8 +25,6 @@ New features
from pandas.tools.plotting import scatter_matrix
scatter_matrix(df, alpha=0.2) # noqa F821
-.. image:: ../savefig/scatter_matrix_kde.png
- :width: 5in
- Add ``stacked`` argument to Series and DataFrame's ``plot`` method for
:ref:`stacked bar plots <visualization.barplot>`.
@@ -35,15 +33,11 @@ New features
df.plot(kind='bar', stacked=True) # noqa F821
-.. image:: ../savefig/bar_plot_stacked_ex.png
- :width: 4in
.. code-block:: python
df.plot(kind='barh', stacked=True) # noqa F821
-.. image:: ../savefig/barh_plot_stacked_ex.png
- :width: 4in
- Add log x and y :ref:`scaling options <visualization.basic>` to
``DataFrame.plot`` and ``Series.plot``
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
new file mode 100644
index 0000000000000..eb4b72d01d59a
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -0,0 +1,308 @@
+.. _whatsnew_1000:
+
+What's new in 1.0.0 (??)
+------------------------
+
+New Deprecation Policy
+~~~~~~~~~~~~~~~~~~~~~~
+
+Starting with Pandas 1.0.0, pandas will adopt a version of `SemVer`_.
+
+Historically, pandas has used a "rolling" deprecation policy, with occasional
+outright breaking API changes. Where possible, we would deprecate the behavior
+we'd like to change, giving an option to adopt the new behavior (via a keyword
+or an alternative method), and issuing a warning for users of the old behavior.
+Sometimes, a deprecation was not possible, and we would make an outright API
+breaking change.
+
+We'll continue to *introduce* deprecations in major and minor releases (e.g.
+1.0.0, 1.1.0, ...). Those deprecations will be *enforced* in the next major
+release.
+
+Note that *behavior changes* and *API breaking changes* are not identical. API
+breaking changes will only be released in major versions. If we consider a
+behavior to be a bug, and fixing that bug induces a behavior change, we'll
+release that change in a minor release. This is a sometimes difficult judgment
+call that we'll do our best on.
+
+This doesn't mean that pandas' pace of development will slow down. In the `2019
+Pandas User Survey`_, about 95% of the respondents said they considered pandas
+"stable enough". This indicates there's an appetite for new features, even if it
+comes at the cost of breaking API. The difference is that now API breaking changes
+will be accompanied with a bump in the major version number (e.g. pandas 1.5.1
+-> 2.0.0).
+
+See :ref:`policies.version` for more.
+
+.. _2019 Pandas User Survey: http://dev.pandas.io/pandas-blog/2019-pandas-user-survey.html
+.. _SemVer: https://semver.org
+
+.. warning::
+
+ The minimum supported Python version will be bumped to 3.6 in a future release.
+
+{{ header }}
+
+These are the changes in pandas 1.0.0. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+
+Enhancements
+~~~~~~~~~~~~
+
+- :meth:`DataFrame.to_string` added the ``max_colwidth`` parameter to control when wide columns are truncated (:issue:`9784`)
+-
+
+.. _whatsnew_1000.enhancements.other:
+
+Other enhancements
+^^^^^^^^^^^^^^^^^^
+
+- :meth:`MultiIndex.from_product` infers level names from inputs if not explicitly provided (:issue:`27292`)
+- :meth:`DataFrame.to_latex` now accepts ``caption`` and ``label`` arguments (:issue:`25436`)
+- The :ref:`integer dtype <integer_na>` with support for missing values can now be converted to
+ ``pyarrow`` (>= 0.15.0), which means that it is supported in writing to the Parquet file format
+ when using the ``pyarrow`` engine. It is currently not yet supported when converting back to
+ pandas (so it will become an integer or float dtype depending on the presence of missing data).
+ (:issue:`28368`)
+- :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`)
+- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`)
+
+Build Changes
+^^^^^^^^^^^^^
+
+Pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include
+cythonized files in the source distribution uploaded to PyPI (:issue:`28341`, :issue:`20775`). If you're installing
+a built distribution (wheel) or via conda, this shouldn't have any effect on you. If you're building pandas from
+source, you should no longer need to install Cython into your build environment before calling ``pip install pandas``.
+
+.. _whatsnew_1000.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- :class:`pandas.core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`).
+- :class:`pandas.core.arrays.IntervalArray` adopts a new ``__repr__`` in accordance with other array classes (:issue:`25022`)
+
+*pandas 0.25.x*
+
+.. code-block:: ipython
+
+ In [1]: pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)])
+ Out[2]:
+ IntervalArray([(0, 1], (2, 3]],
+ closed='right',
+ dtype='interval[int64]')
+
+
+*pandas 1.0.0*
+
+.. ipython:: python
+
+ pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)])
+
+
+.. _whatsnew_1000.api.other:
+
+Other API changes
+^^^^^^^^^^^^^^^^^
+
+- :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`)
+- :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`)
+-
+
+.. _whatsnew_1000.deprecations:
+
+Deprecations
+~~~~~~~~~~~~
+
+-
+-
+
+.. _whatsnew_1000.prior_deprecations:
+
+
+Removed SparseSeries and SparseDataFrame
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``SparseSeries``, ``SparseDataFrame`` and the ``DataFrame.to_sparse`` method
+have been removed (:issue:`28425`). We recommend using a ``Series`` or
+``DataFrame`` with sparse values instead. See :ref:`sparse.migration` for help
+with migrating existing code.
+
+Removal of prior version deprecations/changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`)
+- Changed the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`)
+- :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`)
+- :meth:`pandas.Series.str.cat` does not accept list-likes *within* list-likes anymore (:issue:`27611`)
+- Removed the previously deprecated :meth:`ExtensionArray._formatting_values`. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`)
+- Removed the previously deprecated ``IntervalIndex.from_intervals`` in favor of the :class:`IntervalIndex` constructor (:issue:`19263`)
+- Ability to read pickles containing :class:`Categorical` instances created with pre-0.16 version of pandas has been removed (:issue:`27538`)
+-
+
+.. _whatsnew_1000.performance:
+
+Performance improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Performance improvement in indexing with a non-unique :class:`IntervalIndex` (:issue:`27489`)
+- Performance improvement in `MultiIndex.is_monotonic` (:issue:`27495`)
+- Performance improvement in :func:`cut` when ``bins`` is an :class:`IntervalIndex` (:issue:`27668`)
+- Performance improvement in :meth:`DataFrame.corr` when ``method`` is ``"spearman"`` (:issue:`28139`)
+- Performance improvement in :meth:`DataFrame.replace` when provided a list of values to replace (:issue:`28099`)
+- Performance improvement in :meth:`DataFrame.select_dtypes` by using vectorization instead of iterating over a loop (:issue:`28317`)
+
+.. _whatsnew_1000.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+- Bug in :meth:`DataFrame.to_html` when using ``formatters=<list>`` and ``max_cols`` together. (:issue:`25955`)
+
+Categorical
+^^^^^^^^^^^
+
+- Added test to assert the :func:`fillna` raises the correct ValueError message when the value isn't a value from categories (:issue:`13628`)
+- Bug in :meth:`Categorical.astype` where ``NaN`` values were handled incorrectly when casting to int (:issue:`28406`)
+-
+-
+
+
+Datetimelike
+^^^^^^^^^^^^
+- Bug in :meth:`Series.__setitem__` incorrectly casting ``np.timedelta64("NaT")`` to ``np.datetime64("NaT")`` when inserting into a :class:`Series` with datetime64 dtype (:issue:`27311`)
+- Bug in :meth:`Series.dt` property lookups when the underlying data is read-only (:issue:`27529`)
+- Bug in ``HDFStore.__getitem__`` incorrectly reading tz attribute created in Python 2 (:issue:`26443`)
+- Bug in :func:`to_datetime` where passing arrays of malformed ``str`` with errors="coerce" could incorrectly lead to raising ``ValueError`` (:issue:`28299`)
+- Bug in :meth:`pandas.core.groupby.SeriesGroupBy.nunique` where ``NaT`` values were interfering with the count of unique values (:issue:`27951`)
+- Bug in :class:`Timestamp` subtraction when subtracting a :class:`Timestamp` from a ``np.datetime64`` object incorrectly raising ``TypeError`` (:issue:`28286`)
+- Addition and subtraction of integer or integer-dtype arrays with :class:`Timestamp` will now raise ``NullFrequencyError`` instead of ``ValueError`` (:issue:`28268`)
+- Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`)
+- Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`)
+- Bug in :class:`DataFrame` arithmetic operations when operating with a :class:`Series` with dtype `'timedelta64[ns]'` (:issue:`28049`)
+-
+
+Timedelta
+^^^^^^^^^
+
+-
+-
+
+Timezones
+^^^^^^^^^
+
+-
+-
+
+
+Numeric
+^^^^^^^
+- Bug in :meth:`DataFrame.quantile` with zero-column :class:`DataFrame` incorrectly raising (:issue:`23925`)
+- :class:`DataFrame` inequality comparisons with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`)
+-
+
+Conversion
+^^^^^^^^^^
+
+-
+-
+
+Strings
+^^^^^^^
+
+-
+-
+
+
+Interval
+^^^^^^^^
+
+-
+-
+
+Indexing
+^^^^^^^^
+
+- Bug in assignment using a reverse slicer (:issue:`26939`)
+- Bug in :meth:`DataFrame.explode` would duplicate frame in the presence of duplicates in the index (:issue:`28010`)
+- Bug in reindexing a :class:`PeriodIndex` with another type of index that contained a :class:`Period` (:issue:`28323`) (:issue:`28337`)
+- Fix assignment of column via `.loc` with numpy non-ns datetime type (:issue:`27395`)
+- Bug in :meth:`Float64Index.astype` where ``np.inf`` was not handled properly when casting to an integer dtype (:issue:`28475`)
+
+Missing
+^^^^^^^
+
+-
+-
+
+MultiIndex
+^^^^^^^^^^
+
+-
+-
+
+I/O
+^^^
+
+- :meth:`read_csv` now accepts binary mode file buffers when using the Python csv engine (:issue:`23779`)
+- Bug in :meth:`DataFrame.to_json` where using a Tuple as a column or index value and using ``orient="columns"`` or ``orient="index"`` would produce invalid JSON (:issue:`20500`)
+- Improve infinity parsing. :meth:`read_csv` now interprets ``Infinity``, ``+Infinity``, ``-Infinity`` as floating point values (:issue:`10065`)
+- Bug in :meth:`DataFrame.to_csv` where values were truncated when the length of ``na_rep`` was shorter than the text input data. (:issue:`25099`)
+- Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`)
+- Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`)
+- Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`)
+
+Plotting
+^^^^^^^^
+
+- Bug in :meth:`Series.plot` not able to plot boolean values (:issue:`23719`)
+-
+- Bug in :meth:`DataFrame.plot` not able to plot when no rows (:issue:`27758`)
+- Bug in :meth:`DataFrame.plot` producing incorrect legend markers when plotting multiple series on the same axis (:issue:`18222`)
+- Bug in :meth:`DataFrame.plot` when ``kind='box'`` and data contains datetime or timedelta data. These types are now automatically dropped (:issue:`22799`)
+- Bug in :meth:`DataFrame.plot.line` and :meth:`DataFrame.plot.area` produce wrong xlim in x-axis (:issue:`27686`, :issue:`25160`, :issue:`24784`)
+- Bug where :meth:`DataFrame.boxplot` would not accept a `color` parameter like `DataFrame.plot.box` (:issue:`26214`)
+- Bug in the ``xticks`` argument being ignored for :meth:`DataFrame.plot.bar` (:issue:`14119`)
+- :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`)
+
+Groupby/resample/rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+-
+- Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue:`28192`)
+- Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`)
+- Bug in :meth:`DataFrameGroupBy.agg` not able to use lambda function with named aggregation (:issue:`27519`)
+
+Reshaping
+^^^^^^^^^
+
+- Bug in :meth:`DataFrame.apply` that caused incorrect output with empty :class:`DataFrame` (:issue:`28202`, :issue:`21959`)
+- Bug in :meth:`DataFrame.stack` not handling non-unique indexes correctly when creating MultiIndex (:issue:`28301`)
+- Bug :func:`merge_asof` could not use :class:`datetime.timedelta` for ``tolerance`` kwarg (:issue:`28098`)
+
+Sparse
+^^^^^^
+- Bug in :class:`SparseDataFrame` arithmetic operations incorrectly casting inputs to float (:issue:`28107`)
+-
+-
+
+ExtensionArray
+^^^^^^^^^^^^^^
+
+- Bug in :class:`arrays.PandasArray` when setting a scalar string (:issue:`28118`, :issue:`28150`).
+-
+
+
+Other
+^^^^^
+- Trying to set the ``display.precision``, ``display.max_rows`` or ``display.max_columns`` using :meth:`set_option` to anything but a ``None`` or a positive int will raise a ``ValueError`` (:issue:`23348`)
+- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`)
+- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`)
+- :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`)
+
+.. _whatsnew_1000.contributors:
+
+Contributors
+~~~~~~~~~~~~
diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py
index 4256e4659715d..1a064f71792e9 100644
--- a/doc/sphinxext/contributors.py
+++ b/doc/sphinxext/contributors.py
@@ -8,12 +8,11 @@
code contributors and commits, and then list each contributor
individually.
"""
+from announce import build_components
from docutils import nodes
from docutils.parsers.rst import Directive
import git
-from announce import build_components
-
class ContributorsDirective(Directive):
required_arguments = 1
diff --git a/environment.yml b/environment.yml
index 93e8302b498a0..7629fa52e7829 100644
--- a/environment.yml
+++ b/environment.yml
@@ -13,7 +13,7 @@ dependencies:
- asv
# building
- - cython>=0.28.2
+ - cython>=0.29.13
# code checks
- black
@@ -36,6 +36,12 @@ dependencies:
- nbsphinx
- pandoc
+ # web (jinja2 is also needed, but it's also an optional pandas dependency)
+ - markdown
+ - feedparser
+ - pyyaml
+ - requests
+
# testing
- boto3
- botocore>=1.11
@@ -71,7 +77,7 @@ dependencies:
- lxml # pandas.read_html
- openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- pyarrow>=0.9.0 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
- - pyqt # pandas.read_clipbobard
+ - pyqt>=5.9.2 # pandas.read_clipboard
- pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
- python-snappy # required by pyarrow
- s3fs # pandas.read_csv... when using 's3://...' path
@@ -80,4 +86,5 @@ dependencies:
- xlrd # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- xlsxwriter # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- xlwt # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
+ - odfpy # pandas.read_excel
- pyreadstat # pandas.read_spss
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 6351b508fb0e5..6d0c55a45ed46 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -114,12 +114,7 @@
DataFrame,
)
-from pandas.core.sparse.api import (
- SparseArray,
- SparseDataFrame,
- SparseSeries,
- SparseDtype,
-)
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.tseries.api import infer_freq
from pandas.tseries import offsets
@@ -196,8 +191,9 @@
if pandas.compat.PY37:
def __getattr__(name):
+ import warnings
+
if name == "Panel":
- import warnings
warnings.warn(
"The Panel class is removed from pandas. Accessing it "
@@ -211,6 +207,17 @@ class Panel:
pass
return Panel
+ elif name in {"SparseSeries", "SparseDataFrame"}:
+ warnings.warn(
+ "The {} class is removed from pandas. Accessing it from "
+ "the top-level namespace will also be removed in the next "
+ "version".format(name),
+ FutureWarning,
+ stacklevel=2,
+ )
+
+ return type(name, (), {})
+
raise AttributeError("module 'pandas' has no attribute '{}'".format(name))
@@ -219,6 +226,12 @@ class Panel:
class Panel:
pass
+ class SparseDataFrame:
+ pass
+
+ class SparseSeries:
+ pass
+
# module level doc-string
__doc__ = """
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 61e926035c3f2..890db5b41907e 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -110,7 +110,7 @@ def _set_option(*args, **kwargs):
# must at least 1 arg deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
- raise ValueError("Must provide an even number of non-keyword " "arguments")
+ raise ValueError("Must provide an even number of non-keyword arguments")
# default to false
silent = kwargs.pop("silent", False)
@@ -395,7 +395,7 @@ class option_context:
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
- "Need to invoke as" " option_context(pat, val, [(pat, val), ...])."
+ "Need to invoke as option_context(pat, val, [(pat, val), ...])."
)
self.ops = list(zip(args[::2], args[1::2]))
@@ -787,6 +787,7 @@ def is_instance_factory(_type):
ValueError if x is not an instance of `_type`
"""
+
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
@@ -820,6 +821,32 @@ def inner(x):
return inner
+def is_nonnegative_int(value):
+ """
+ Verify that value is None or a positive int.
+
+ Parameters
+ ----------
+ value : None or int
+ The `value` to be checked.
+
+ Raises
+ ------
+ ValueError
+ When the value is not None or is a negative integer
+ """
+
+ if value is None:
+ return
+
+ elif isinstance(value, int):
+ if value >= 0:
+ return
+
+ msg = "Value must be a nonnegative integer or None"
+ raise ValueError(msg)
+
+
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
diff --git a/pandas/_config/display.py b/pandas/_config/display.py
index 6e5fabe2706e5..067b7c503baab 100644
--- a/pandas/_config/display.py
+++ b/pandas/_config/display.py
@@ -28,7 +28,10 @@ def detect_console_encoding():
if not encoding or "ascii" in encoding.lower():
try:
encoding = locale.getpreferredencoding()
- except Exception:
+ except locale.Error:
+ # can be raised by locale.setlocale(), which is
+ # called by getpreferredencoding
+ # (on some systems, see stdlib locale docs)
pass
# when all else fails. this will usually be "ascii"
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 46802c6460959..9f750d8447c6a 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -98,13 +98,7 @@ def _valid_locales(locales, normalize):
def _default_locale_getter():
- try:
- raw_locales = subprocess.check_output(["locale -a"], shell=True)
- except subprocess.CalledProcessError as e:
- raise type(e)(
- "{exception}, the 'locale -a' command cannot be found "
- "on your system".format(exception=e)
- )
+ raw_locales = subprocess.check_output(["locale -a"], shell=True)
return raw_locales
@@ -139,7 +133,9 @@ def get_locales(prefix=None, normalize=True, locale_getter=_default_locale_gette
"""
try:
raw_locales = locale_getter()
- except Exception:
+ except subprocess.CalledProcessError:
+        # Raised on (some? all?) Windows platforms because "locale -a"
+        # is not defined on the system
return None
try:
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 0dbe525f7506e..0f91f612994c7 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -296,6 +296,7 @@ def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1):
cdef:
Py_ssize_t i, j, xi, yi, N, K
ndarray[float64_t, ndim=2] result
+ ndarray[float64_t, ndim=2] ranked_mat
ndarray[float64_t, ndim=1] maskedx
ndarray[float64_t, ndim=1] maskedy
ndarray[uint8_t, ndim=2] mask
@@ -307,10 +308,18 @@ def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1):
result = np.empty((K, K), dtype=np.float64)
mask = np.isfinite(mat).view(np.uint8)
+ ranked_mat = np.empty((N, K), dtype=np.float64)
+
+ for i in range(K):
+ ranked_mat[:, i] = rank_1d_float64(mat[:, i])
+
for xi in range(K):
for yi in range(xi + 1):
nobs = 0
+ # Keep track of whether we need to recompute ranks
+ all_ranks = True
for i in range(N):
+ all_ranks &= not (mask[i, xi] ^ mask[i, yi])
if mask[i, xi] and mask[i, yi]:
nobs += 1
@@ -320,13 +329,16 @@ def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1):
maskedx = np.empty(nobs, dtype=np.float64)
maskedy = np.empty(nobs, dtype=np.float64)
j = 0
+
for i in range(N):
if mask[i, xi] and mask[i, yi]:
- maskedx[j] = mat[i, xi]
- maskedy[j] = mat[i, yi]
+ maskedx[j] = ranked_mat[i, xi]
+ maskedy[j] = ranked_mat[i, yi]
j += 1
- maskedx = rank_1d_float64(maskedx)
- maskedy = rank_1d_float64(maskedy)
+
+ if not all_ranks:
+ maskedx = rank_1d_float64(maskedx)
+ maskedy = rank_1d_float64(maskedy)
mean = (nobs + 1) / 2.
@@ -674,31 +686,6 @@ def backfill_2d_inplace(algos_t[:, :] values,
val = values[j, i]
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def arrmap(algos_t[:] index, object func):
- cdef:
- Py_ssize_t length = index.shape[0]
- Py_ssize_t i = 0
- ndarray[object] result = np.empty(length, dtype=np.object_)
-
- from pandas._libs.lib import maybe_convert_objects
-
- for i in range(length):
- result[i] = func(index[i])
-
- return maybe_convert_objects(result)
-
-
-arrmap_float64 = arrmap["float64_t"]
-arrmap_float32 = arrmap["float32_t"]
-arrmap_object = arrmap["object"]
-arrmap_int64 = arrmap["int64_t"]
-arrmap_int32 = arrmap["int32_t"]
-arrmap_uint64 = arrmap["uint64_t"]
-arrmap_bool = arrmap["uint8_t"]
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
def is_monotonic(ndarray[algos_t, ndim=1] arr, bint timelike):
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index e3f18572abca1..3069bbbf34bb7 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -719,6 +719,11 @@ def group_quantile(ndarray[float64_t] out,
ndarray[int64_t] counts, non_na_counts, sort_arr
assert values.shape[0] == N
+
+ if not (0 <= q <= 1):
+ raise ValueError("'q' must be between 0 and 1. Got"
+ " '{}' instead".format(q))
+
inter_methods = {
'linear': INTERPOLATION_LINEAR,
'lower': INTERPOLATION_LOWER,
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 3e620f5934d5e..8179822b9e10c 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -1,7 +1,7 @@
cimport cython
-from cpython cimport (PyObject, Py_INCREF,
- PyMem_Malloc, PyMem_Realloc, PyMem_Free)
+from cpython.ref cimport PyObject, Py_INCREF
+from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
from libc.stdlib cimport malloc, free
@@ -108,7 +108,7 @@ cdef class Int64Factorizer:
def get_count(self):
return self.count
- def factorize(self, int64_t[:] values, sort=False,
+ def factorize(self, const int64_t[:] values, sort=False,
na_sentinel=-1, na_value=None):
"""
Factorize values with nans replaced by na_sentinel
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 7000c07b1f5a6..979dad6db0838 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,4 +1,5 @@
from datetime import datetime, timedelta, date
+import warnings
import cython
@@ -47,10 +48,6 @@ cpdef get_value_at(ndarray arr, object loc, object tz=None):
return util.get_value_at(arr, loc)
-def get_value_box(arr: ndarray, loc: object) -> object:
- return get_value_at(arr, loc, tz=None)
-
-
# Don't populate hash tables in monotonic indexes larger than this
_SIZE_CUTOFF = 1000000
@@ -533,6 +530,9 @@ cpdef convert_scalar(ndarray arr, object value):
pass
elif isinstance(value, (datetime, np.datetime64, date)):
return Timestamp(value).value
+ elif util.is_timedelta64_object(value):
+ # exclude np.timedelta64("NaT") from value != value below
+ pass
elif value is None or value != value:
return NPY_NAT
elif isinstance(value, str):
@@ -542,7 +542,7 @@ cpdef convert_scalar(ndarray arr, object value):
elif arr.descr.type_num == NPY_TIMEDELTA:
if util.is_array(value):
pass
- elif isinstance(value, timedelta):
+ elif isinstance(value, timedelta) or util.is_timedelta64_object(value):
return Timedelta(value).value
elif util.is_datetime64_object(value):
# exclude np.datetime64("NaT") which would otherwise be picked up
diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in
index 3c9a096e7ecc0..4db048eeb0383 100644
--- a/pandas/_libs/index_class_helper.pxi.in
+++ b/pandas/_libs/index_class_helper.pxi.in
@@ -60,7 +60,16 @@ cdef class {{name}}Engine(IndexEngine):
# A view is needed for some subclasses, such as PeriodEngine:
values = self._get_index_values().view('{{dtype}}')
- indexer = values == val
+ try:
+ with warnings.catch_warnings():
+ # e.g. if values is float64 and `val` is a str, suppress warning
+ warnings.filterwarnings("ignore", category=FutureWarning)
+ indexer = values == val
+ except TypeError:
+ # if the equality above returns a bool, cython will raise TypeError
+ # when trying to cast it to ndarray
+ raise KeyError(val)
+
found = np.where(indexer)[0]
count = len(found)
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 54ee4753ba332..b7fd490532514 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -1,7 +1,7 @@
import cython
from cython import Py_ssize_t
-from cpython cimport PyObject
+from cpython.object cimport PyObject
cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index f9e1ebb11116b..238bfd0be0aa7 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -8,8 +8,9 @@ from numpy cimport (ndarray,
uint32_t, uint64_t, float32_t, float64_t)
cnp.import_array()
-from pandas._libs.algos import groupsort_indexer, ensure_platform_int
-from pandas.core.algorithms import take_nd
+from pandas._libs.algos import (
+ groupsort_indexer, ensure_platform_int, take_1d_int64_int64
+)
def inner_join(const int64_t[:] left, const int64_t[:] right,
@@ -67,8 +68,8 @@ def left_outer_join(const int64_t[:] left, const int64_t[:] right,
Py_ssize_t max_groups, sort=True):
cdef:
Py_ssize_t i, j, k, count = 0
- ndarray[int64_t] left_count, right_count
- ndarray left_sorter, right_sorter, rev
+ ndarray[int64_t] left_count, right_count, left_sorter, right_sorter
+ ndarray rev
ndarray[int64_t] left_indexer, right_indexer
int64_t lc, rc
@@ -124,10 +125,8 @@ def left_outer_join(const int64_t[:] left, const int64_t[:] right,
# no multiple matches for any row on the left
# this is a short-cut to avoid groupsort_indexer
# otherwise, the `else` path also works in this case
- left_sorter = ensure_platform_int(left_sorter)
-
rev = np.empty(len(left), dtype=np.intp)
- rev.put(left_sorter, np.arange(len(left)))
+ rev.put(ensure_platform_int(left_sorter), np.arange(len(left)))
else:
rev, _ = groupsort_indexer(left_indexer, len(left))
@@ -201,9 +200,12 @@ def full_outer_join(const int64_t[:] left, const int64_t[:] right,
_get_result_indexer(right_sorter, right_indexer))
-def _get_result_indexer(sorter, indexer):
+cdef _get_result_indexer(ndarray[int64_t] sorter, ndarray[int64_t] indexer):
if len(sorter) > 0:
- res = take_nd(sorter, indexer, fill_value=-1)
+ # cython-only equivalent to
+ # `res = algos.take_nd(sorter, indexer, fill_value=-1)`
+ res = np.empty(len(indexer), dtype=np.int64)
+ take_1d_int64_int64(sorter, indexer, res, -1)
else:
# length-0 case
res = np.empty(len(indexer), dtype=np.int64)
diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd
index c52dacd37f955..ca3b83852b098 100644
--- a/pandas/_libs/khash.pxd
+++ b/pandas/_libs/khash.pxd
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-from cpython cimport PyObject
+from cpython.object cimport PyObject
from numpy cimport int64_t, uint64_t, int32_t, uint32_t, float64_t
cdef extern from "khash_python.h":
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 27ee685acfde7..1c2f80b832201 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -9,9 +9,12 @@ import warnings
import cython
from cython import Py_ssize_t
-from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, PyTuple_New, PyObject_Str,
- Py_EQ, Py_SIZE, PyObject_RichCompareBool,
- PyUnicode_Join, PyList_New)
+from cpython.list cimport PyList_New
+from cpython.object cimport (PyObject_Str, PyObject_RichCompareBool, Py_EQ,
+ Py_SIZE)
+from cpython.ref cimport Py_INCREF
+from cpython.tuple cimport PyTuple_SET_ITEM, PyTuple_New
+from cpython.unicode cimport PyUnicode_Join
from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyTime_Check, PyDelta_Check,
@@ -52,8 +55,7 @@ cimport pandas._libs.util as util
from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN
from pandas._libs.tslib import array_to_datetime
-from pandas._libs.tslibs.nattype cimport NPY_NAT
-from pandas._libs.tslibs.nattype import NaT
+from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.conversion cimport convert_to_tsobject
from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
from pandas._libs.tslibs.timezones cimport get_timezone, tz_compare
@@ -157,13 +159,13 @@ def is_scalar(val: object) -> bool:
return (cnp.PyArray_IsAnyScalar(val)
# PyArray_IsAnyScalar is always False for bytearrays on Py3
- or isinstance(val, (Fraction, Number))
- # We differ from numpy, which claims that None is not scalar;
- # see np.isscalar
- or val is None
or PyDate_Check(val)
or PyDelta_Check(val)
or PyTime_Check(val)
+ # We differ from numpy, which claims that None is not scalar;
+ # see np.isscalar
+ or val is None
+ or isinstance(val, (Fraction, Number))
or util.is_period_object(val)
or is_decimal(val)
or is_interval(val)
@@ -235,7 +237,7 @@ def fast_unique_multiple(list arrays, sort: bool=True):
if sort is None:
try:
uniques.sort()
- except Exception:
+ except TypeError:
# TODO: RuntimeWarning?
pass
@@ -264,7 +266,7 @@ def fast_unique_multiple_list(lists: list, sort: bool=True) -> list:
if sort:
try:
uniques.sort()
- except Exception:
+ except TypeError:
pass
return uniques
@@ -304,7 +306,7 @@ def fast_unique_multiple_list_gen(object gen, bint sort=True):
if sort:
try:
uniques.sort()
- except Exception:
+ except TypeError:
pass
return uniques
@@ -522,9 +524,18 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool:
# we are either not equal or both nan
# I think None == None will be true here
- if not (PyObject_RichCompareBool(x, y, Py_EQ) or
- (x is None or is_nan(x)) and (y is None or is_nan(y))):
- return False
+ try:
+ if not (PyObject_RichCompareBool(x, y, Py_EQ) or
+ (x is None or is_nan(x)) and (y is None or is_nan(y))):
+ return False
+ except TypeError as err:
+ # Avoid raising TypeError on tzawareness mismatch
+ # TODO: This try/except can be removed if/when Timestamp
+            # comparisons are changed to match datetime, see GH#28507
+ if "tz-naive and tz-aware" in str(err):
+ return False
+ raise
+
return True
@@ -688,50 +699,6 @@ def generate_bins_dt64(ndarray[int64_t] values, const int64_t[:] binner,
return bins
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def row_bool_subset(const float64_t[:, :] values,
- ndarray[uint8_t, cast=True] mask):
- cdef:
- Py_ssize_t i, j, n, k, pos = 0
- ndarray[float64_t, ndim=2] out
-
- n, k = (<object>values).shape
- assert (n == len(mask))
-
- out = np.empty((mask.sum(), k), dtype=np.float64)
-
- for i in range(n):
- if mask[i]:
- for j in range(k):
- out[pos, j] = values[i, j]
- pos += 1
-
- return out
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def row_bool_subset_object(ndarray[object, ndim=2] values,
- ndarray[uint8_t, cast=True] mask):
- cdef:
- Py_ssize_t i, j, n, k, pos = 0
- ndarray[object, ndim=2] out
-
- n, k = (<object>values).shape
- assert (n == len(mask))
-
- out = np.empty((mask.sum(), k), dtype=object)
-
- for i in range(n):
- if mask[i]:
- for j in range(k):
- out[pos, j] = values[i, j]
- pos += 1
-
- return out
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
def get_level_sorter(const int64_t[:] label,
@@ -969,6 +936,7 @@ _TYPE_MAP = {
'M': 'datetime64',
'timedelta64[ns]': 'timedelta64',
'm': 'timedelta64',
+ 'interval': 'interval',
}
# types only exist on certain platform
@@ -997,6 +965,7 @@ cdef class Seen:
cdef:
bint int_ # seen_int
+ bint nat_ # seen nat
bint bool_ # seen_bool
bint null_ # seen_null
bint uint_ # seen_uint (unsigned integer)
@@ -1020,6 +989,7 @@ cdef class Seen:
initial methods to convert to numeric fail.
"""
self.int_ = 0
+ self.nat_ = 0
self.bool_ = 0
self.null_ = 0
self.uint_ = 0
@@ -1099,11 +1069,13 @@ cdef class Seen:
@property
def is_bool(self):
- return not (self.datetime_ or self.numeric_ or self.timedelta_)
+ return not (self.datetime_ or self.numeric_ or self.timedelta_
+ or self.nat_)
@property
def is_float_or_complex(self):
- return not (self.bool_ or self.datetime_ or self.timedelta_)
+ return not (self.bool_ or self.datetime_ or self.timedelta_
+ or self.nat_)
cdef _try_infer_map(v):
@@ -1236,7 +1208,9 @@ def infer_dtype(value: object, skipna: object=None) -> str:
# e.g. categoricals
try:
values = getattr(value, '_values', getattr(value, 'values', value))
- except:
+ except TypeError:
+ # This gets hit if we have an EA, since cython expects `values`
+ # to be an ndarray
value = _try_infer_map(value)
if value is not None:
return value
@@ -1252,8 +1226,6 @@ def infer_dtype(value: object, skipna: object=None) -> str:
construct_1d_object_array_from_listlike)
values = construct_1d_object_array_from_listlike(value)
- values = getattr(values, 'values', values)
-
# make contiguous
values = values.ravel()
@@ -1305,7 +1277,10 @@ def infer_dtype(value: object, skipna: object=None) -> str:
if is_integer_array(values):
return 'integer'
elif is_integer_float_array(values):
- return 'mixed-integer-float'
+ if is_integer_na_array(values):
+ return 'integer-na'
+ else:
+ return 'mixed-integer-float'
return 'mixed-integer'
elif PyDateTime_Check(val):
@@ -1330,7 +1305,10 @@ def infer_dtype(value: object, skipna: object=None) -> str:
if is_float_array(values):
return 'floating'
elif is_integer_float_array(values):
- return 'mixed-integer-float'
+ if is_integer_na_array(values):
+ return 'integer-na'
+ else:
+ return 'mixed-integer-float'
elif util.is_bool_object(val):
if is_bool_array(values, skipna=skipna):
@@ -1443,7 +1421,7 @@ def infer_datetimelike_array(arr: object) -> object:
try:
array_to_datetime(objs, errors='raise')
return 'datetime'
- except:
+ except (ValueError, TypeError):
pass
# we are *not* going to infer from strings
@@ -1566,6 +1544,19 @@ cpdef bint is_integer_array(ndarray values):
return validator.validate(values)
+cdef class IntegerNaValidator(Validator):
+ cdef inline bint is_value_typed(self, object value) except -1:
+ return (util.is_integer_object(value)
+ or (util.is_nan(value) and util.is_float_object(value)))
+
+
+cdef bint is_integer_na_array(ndarray values):
+ cdef:
+ IntegerNaValidator validator = IntegerNaValidator(len(values),
+ values.dtype)
+ return validator.validate(values)
+
+
cdef class IntegerFloatValidator(Validator):
cdef inline bint is_value_typed(self, object value) except -1:
return util.is_integer_object(value) or util.is_float_object(value)
@@ -2002,12 +1993,11 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
seen.null_ = 1
floats[i] = complexes[i] = fnan
elif val is NaT:
+ seen.nat_ = 1
if convert_datetime:
idatetimes[i] = NPY_NAT
- seen.datetime_ = 1
if convert_timedelta:
itimedeltas[i] = NPY_NAT
- seen.timedelta_ = 1
if not (convert_datetime or convert_timedelta):
seen.object_ = 1
break
@@ -2101,11 +2091,20 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
else:
if not seen.bool_:
if seen.datetime_:
- if not seen.numeric_:
+ if not seen.numeric_ and not seen.timedelta_:
return datetimes
elif seen.timedelta_:
if not seen.numeric_:
return timedeltas
+ elif seen.nat_:
+ if not seen.numeric_:
+ if convert_datetime and convert_timedelta:
+ # TODO: array full of NaT ambiguity resolve here needed
+ pass
+ elif convert_datetime:
+ return datetimes
+ elif convert_timedelta:
+ return timedeltas
else:
if seen.complex_:
return complexes
@@ -2132,11 +2131,20 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
else:
if not seen.bool_:
if seen.datetime_:
- if not seen.numeric_:
+ if not seen.numeric_ and not seen.timedelta_:
return datetimes
elif seen.timedelta_:
if not seen.numeric_:
return timedeltas
+ elif seen.nat_:
+ if not seen.numeric_:
+ if convert_datetime and convert_timedelta:
+ # TODO: array full of NaT ambiguity resolve here needed
+ pass
+ elif convert_datetime:
+ return datetimes
+ elif convert_timedelta:
+ return timedeltas
else:
if seen.complex_:
if not seen.int_:
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 1d756115ebd5a..052b081988c9e 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -80,11 +80,8 @@ cpdef bint checknull_old(object val):
cdef inline bint _check_none_nan_inf_neginf(object val):
- try:
- return val is None or (isinstance(val, float) and
- (val != val or val == INF or val == NEGINF))
- except ValueError:
- return False
+ return val is None or (isinstance(val, float) and
+ (val != val or val == INF or val == NEGINF))
@cython.wraparound(False)
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 27f7016ab4057..bdafcd646a4c8 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -1,7 +1,7 @@
import operator
-from cpython cimport (PyObject_RichCompareBool,
- Py_EQ, Py_NE, Py_LT, Py_LE, Py_GT, Py_GE)
+from cpython.object cimport (PyObject_RichCompareBool,
+ Py_EQ, Py_NE, Py_LT, Py_LE, Py_GT, Py_GE)
import cython
from cython import Py_ssize_t
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index cafc31dad3568..f5a42d7aef3ba 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -2,7 +2,6 @@
# See LICENSE for the license
import bz2
import gzip
-import lzma
import os
import sys
import time
@@ -18,12 +17,11 @@ from libc.string cimport strncpy, strlen, strcasecmp
import cython
from cython import Py_ssize_t
-from cpython cimport (PyObject, PyBytes_FromString,
- PyBytes_AsString,
- PyUnicode_AsUTF8String,
- PyErr_Occurred, PyErr_Fetch,
- PyUnicode_Decode)
+from cpython.bytes cimport PyBytes_AsString, PyBytes_FromString
+from cpython.exc cimport PyErr_Occurred, PyErr_Fetch
+from cpython.object cimport PyObject
from cpython.ref cimport Py_XDECREF
+from cpython.unicode cimport PyUnicode_AsUTF8String, PyUnicode_Decode
cdef extern from "Python.h":
@@ -59,9 +57,12 @@ from pandas.core.arrays import Categorical
from pandas.core.dtypes.concat import union_categoricals
import pandas.io.common as icom
+from pandas.compat import _import_lzma, _get_lzma_file
from pandas.errors import (ParserError, DtypeWarning,
EmptyDataError, ParserWarning)
+lzma = _import_lzma()
+
# Import CParserError as alias of ParserError for backwards compatibility.
# Ultimately, we want to remove this import. See gh-12665 and gh-14479.
CParserError = ParserError
@@ -565,10 +566,8 @@ cdef class TextReader:
# we need to properly close an open derived
# filehandle here, e.g. and UTFRecoder
if self.handle is not None:
- try:
- self.handle.close()
- except:
- pass
+ self.handle.close()
+
# also preemptively free all allocated memory
parser_free(self.parser)
if self.true_set:
@@ -645,9 +644,9 @@ cdef class TextReader:
'zip file %s', str(zip_names))
elif self.compression == 'xz':
if isinstance(source, str):
- source = lzma.LZMAFile(source, 'rb')
+ source = _get_lzma_file(lzma)(source, 'rb')
else:
- source = lzma.LZMAFile(filename=source)
+ source = _get_lzma_file(lzma)(filename=source)
else:
raise ValueError('Unrecognized compression type: %s' %
self.compression)
@@ -1691,6 +1690,10 @@ cdef:
char* cposinf = b'+inf'
char* cneginf = b'-inf'
+ char* cinfty = b'Infinity'
+ char* cposinfty = b'+Infinity'
+ char* cneginfty = b'-Infinity'
+
cdef _try_double(parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
@@ -1770,9 +1773,12 @@ cdef inline int _try_double_nogil(parser_t *parser,
if error != 0 or p_end == word or p_end[0]:
error = 0
if (strcasecmp(word, cinf) == 0 or
- strcasecmp(word, cposinf) == 0):
+ strcasecmp(word, cposinf) == 0 or
+ strcasecmp(word, cinfty) == 0 or
+ strcasecmp(word, cposinfty) == 0):
data[0] = INF
- elif strcasecmp(word, cneginf) == 0:
+ elif (strcasecmp(word, cneginf) == 0 or
+ strcasecmp(word, cneginfty) == 0 ):
data[0] = NEGINF
else:
return 1
@@ -1791,9 +1797,12 @@ cdef inline int _try_double_nogil(parser_t *parser,
if error != 0 or p_end == word or p_end[0]:
error = 0
if (strcasecmp(word, cinf) == 0 or
- strcasecmp(word, cposinf) == 0):
+ strcasecmp(word, cposinf) == 0 or
+ strcasecmp(word, cinfty) == 0 or
+ strcasecmp(word, cposinfty) == 0):
data[0] = INF
- elif strcasecmp(word, cneginf) == 0:
+ elif (strcasecmp(word, cneginf) == 0 or
+ strcasecmp(word, cneginfty) == 0):
data[0] = NEGINF
else:
return 1
diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx
index 92f1c7c51aa04..857119789ab45 100644
--- a/pandas/_libs/properties.pyx
+++ b/pandas/_libs/properties.pyx
@@ -1,6 +1,6 @@
from cython import Py_ssize_t
-from cpython cimport (
+from cpython.dict cimport (
PyDict_Contains, PyDict_GetItem, PyDict_SetItem)
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 739ac0ed397ca..361c21c18c4da 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -1,7 +1,7 @@
from distutils.version import LooseVersion
from cython import Py_ssize_t
-from cpython cimport Py_INCREF
+from cpython.ref cimport Py_INCREF
from libc.stdlib cimport malloc, free
@@ -296,8 +296,6 @@ cdef class SeriesBinGrouper:
islider.advance(group_size)
vslider.advance(group_size)
- except:
- raise
finally:
# so we don't free the wrong memory
islider.reset()
@@ -425,8 +423,6 @@ cdef class SeriesGrouper:
group_size = 0
- except:
- raise
finally:
# so we don't free the wrong memory
islider.reset()
@@ -532,7 +528,8 @@ def apply_frame_axis0(object frame, object f, object names,
try:
piece = f(chunk)
- except:
+ except Exception:
+ # We can't be more specific without knowing something about `f`
raise InvalidApply('Let this error raise above us')
# Need to infer if low level index slider will cause segfaults
@@ -543,6 +540,7 @@ def apply_frame_axis0(object frame, object f, object names,
else:
mutated = True
except AttributeError:
+ # `piece` might not have an index, could be e.g. an int
pass
results.append(piece)
@@ -628,7 +626,7 @@ cdef class BlockSlider:
arr.shape[1] = 0
-def reduce(arr, f, axis=0, dummy=None, labels=None):
+def compute_reduction(arr, f, axis=0, dummy=None, labels=None):
"""
Parameters
diff --git a/pandas/_libs/skiplist.pxd b/pandas/_libs/skiplist.pxd
index a273d2c445d18..e827223bbe0a7 100644
--- a/pandas/_libs/skiplist.pxd
+++ b/pandas/_libs/skiplist.pxd
@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
-
-from cython cimport Py_ssize_t
-
+# See GH#27465 for reference on related-but-unused cython code
cdef extern from "src/skiplist.h":
ctypedef struct node_t:
@@ -24,22 +22,3 @@ cdef extern from "src/skiplist.h":
double skiplist_get(skiplist_t*, int, int*) nogil
int skiplist_insert(skiplist_t*, double) nogil
int skiplist_remove(skiplist_t*, double) nogil
-
-
-# Note: Node is declared here so that IndexableSkiplist can be exposed;
-# Node itself not intended to be exposed.
-cdef class Node:
- cdef public:
- double value
- list next
- list width
-
-
-cdef class IndexableSkiplist:
- cdef:
- Py_ssize_t size, maxlevels
- Node head
-
- cpdef get(self, Py_ssize_t i)
- cpdef insert(self, double value)
- cpdef remove(self, double value)
diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx
index 2fdee72f9d588..eb750a478415a 100644
--- a/pandas/_libs/skiplist.pyx
+++ b/pandas/_libs/skiplist.pyx
@@ -5,144 +5,3 @@
# Link: http://code.activestate.com/recipes/576930/
# Cython version: Wes McKinney
-from random import random
-
-from libc.math cimport log
-
-import numpy as np
-
-
-# MSVC does not have log2!
-
-cdef double Log2(double x):
- return log(x) / log(2.)
-
-
-# TODO: optimize this, make less messy
-
-cdef class Node:
- # cdef public:
- # double value
- # list next
- # list width
-
- def __init__(self, double value, list next, list width):
- self.value = value
- self.next = next
- self.width = width
-
-
-# Singleton terminator node
-NIL = Node(np.inf, [], [])
-
-
-cdef class IndexableSkiplist:
- """
- Sorted collection supporting O(lg n) insertion, removal, and
- lookup by rank.
- """
- # cdef:
- # Py_ssize_t size, maxlevels
- # Node head
-
- def __init__(self, expected_size=100):
- self.size = 0
- self.maxlevels = int(1 + Log2(expected_size))
- self.head = Node(np.NaN, [NIL] * self.maxlevels, [1] * self.maxlevels)
-
- def __len__(self):
- return self.size
-
- def __getitem__(self, i):
- return self.get(i)
-
- cpdef get(self, Py_ssize_t i):
- cdef:
- Py_ssize_t level
- Node node
-
- node = self.head
- i += 1
-
- for level in range(self.maxlevels - 1, -1, -1):
- while node.width[level] <= i:
- i -= node.width[level]
- node = node.next[level]
-
- return node.value
-
- cpdef insert(self, double value):
- cdef:
- Py_ssize_t level, steps, d
- Node node, prevnode, newnode, next_at_level, tmp
- list chain, steps_at_level
-
- # find first node on each level where node.next[levels].value > value
- chain = [None] * self.maxlevels
- steps_at_level = [0] * self.maxlevels
- node = self.head
-
- for level in range(self.maxlevels - 1, -1, -1):
- next_at_level = node.next[level]
-
- while next_at_level.value <= value:
- steps_at_level[level] = (steps_at_level[level] +
- node.width[level])
- node = next_at_level
- next_at_level = node.next[level]
-
- chain[level] = node
-
- # insert a link to the newnode at each level
- d = min(self.maxlevels, 1 - int(Log2(random())))
- newnode = Node(value, [None] * d, [None] * d)
- steps = 0
-
- for level in range(d):
- prevnode = chain[level]
- newnode.next[level] = prevnode.next[level]
- prevnode.next[level] = newnode
- newnode.width[level] = (prevnode.width[level] - steps)
- prevnode.width[level] = steps + 1
- steps += steps_at_level[level]
-
- for level in range(d, self.maxlevels):
- (<Node>chain[level]).width[level] += 1
-
- self.size += 1
-
- cpdef remove(self, double value):
- cdef:
- Py_ssize_t level, d
- Node node, prevnode, tmpnode, next_at_level
- list chain
-
- # find first node on each level where node.next[levels].value >= value
- chain = [None] * self.maxlevels
- node = self.head
-
- for level in range(self.maxlevels - 1, -1, -1):
- next_at_level = node.next[level]
- while next_at_level.value < value:
- node = next_at_level
- next_at_level = node.next[level]
-
- chain[level] = node
-
- if value != (<Node>(<Node>(<Node>chain[0]).next)[0]).value:
- raise KeyError('Not Found')
-
- # remove one link at each level
- d = len((<Node>(<Node>(<Node>chain[0]).next)[0]).next)
-
- for level in range(d):
- prevnode = chain[level]
- tmpnode = prevnode.next[level]
- prevnode.width[level] += tmpnode.width[level] - 1
- prevnode.next[level] = tmpnode.next[level]
-
- for level in range(d, self.maxlevels):
- tmpnode = chain[level]
- tmpnode.width[level] -= 1
-
- self.size -= 1
diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h
index 1db1878a8a773..0a767dd27b658 100644
--- a/pandas/_libs/src/parse_helper.h
+++ b/pandas/_libs/src/parse_helper.h
@@ -25,11 +25,6 @@ int to_double(char *item, double *p_value, char sci, char decimal,
return (error == 0) && (!*p_end);
}
-#if PY_VERSION_HEX < 0x02060000
-#define PyBytes_Check PyString_Check
-#define PyBytes_AS_STRING PyString_AS_STRING
-#endif // PY_VERSION_HEX
-
int floatify(PyObject *str, double *result, int *maybe_int) {
int status;
char *data;
@@ -50,7 +45,7 @@ int floatify(PyObject *str, double *result, int *maybe_int) {
status = to_double(data, result, sci, dec, maybe_int);
if (!status) {
- /* handle inf/-inf */
+ /* handle inf/-inf infinity/-infinity */
if (strlen(data) == 3) {
if (0 == strcasecmp(data, "inf")) {
*result = HUGE_VAL;
@@ -68,6 +63,23 @@ int floatify(PyObject *str, double *result, int *maybe_int) {
} else {
goto parsingerror;
}
+ } else if (strlen(data) == 8) {
+ if (0 == strcasecmp(data, "infinity")) {
+ *result = HUGE_VAL;
+ *maybe_int = 0;
+ } else {
+ goto parsingerror;
+ }
+ } else if (strlen(data) == 9) {
+ if (0 == strcasecmp(data, "-infinity")) {
+ *result = -HUGE_VAL;
+ *maybe_int = 0;
+ } else if (0 == strcasecmp(data, "+infinity")) {
+ *result = HUGE_VAL;
+ *maybe_int = 0;
+ } else {
+ goto parsingerror;
+ }
} else {
goto parsingerror;
}
diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h
index 0470fef450dde..05c3ae4096ad5 100644
--- a/pandas/_libs/src/ujson/lib/ultrajson.h
+++ b/pandas/_libs/src/ujson/lib/ultrajson.h
@@ -244,6 +244,10 @@ typedef struct __JSONObjectEncoder {
If true, '<', '>', and '&' characters will be encoded as \u003c, \u003e, and \u0026, respectively. If false, no special encoding will be used. */
int encodeHTMLChars;
+ /*
+  Number of spaces to use for indentation */
+ int indent;
+
/*
Set to an error message if error occurred */
const char *errorMsg;
@@ -307,11 +311,4 @@ EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec,
const char *buffer, size_t cbBuffer);
EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t);
-#define Buffer_Reserve(__enc, __len) \
- if ((size_t)((__enc)->end - (__enc)->offset) < (size_t)(__len)) { \
- Buffer_Realloc((__enc), (__len)); \
- }
-
-void Buffer_Realloc(JSONObjectEncoder *enc, size_t cbNeeded);
-
#endif // PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_
diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
index 2d6c823a45515..51c9b9244ecfc 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
@@ -714,6 +714,12 @@ int Buffer_EscapeStringValidated(JSOBJ obj, JSONObjectEncoder *enc,
}
}
+#define Buffer_Reserve(__enc, __len) \
+ if ( (size_t) ((__enc)->end - (__enc)->offset) < (size_t) (__len)) \
+ { \
+ Buffer_Realloc((__enc), (__len));\
+ } \
+
#define Buffer_AppendCharUnchecked(__enc, __chr) *((__enc)->offset++) = __chr;
FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin,
@@ -722,6 +728,22 @@ FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin,
while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
}
+void Buffer_AppendIndentNewlineUnchecked(JSONObjectEncoder *enc)
+{
+ if (enc->indent > 0) Buffer_AppendCharUnchecked(enc, '\n');
+}
+
+// This function could be refactored to only accept enc as an argument,
+// but this is a straight vendor from ujson source
+void Buffer_AppendIndentUnchecked(JSONObjectEncoder *enc, JSINT32 value)
+{
+ int i;
+ if (enc->indent > 0)
+ while (value-- > 0)
+ for (i = 0; i < enc->indent; i++)
+ Buffer_AppendCharUnchecked(enc, ' ');
+}
+
void Buffer_AppendIntUnchecked(JSONObjectEncoder *enc, JSINT32 value) {
char *wstr;
JSUINT32 uvalue = (value < 0) ? -value : value;
@@ -954,6 +976,7 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name,
enc->iterBegin(obj, &tc);
Buffer_AppendCharUnchecked(enc, '[');
+ Buffer_AppendIndentNewlineUnchecked (enc);
while (enc->iterNext(obj, &tc)) {
if (count > 0) {
@@ -961,17 +984,20 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name,
#ifndef JSON_NO_EXTRA_WHITESPACE
Buffer_AppendCharUnchecked(buffer, ' ');
#endif
+ Buffer_AppendIndentNewlineUnchecked (enc);
}
iterObj = enc->iterGetValue(obj, &tc);
enc->level++;
+ Buffer_AppendIndentUnchecked (enc, enc->level);
encode(iterObj, enc, NULL, 0);
count++;
}
enc->iterEnd(obj, &tc);
- Buffer_Reserve(enc, 2);
+ Buffer_AppendIndentNewlineUnchecked (enc);
+ Buffer_AppendIndentUnchecked (enc, enc->level);
Buffer_AppendCharUnchecked(enc, ']');
break;
}
@@ -981,6 +1007,7 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name,
enc->iterBegin(obj, &tc);
Buffer_AppendCharUnchecked(enc, '{');
+ Buffer_AppendIndentNewlineUnchecked (enc);
while (enc->iterNext(obj, &tc)) {
if (count > 0) {
@@ -988,18 +1015,21 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name,
#ifndef JSON_NO_EXTRA_WHITESPACE
Buffer_AppendCharUnchecked(enc, ' ');
#endif
+ Buffer_AppendIndentNewlineUnchecked (enc);
}
iterObj = enc->iterGetValue(obj, &tc);
objName = enc->iterGetName(obj, &tc, &szlen);
enc->level++;
+ Buffer_AppendIndentUnchecked (enc, enc->level);
encode(iterObj, enc, objName, szlen);
count++;
}
enc->iterEnd(obj, &tc);
- Buffer_Reserve(enc, 2);
+ Buffer_AppendIndentNewlineUnchecked (enc);
+ Buffer_AppendIndentUnchecked (enc, enc->level);
Buffer_AppendCharUnchecked(enc, '}');
break;
}
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 926440218b5d9..22c42acea0150 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -16,18 +16,19 @@ derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
https://github.com/client9/stringencoders
-Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
+reserved.
Numeric decoder derived from from TCL library
http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
@@ -48,13 +49,13 @@ Numeric decoder derived from from TCL library
#include <../../../tslibs/src/datetime/np_datetime_strings.h>
#include "datetime.h"
-#define NPY_JSON_BUFSIZE 32768
-
static PyTypeObject *type_decimal;
static PyTypeObject *cls_dataframe;
static PyTypeObject *cls_series;
static PyTypeObject *cls_index;
static PyTypeObject *cls_nat;
+PyObject *cls_timestamp;
+PyObject *cls_timedelta;
npy_int64 get_nat(void) { return NPY_MIN_INT64; }
@@ -64,9 +65,9 @@ typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti,
typedef struct __NpyArrContext {
PyObject *array;
char *dataptr;
- int curdim; // current dimension in array's order
- int stridedim; // dimension we are striding over
- int inc; // stride dimension increment (+/- 1)
+ int curdim; // current dimension in array's order
+ int stridedim; // dimension we are striding over
+ int inc; // stride dimension increment (+/- 1)
npy_intp dim;
npy_intp stride;
npy_intp ndim;
@@ -83,8 +84,8 @@ typedef struct __PdBlockContext {
int ncols;
int transpose;
- int *cindices; // frame column -> block column map
- NpyArrContext **npyCtxts; // NpyArrContext for each column
+ int *cindices; // frame column -> block column map
+ NpyArrContext **npyCtxts; // NpyArrContext for each column
} PdBlockContext;
typedef struct __TypeContext {
@@ -148,13 +149,12 @@ enum PANDAS_FORMAT { SPLIT, RECORDS, INDEX, COLUMNS, VALUES };
int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
-void *initObjToJSON(void)
-{
+void *initObjToJSON(void) {
PyObject *mod_pandas;
PyObject *mod_nattype;
PyObject *mod_decimal = PyImport_ImportModule("decimal");
type_decimal =
- (PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal");
+ (PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal");
Py_DECREF(mod_decimal);
PyDateTime_IMPORT;
@@ -166,13 +166,15 @@ void *initObjToJSON(void)
cls_index = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Index");
cls_series =
(PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Series");
+ cls_timestamp = PyObject_GetAttrString(mod_pandas, "Timestamp");
+ cls_timedelta = PyObject_GetAttrString(mod_pandas, "Timedelta");
Py_DECREF(mod_pandas);
}
mod_nattype = PyImport_ImportModule("pandas._libs.tslibs.nattype");
if (mod_nattype) {
- cls_nat = (PyTypeObject *)PyObject_GetAttrString(mod_nattype,
- "NaTType");
+ cls_nat =
+ (PyTypeObject *)PyObject_GetAttrString(mod_nattype, "NaTType");
Py_DECREF(mod_nattype);
}
@@ -210,7 +212,6 @@ static TypeContext *createTypeContext(void) {
return pc;
}
-
static int is_sparse_array(PyObject *obj) {
// TODO can be removed again once SparseArray.values is removed (GH26421)
if (PyObject_HasAttrString(obj, "_subtyp")) {
@@ -225,7 +226,6 @@ static int is_sparse_array(PyObject *obj) {
return 0;
}
-
static PyObject *get_values(PyObject *obj) {
PyObject *values = NULL;
@@ -240,7 +240,8 @@ static PyObject *get_values(PyObject *obj) {
values = PyObject_CallMethod(values, "to_numpy", NULL);
}
- if (!is_sparse_array(values) && PyObject_HasAttrString(values, "values")) {
+ if (!is_sparse_array(values) &&
+ PyObject_HasAttrString(values, "values")) {
PyObject *subvals = get_values(values);
PyErr_Clear();
PRINTMARK();
@@ -355,20 +356,20 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) {
}
static npy_int64 get_long_attr(PyObject *o, const char *attr) {
- npy_int64 long_val;
- PyObject *value = PyObject_GetAttrString(o, attr);
- long_val = (PyLong_Check(value) ?
- PyLong_AsLongLong(value) : PyLong_AsLong(value));
- Py_DECREF(value);
- return long_val;
+ npy_int64 long_val;
+ PyObject *value = PyObject_GetAttrString(o, attr);
+ long_val =
+ (PyLong_Check(value) ? PyLong_AsLongLong(value) : PyLong_AsLong(value));
+ Py_DECREF(value);
+ return long_val;
}
static npy_float64 total_seconds(PyObject *td) {
- npy_float64 double_val;
- PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL);
- double_val = PyFloat_AS_DOUBLE(value);
- Py_DECREF(value);
- return double_val;
+ npy_float64 double_val;
+ PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL);
+ double_val = PyFloat_AS_DOUBLE(value);
+ Py_DECREF(value);
+ return double_val;
}
static PyObject *get_item(PyObject *obj, Py_ssize_t i) {
@@ -434,7 +435,7 @@ static void *PyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
return NULL;
}
-static void *PyStringToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
+static void *PyBytesToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
size_t *_outLen) {
PyObject *obj = (PyObject *)_obj;
*_outLen = PyBytes_GET_SIZE(obj);
@@ -448,7 +449,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
if (PyUnicode_IS_COMPACT_ASCII(obj)) {
Py_ssize_t len;
- char *data = (char*)PyUnicode_AsUTF8AndSize(obj, &len);
+ char *data = (char *)PyUnicode_AsUTF8AndSize(obj, &len);
*_outLen = len;
return data;
}
@@ -503,7 +504,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc,
// TODO(anyone): Does not appear to be reached in tests.
pandas_datetime_to_datetimestruct(obj->obval,
- (NPY_DATETIMEUNIT)obj->obmeta.base, &dts);
+ (NPY_DATETIMEUNIT)obj->obmeta.base, &dts);
return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen);
}
@@ -662,9 +663,9 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
GET_TC(tc)->npyarr = npyarr;
if (!npyarr) {
- PyErr_NoMemory();
- GET_TC(tc)->iterNext = NpyArr_iterNextNone;
- return;
+ PyErr_NoMemory();
+ GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ return;
}
npyarr->array = (PyObject *)obj;
@@ -675,17 +676,17 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
npyarr->type_num = PyArray_DESCR(obj)->type_num;
if (GET_TC(tc)->transpose) {
- npyarr->dim = PyArray_DIM(obj, npyarr->ndim);
- npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim);
- npyarr->stridedim = npyarr->ndim;
- npyarr->index[npyarr->ndim] = 0;
- npyarr->inc = -1;
+ npyarr->dim = PyArray_DIM(obj, npyarr->ndim);
+ npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim);
+ npyarr->stridedim = npyarr->ndim;
+ npyarr->index[npyarr->ndim] = 0;
+ npyarr->inc = -1;
} else {
- npyarr->dim = PyArray_DIM(obj, 0);
- npyarr->stride = PyArray_STRIDE(obj, 0);
- npyarr->stridedim = 0;
- npyarr->index[0] = 0;
- npyarr->inc = 1;
+ npyarr->dim = PyArray_DIM(obj, 0);
+ npyarr->stride = PyArray_STRIDE(obj, 0);
+ npyarr->stridedim = 0;
+ npyarr->index[0] = 0;
+ npyarr->inc = 1;
}
npyarr->columnLabels = GET_TC(tc)->columnLabels;
@@ -733,8 +734,7 @@ int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
NpyArr_freeItemValue(obj, tc);
- if (PyArray_ISDATETIME(npyarr->array))
- {
+ if (PyArray_ISDATETIME(npyarr->array)) {
PRINTMARK();
GET_TC(tc)->itemValue = obj;
Py_INCREF(obj);
@@ -787,30 +787,23 @@ JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-static void NpyArr_getLabel(JSOBJ obj, JSONTypeContext *tc, size_t *outLen,
- npy_intp idx, char **labels) {
- JSONObjectEncoder *enc = (JSONObjectEncoder *)tc->encoder;
- PRINTMARK();
- *outLen = strlen(labels[idx]);
- Buffer_Reserve(enc, *outLen);
- memcpy(enc->offset, labels[idx], sizeof(char) * (*outLen));
- enc->offset += *outLen;
- *outLen = 0;
-}
-
char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) {
NpyArrContext *npyarr = GET_TC(tc)->npyarr;
npy_intp idx;
PRINTMARK();
+ char *cStr;
if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) {
idx = npyarr->index[npyarr->stridedim] - 1;
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels);
+ cStr = npyarr->columnLabels[idx];
} else {
idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels);
+ cStr = npyarr->rowLabels[idx];
}
- return NULL;
+
+ *outLen = strlen(cStr);
+
+ return cStr;
}
//=============================================================================
@@ -852,19 +845,22 @@ char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) {
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
NpyArrContext *npyarr = blkCtxt->npyCtxts[0];
npy_intp idx;
+ char *cStr;
PRINTMARK();
if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) {
idx = blkCtxt->colIdx - 1;
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels);
+ cStr = npyarr->columnLabels[idx];
} else {
idx = GET_TC(tc)->iterNext != PdBlock_iterNext
? npyarr->index[npyarr->stridedim - npyarr->inc] - 1
: npyarr->index[npyarr->stridedim];
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels);
+ cStr = npyarr->rowLabels[idx];
}
- return NULL;
+
+ *outLen = strlen(cStr);
+ return cStr;
}
char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc,
@@ -872,16 +868,19 @@ char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc,
PdBlockContext *blkCtxt = GET_TC(tc)->pdblock;
NpyArrContext *npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx];
npy_intp idx;
+ char *cStr;
PRINTMARK();
if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) {
idx = npyarr->index[npyarr->stridedim] - 1;
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels);
+ cStr = npyarr->columnLabels[idx];
} else {
idx = blkCtxt->colIdx;
- NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels);
+ cStr = npyarr->rowLabels[idx];
}
- return NULL;
+
+ *outLen = strlen(cStr);
+ return cStr;
}
int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) {
@@ -942,9 +941,9 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) {
dtype = PyArray_DescrFromType(NPY_INT64);
obj = (PyObject *)_obj;
- GET_TC(tc)
- ->iterGetName = GET_TC(tc)->transpose ? PdBlock_iterGetName_Transpose
- : PdBlock_iterGetName;
+ GET_TC(tc)->iterGetName = GET_TC(tc)->transpose
+ ? PdBlock_iterGetName_Transpose
+ : PdBlock_iterGetName;
blkCtxt = PyObject_Malloc(sizeof(PdBlockContext));
if (!blkCtxt) {
@@ -1395,7 +1394,7 @@ void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
GET_TC(tc)->index = 0;
GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char));
- enc->outputFormat = VALUES; // for contained series
+ enc->outputFormat = VALUES; // for contained series
if (!GET_TC(tc)->cStr) {
PyErr_NoMemory();
}
@@ -1454,7 +1453,7 @@ void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder;
GET_TC(tc)->index = 0;
GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char));
- enc->outputFormat = VALUES; // for contained series & index
+ enc->outputFormat = VALUES; // for contained series & index
if (!GET_TC(tc)->cStr) {
PyErr_NoMemory();
}
@@ -1578,16 +1577,30 @@ void NpyArr_freeLabels(char **labels, npy_intp len) {
}
}
-char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc,
+/*
+ * Function: NpyArr_encodeLabels
+ * -----------------------------
+ *
+ * Builds an array of "encoded" labels.
+ *
+ * labels: PyArrayObject pointer for labels to be "encoded"
+ * num : number of labels
+ *
+ * "encode" is quoted above because we aren't really doing encoding
+ * For historical reasons this function would actually encode the entire
+ * array into a separate buffer with a separate call to JSON_Encode
+ * and would leave it to complex pointer manipulation from there to
+ * unpack values as needed. To make things simpler and more idiomatic
+ * this has instead just stringified any input save for datetime values,
+ * which may need to be represented in various formats.
+ */
+char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
npy_intp num) {
// NOTE this function steals a reference to labels.
- PyObjectEncoder *pyenc = (PyObjectEncoder *)enc;
PyObject *item = NULL;
- npy_intp i, stride, len, need_quotes;
+ npy_intp i, stride, len;
char **ret;
- char *dataptr, *cLabel, *origend, *origst, *origoffset;
- char labelBuffer[NPY_JSON_BUFSIZE];
- PyArray_GetItemFunc *getitem;
+ char *dataptr, *cLabel;
int type_num;
PRINTMARK();
@@ -1614,68 +1627,137 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc,
ret[i] = NULL;
}
- origst = enc->start;
- origend = enc->end;
- origoffset = enc->offset;
-
stride = PyArray_STRIDE(labels, 0);
dataptr = PyArray_DATA(labels);
- getitem = (PyArray_GetItemFunc *)PyArray_DESCR(labels)->f->getitem;
type_num = PyArray_TYPE(labels);
for (i = 0; i < num; i++) {
- if (PyTypeNum_ISDATETIME(type_num) || PyTypeNum_ISNUMBER(type_num))
- {
- item = (PyObject *)labels;
- pyenc->npyType = type_num;
- pyenc->npyValue = dataptr;
- } else {
- item = getitem(dataptr, labels);
- if (!item) {
+ item = PyArray_GETITEM(labels, dataptr);
+ if (!item) {
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ // TODO: for any matches on type_num (date and timedeltas) should use a
+ // vectorized solution to convert to epoch or iso formats
+ if (enc->datetimeIso &&
+ (type_num == NPY_TIMEDELTA || PyDelta_Check(item))) {
+ PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item);
+ if (td == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ PyObject *iso = PyObject_CallMethod(td, "isoformat", NULL);
+ Py_DECREF(td);
+ if (iso == NULL) {
+ Py_DECREF(item);
NpyArr_freeLabels(ret, num);
ret = 0;
break;
}
- }
- cLabel = JSON_EncodeObject(item, enc, labelBuffer, NPY_JSON_BUFSIZE);
+ cLabel = (char *)PyUnicode_AsUTF8(iso);
+ Py_DECREF(iso);
+ len = strlen(cLabel);
+ } else if (PyTypeNum_ISDATETIME(type_num) || PyDateTime_Check(item) ||
+ PyDate_Check(item)) {
+ PyObject *ts = PyObject_CallFunction(cls_timestamp, "(O)", item);
+ if (ts == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
- if (item != (PyObject *)labels) {
- Py_DECREF(item);
+ if (enc->datetimeIso) {
+ PyObject *iso = PyObject_CallMethod(ts, "isoformat", NULL);
+ Py_DECREF(ts);
+ if (iso == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ cLabel = (char *)PyUnicode_AsUTF8(iso);
+ Py_DECREF(iso);
+ len = strlen(cLabel);
+ } else {
+ npy_int64 value;
+ // TODO: refactor to not duplicate what goes on in
+ // beginTypeContext
+ if (PyObject_HasAttrString(ts, "value")) {
+ PRINTMARK();
+ value = get_long_attr(ts, "value");
+ } else {
+ PRINTMARK();
+ value = total_seconds(ts) *
+ 1000000000LL; // nanoseconds per second
+ }
+ Py_DECREF(ts);
+
+ switch (enc->datetimeUnit) {
+ case NPY_FR_ns:
+ break;
+ case NPY_FR_us:
+ value /= 1000LL;
+ break;
+ case NPY_FR_ms:
+ value /= 1000000LL;
+ break;
+ case NPY_FR_s:
+ value /= 1000000000LL;
+ break;
+ default:
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ char buf[21] = {0}; // 21 chars for 2**63 as string
+ cLabel = buf;
+ sprintf(buf, "%" NPY_INT64_FMT, value);
+ len = strlen(cLabel);
+ }
+        } else {  // Fallback to string representation
+ PyObject *str = PyObject_Str(item);
+ if (str == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ cLabel = (char *)PyUnicode_AsUTF8(str);
+ Py_DECREF(str);
+ len = strlen(cLabel);
}
- if (PyErr_Occurred() || enc->errorMsg) {
+ Py_DECREF(item);
+ // Add 1 to include NULL terminator
+ ret[i] = PyObject_Malloc(len + 1);
+ memcpy(ret[i], cLabel, len + 1);
+
+ if (PyErr_Occurred()) {
NpyArr_freeLabels(ret, num);
ret = 0;
break;
}
- need_quotes = ((*cLabel) != '"');
- len = enc->offset - cLabel + 1 + 2 * need_quotes;
- ret[i] = PyObject_Malloc(sizeof(char) * len);
-
if (!ret[i]) {
PyErr_NoMemory();
ret = 0;
break;
}
- if (need_quotes) {
- ret[i][0] = '"';
- memcpy(ret[i] + 1, cLabel, sizeof(char) * (len - 4));
- ret[i][len - 3] = '"';
- } else {
- memcpy(ret[i], cLabel, sizeof(char) * (len - 2));
- }
- ret[i][len - 2] = ':';
- ret[i][len - 1] = '\0';
dataptr += stride;
}
- enc->start = origst;
- enc->end = origend;
- enc->offset = origoffset;
-
Py_DECREF(labels);
return ret;
}
@@ -1787,7 +1869,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
return;
} else if (PyBytes_Check(obj)) {
PRINTMARK();
- pc->PyTypeToJSON = PyStringToUTF8;
+ pc->PyTypeToJSON = PyBytesToUTF8;
tc->type = JT_UTF8;
return;
} else if (PyUnicode_Check(obj)) {
@@ -1840,23 +1922,22 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
value = get_long_attr(obj, "value");
} else {
PRINTMARK();
- value =
- total_seconds(obj) * 1000000000LL; // nanoseconds per second
+ value = total_seconds(obj) * 1000000000LL; // nanoseconds per second
}
base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
switch (base) {
- case NPY_FR_ns:
- break;
- case NPY_FR_us:
- value /= 1000LL;
- break;
- case NPY_FR_ms:
- value /= 1000000LL;
- break;
- case NPY_FR_s:
- value /= 1000000000LL;
- break;
+ case NPY_FR_ns:
+ break;
+ case NPY_FR_us:
+ value /= 1000LL;
+ break;
+ case NPY_FR_ms:
+ value /= 1000000LL;
+ break;
+ case NPY_FR_s:
+ value /= 1000000000LL;
+ break;
}
exc = PyErr_Occurred();
@@ -1971,8 +2052,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
goto INVALID;
}
pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values,
- (JSONObjectEncoder *)enc,
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc,
pc->columnLabelsLen);
if (!pc->columnLabels) {
goto INVALID;
@@ -2074,8 +2154,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
goto INVALID;
}
pc->columnLabelsLen = PyObject_Size(tmpObj);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values,
- (JSONObjectEncoder *)enc,
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc,
pc->columnLabelsLen);
Py_DECREF(tmpObj);
if (!pc->columnLabels) {
@@ -2096,9 +2175,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
goto INVALID;
}
pc->rowLabelsLen = PyObject_Size(tmpObj);
- pc->rowLabels =
- NpyArr_encodeLabels((PyArrayObject *)values,
- (JSONObjectEncoder *)enc, pc->rowLabelsLen);
+ pc->rowLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc,
+ pc->rowLabelsLen);
Py_DECREF(tmpObj);
tmpObj = (enc->outputFormat == INDEX
? PyObject_GetAttrString(obj, "columns")
@@ -2116,8 +2194,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
goto INVALID;
}
pc->columnLabelsLen = PyObject_Size(tmpObj);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values,
- (JSONObjectEncoder *)enc,
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc,
pc->columnLabelsLen);
Py_DECREF(tmpObj);
if (!pc->columnLabels) {
@@ -2242,7 +2319,8 @@ void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) {
PyObject_Free(GET_TC(tc)->cStr);
GET_TC(tc)->cStr = NULL;
- if (tc->prv != &(((PyObjectEncoder *)tc->encoder)->basicTypeContext)) { // NOLINT
+ if (tc->prv !=
+ &(((PyObjectEncoder *)tc->encoder)->basicTypeContext)) { // NOLINT
PyObject_Free(tc->prv);
}
tc->prv = NULL;
@@ -2295,22 +2373,29 @@ char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) {
}
PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) {
- static char *kwlist[] = {
- "obj", "ensure_ascii", "double_precision", "encode_html_chars",
- "orient", "date_unit", "iso_dates", "default_handler",
- NULL};
+ static char *kwlist[] = {"obj",
+ "ensure_ascii",
+ "double_precision",
+ "encode_html_chars",
+ "orient",
+ "date_unit",
+ "iso_dates",
+ "default_handler",
+ "indent",
+ NULL};
char buffer[65536];
char *ret;
PyObject *newobj;
PyObject *oinput = NULL;
PyObject *oensureAscii = NULL;
- int idoublePrecision = 10; // default double precision setting
+ int idoublePrecision = 10; // default double precision setting
PyObject *oencodeHTMLChars = NULL;
char *sOrient = NULL;
char *sdateFormat = NULL;
PyObject *oisoDates = 0;
PyObject *odefHandler = 0;
+ int indent = 0;
PyObjectEncoder pyEncoder = {{
Object_beginTypeContext,
@@ -2328,10 +2413,11 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) {
PyObject_Malloc,
PyObject_Realloc,
PyObject_Free,
- -1, // recursionMax
+ -1, // recursionMax
idoublePrecision,
- 1, // forceAscii
- 0, // encodeHTMLChars
+ 1, // forceAscii
+ 0, // encodeHTMLChars
+ 0, // indent
}};
JSONObjectEncoder *encoder = (JSONObjectEncoder *)&pyEncoder;
@@ -2356,10 +2442,10 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) {
PRINTMARK();
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiOssOO", kwlist, &oinput,
- &oensureAscii, &idoublePrecision,
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiOssOOi", kwlist,
+ &oinput, &oensureAscii, &idoublePrecision,
&oencodeHTMLChars, &sOrient, &sdateFormat,
- &oisoDates, &odefHandler)) {
+ &oisoDates, &odefHandler, &indent)) {
return NULL;
}
@@ -2425,11 +2511,12 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) {
pyEncoder.defaultHandler = odefHandler;
}
+ encoder->indent = indent;
+
pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
PRINTMARK();
ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer));
PRINTMARK();
-
if (PyErr_Occurred()) {
PRINTMARK();
return NULL;
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index ae0d3ac1a61ca..7ad5ea189763c 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -139,15 +139,13 @@ cpdef assert_almost_equal(a, b,
obj, '{0} shapes are different'.format(obj),
a.shape, b.shape)
- if check_dtype and not is_dtype_equal(a, b):
+ if check_dtype and not is_dtype_equal(a.dtype, b.dtype):
from pandas.util.testing import assert_attr_equal
assert_attr_equal('dtype', a, b, obj=obj)
- try:
- if array_equivalent(a, b, strict_nan=True):
- return True
- except:
- pass
+ if array_equivalent(a, b, strict_nan=True):
+ return True
+
else:
na, nb = len(a), len(b)
@@ -188,6 +186,7 @@ cpdef assert_almost_equal(a, b,
# object comparison
return True
if isna(a) and isna(b):
+ # TODO: Should require same-dtype NA?
# nan / None comparison
return True
if is_comparable_as_number(a) and is_comparable_as_number(b):
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 4e49f660f5e19..0f1657480e4b3 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -71,7 +71,7 @@ cdef inline object create_time_from_ts(
@cython.wraparound(False)
@cython.boundscheck(False)
-def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None,
+def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
str box="datetime"):
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp
@@ -344,14 +344,13 @@ def array_with_unit_to_datetime(ndarray values, object unit,
# try a quick conversion to i8
# if we have nulls that are not type-compat
# then need to iterate
- try:
+ if values.dtype.kind == "i":
+ # Note: this condition makes the casting="same_kind" redundant
iresult = values.astype('i8', casting='same_kind', copy=False)
mask = iresult == NPY_NAT
iresult[mask] = 0
fvalues = iresult.astype('f8') * m
need_to_iterate = False
- except:
- pass
# check the bounds
if not need_to_iterate:
@@ -406,7 +405,7 @@ def array_with_unit_to_datetime(ndarray values, object unit,
elif is_ignore:
raise AssertionError
iresult[i] = NPY_NAT
- except:
+ except OverflowError:
if is_raise:
raise OutOfBoundsDatetime(
"cannot convert input {val} with the unit "
@@ -447,7 +446,7 @@ def array_with_unit_to_datetime(ndarray values, object unit,
else:
try:
oresult[i] = Timestamp(cast_from_unit(val, unit))
- except:
+ except OverflowError:
oresult[i] = val
elif isinstance(val, str):
@@ -574,7 +573,7 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise',
# datetimes/strings, then we must coerce)
try:
iresult[i] = cast_from_unit(val, 'ns')
- except:
+ except OverflowError:
iresult[i] = NPY_NAT
elif isinstance(val, str):
@@ -610,16 +609,17 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise',
py_dt = parse_datetime_string(val,
dayfirst=dayfirst,
yearfirst=yearfirst)
- except Exception:
+ # If the dateutil parser returned tzinfo, capture it
+ # to check if all arguments have the same tzinfo
+ tz = py_dt.utcoffset()
+
+ except (ValueError, OverflowError):
if is_coerce:
iresult[i] = NPY_NAT
continue
raise TypeError("invalid string coercion to "
"datetime")
- # If the dateutil parser returned tzinfo, capture it
- # to check if all arguments have the same tzinfo
- tz = py_dt.utcoffset()
if tz is not None:
seen_datetime_offset = 1
# dateutil timezone objects cannot be hashed, so
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 67a323782a836..8d3b00e4a44b9 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -7,3 +7,6 @@
from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta
from .timestamps import Timestamp
from .tzconversion import tz_convert_single
+
+# import fails if we do this before np_datetime
+from .c_timestamp import NullFrequencyError # isort:skip
diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx
index 2d3ea3e14775e..032363d867196 100644
--- a/pandas/_libs/tslibs/c_timestamp.pyx
+++ b/pandas/_libs/tslibs/c_timestamp.pyx
@@ -14,12 +14,12 @@ shadows the python class, where we do any heavy lifting.
import warnings
-from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare,
- Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE)
+from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare,
+ Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE)
import numpy as np
cimport numpy as cnp
-from numpy cimport int64_t, int8_t
+from numpy cimport int64_t, int8_t, uint8_t, ndarray
cnp.import_array()
from cpython.datetime cimport (datetime,
@@ -42,6 +42,15 @@ from pandas._libs.tslibs.timezones import UTC
from pandas._libs.tslibs.tzconversion cimport tz_convert_single
+class NullFrequencyError(ValueError):
+ """
+ Error raised when a null `freq` attribute is used in an operation
+ that needs a non-null frequency, particularly `DatetimeIndex.shift`,
+ `TimedeltaIndex.shift`, `PeriodIndex.shift`.
+ """
+ pass
+
+
def maybe_integer_op_deprecated(obj):
# GH#22535 add/sub of integers and int-arrays is deprecated
if obj.freq is not None:
@@ -131,7 +140,8 @@ cdef class _Timestamp(datetime):
try:
stamp += zone.strftime(' %%Z')
- except:
+ except AttributeError:
+ # e.g. tzlocal has no `strftime`
pass
tz = ", tz='{0}'".format(zone) if zone is not None else ""
@@ -227,8 +237,8 @@ cdef class _Timestamp(datetime):
# to be compat with Period
return NaT
elif self.freq is None:
- raise ValueError("Cannot add integral value to Timestamp "
- "without freq.")
+ raise NullFrequencyError(
+ "Cannot add integral value to Timestamp without freq.")
return self.__class__((self.freq * other).apply(self),
freq=self.freq)
@@ -246,11 +256,17 @@ cdef class _Timestamp(datetime):
result = self.__class__(self.value + nanos,
tz=self.tzinfo, freq=self.freq)
- if getattr(other, 'normalize', False):
- # DateOffset
- result = result.normalize()
return result
+ elif is_array(other):
+ if other.dtype.kind in ['i', 'u']:
+ maybe_integer_op_deprecated(self)
+ if self.freq is None:
+ raise NullFrequencyError(
+ "Cannot add integer-dtype array "
+ "to Timestamp without freq.")
+ return self.freq * other + self
+
# index/series like
elif hasattr(other, '_typ'):
return NotImplemented
@@ -262,24 +278,27 @@ cdef class _Timestamp(datetime):
return result
def __sub__(self, other):
+
if (is_timedelta64_object(other) or is_integer_object(other) or
PyDelta_Check(other) or hasattr(other, 'delta')):
# `delta` attribute is for offsets.Tick or offsets.Week obj
neg_other = -other
return self + neg_other
- typ = getattr(other, '_typ', None)
+ elif is_array(other):
+ if other.dtype.kind in ['i', 'u']:
+ maybe_integer_op_deprecated(self)
+ if self.freq is None:
+ raise NullFrequencyError(
+ "Cannot subtract integer-dtype array "
+ "from Timestamp without freq.")
+ return self - self.freq * other
- # a Timestamp-DatetimeIndex -> yields a negative TimedeltaIndex
- if typ in ('datetimeindex', 'datetimearray'):
- # timezone comparison is performed in DatetimeIndex._sub_datelike
- return -other.__sub__(self)
-
- # a Timestamp-TimedeltaIndex -> yields a negative TimedeltaIndex
- elif typ in ('timedeltaindex', 'timedeltaarray'):
- return (-other).__add__(self)
+ typ = getattr(other, '_typ', None)
+ if typ is not None:
+ return NotImplemented
- elif other is NaT:
+ if other is NaT:
return NaT
# coerce if necessary if we are a Timestamp-like
@@ -302,10 +321,12 @@ cdef class _Timestamp(datetime):
return Timedelta(self.value - other.value)
except (OverflowError, OutOfBoundsDatetime):
pass
+ elif is_datetime64_object(self):
+ # GH#28286 cython semantics for __rsub__, `other` is actually
+ # the Timestamp
+ return type(other)(self) - other
- # scalar Timestamp/datetime - Timedelta -> yields a Timestamp (with
- # same timezone if specified)
- return datetime.__sub__(self, other)
+ return NotImplemented
cdef int64_t _maybe_convert_value_to_local(self):
"""Convert UTC i8 value to local i8 value if tz exists"""
@@ -320,7 +341,7 @@ cdef class _Timestamp(datetime):
cdef:
int64_t val
dict kwds
- int8_t out[1]
+ ndarray[uint8_t, cast=True] out
int month_kw
freq = self.freq
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 0a3f4ed3cc91d..bd74180403ad9 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -519,7 +519,7 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit,
try:
ts = parse_datetime_string(ts, dayfirst=dayfirst,
yearfirst=yearfirst)
- except Exception:
+ except (ValueError, OverflowError):
raise ValueError("could not convert string to Timestamp")
return convert_to_tsobject(ts, tz, unit, dayfirst, yearfirst)
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 2a41b5ff2339c..2ed85595f7e3a 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -45,7 +45,7 @@ def get_time_micros(ndarray[int64_t] dtindex):
@cython.wraparound(False)
@cython.boundscheck(False)
-def build_field_sarray(int64_t[:] dtindex):
+def build_field_sarray(const int64_t[:] dtindex):
"""
Datetime as int64 representation to a structured array of fields
"""
@@ -87,7 +87,7 @@ def build_field_sarray(int64_t[:] dtindex):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_name_field(int64_t[:] dtindex, object field, object locale=None):
+def get_date_name_field(const int64_t[:] dtindex, object field, object locale=None):
"""
Given a int64-based datetime index, return array of strings of date
name based on requested field (e.g. weekday_name)
@@ -137,7 +137,7 @@ def get_date_name_field(int64_t[:] dtindex, object field, object locale=None):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_start_end_field(int64_t[:] dtindex, object field,
+def get_start_end_field(const int64_t[:] dtindex, object field,
object freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
@@ -380,7 +380,7 @@ def get_start_end_field(int64_t[:] dtindex, object field,
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_field(int64_t[:] dtindex, object field):
+def get_date_field(const int64_t[:] dtindex, object field):
"""
Given a int64-based datetime index, extract the year, month, etc.,
field and return an array of these values.
@@ -542,7 +542,7 @@ def get_date_field(int64_t[:] dtindex, object field):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_timedelta_field(int64_t[:] tdindex, object field):
+def get_timedelta_field(const int64_t[:] tdindex, object field):
"""
Given a int64-based timedelta index, extract the days, hrs, sec.,
field and return an array of these values.
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
index f2dcd37b191ed..b29c841896072 100644
--- a/pandas/_libs/tslibs/frequencies.pyx
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -138,6 +138,10 @@ cpdef get_freq_code(freqstr):
-------
return : tuple of base frequency code and stride (mult)
+ Raises
+ ------
+    TypeError : if passed a tuple with incorrect types
+
Examples
--------
>>> get_freq_code('3D')
@@ -156,16 +160,16 @@ cpdef get_freq_code(freqstr):
if is_integer_object(freqstr[0]) and is_integer_object(freqstr[1]):
# e.g., freqstr = (2000, 1)
return freqstr
+ elif is_integer_object(freqstr[0]):
+ # Note: passing freqstr[1] below will raise TypeError if that
+ # is not a str
+ code = _period_str_to_code(freqstr[1])
+ stride = freqstr[0]
+ return code, stride
else:
# e.g., freqstr = ('T', 5)
- try:
- code = _period_str_to_code(freqstr[0])
- stride = freqstr[1]
- except:
- if is_integer_object(freqstr[1]):
- raise
- code = _period_str_to_code(freqstr[1])
- stride = freqstr[0]
+ code = _period_str_to_code(freqstr[0])
+ stride = freqstr[1]
return code, stride
if is_integer_object(freqstr):
@@ -177,7 +181,7 @@ cpdef get_freq_code(freqstr):
return code, stride
-cpdef _base_and_stride(freqstr):
+cpdef _base_and_stride(str freqstr):
"""
Return base freq and stride info from string representation
@@ -207,7 +211,7 @@ cpdef _base_and_stride(freqstr):
return base, stride
-cpdef _period_str_to_code(freqstr):
+cpdef _period_str_to_code(str freqstr):
freqstr = _lite_rule_alias.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 7f35a11e57b71..328fc26e4fef6 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -1,4 +1,4 @@
-from cpython cimport (
+from cpython.object cimport (
PyObject_RichCompare,
Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE)
@@ -92,6 +92,9 @@ cdef class _NaT(datetime):
# int64_t value
# object freq
+ # higher than np.ndarray and np.matrix
+ __array_priority__ = 100
+
def __hash__(_NaT self):
# py3k needs this defined here
return hash(self.value)
@@ -103,58 +106,108 @@ cdef class _NaT(datetime):
if ndim == -1:
return _nat_scalar_rules[op]
- if ndim == 0:
+ elif util.is_array(other):
+ result = np.empty(other.shape, dtype=np.bool_)
+ result.fill(_nat_scalar_rules[op])
+ return result
+
+ elif ndim == 0:
if is_datetime64_object(other):
return _nat_scalar_rules[op]
else:
raise TypeError('Cannot compare type %r with type %r' %
(type(self).__name__, type(other).__name__))
+
# Note: instead of passing "other, self, _reverse_ops[op]", we observe
# that `_nat_scalar_rules` is invariant under `_reverse_ops`,
# rendering it unnecessary.
return PyObject_RichCompare(other, self, op)
def __add__(self, other):
+ if self is not c_NaT:
+ # cython __radd__ semantics
+ self, other = other, self
+
if PyDateTime_Check(other):
return c_NaT
-
+ elif PyDelta_Check(other):
+ return c_NaT
+ elif is_datetime64_object(other) or is_timedelta64_object(other):
+ return c_NaT
elif hasattr(other, 'delta'):
# Timedelta, offsets.Tick, offsets.Week
return c_NaT
- elif getattr(other, '_typ', None) in ['dateoffset', 'series',
- 'period', 'datetimeindex',
- 'timedeltaindex']:
- # Duplicate logic in _Timestamp.__add__ to avoid needing
- # to subclass; allows us to @final(_Timestamp.__add__)
- return NotImplemented
- return c_NaT
+
+ elif is_integer_object(other) or util.is_period_object(other):
+ # For Period compat
+ # TODO: the integer behavior is deprecated, remove it
+ return c_NaT
+
+ elif util.is_array(other):
+ if other.dtype.kind in 'mM':
+ # If we are adding to datetime64, we treat NaT as timedelta
+ # Either way, result dtype is datetime64
+ result = np.empty(other.shape, dtype="datetime64[ns]")
+ result.fill("NaT")
+ return result
+ raise TypeError("Cannot add NaT to ndarray with dtype {dtype}"
+ .format(dtype=other.dtype))
+
+ return NotImplemented
def __sub__(self, other):
# Duplicate some logic from _Timestamp.__sub__ to avoid needing
# to subclass; allows us to @final(_Timestamp.__sub__)
- if PyDateTime_Check(other):
- return NaT
- elif PyDelta_Check(other):
- return NaT
-
- elif getattr(other, '_typ', None) == 'datetimeindex':
- # a Timestamp-DatetimeIndex -> yields a negative TimedeltaIndex
- return -other.__sub__(self)
+ cdef:
+ bint is_rsub = False
- elif getattr(other, '_typ', None) == 'timedeltaindex':
- # a Timestamp-TimedeltaIndex -> yields a negative TimedeltaIndex
- return (-other).__add__(self)
+ if self is not c_NaT:
+ # cython __rsub__ semantics
+ self, other = other, self
+ is_rsub = True
+ if PyDateTime_Check(other):
+ return c_NaT
+ elif PyDelta_Check(other):
+ return c_NaT
+ elif is_datetime64_object(other) or is_timedelta64_object(other):
+ return c_NaT
elif hasattr(other, 'delta'):
# offsets.Tick, offsets.Week
- neg_other = -other
- return self + neg_other
+ return c_NaT
- elif getattr(other, '_typ', None) in ['period', 'series',
- 'periodindex', 'dateoffset']:
- return NotImplemented
+ elif is_integer_object(other) or util.is_period_object(other):
+ # For Period compat
+ # TODO: the integer behavior is deprecated, remove it
+ return c_NaT
- return NaT
+ elif util.is_array(other):
+ if other.dtype.kind == 'm':
+ if not is_rsub:
+ # NaT - timedelta64 we treat NaT as datetime64, so result
+ # is datetime64
+ result = np.empty(other.shape, dtype="datetime64[ns]")
+ result.fill("NaT")
+ return result
+
+ # timedelta64 - NaT we have to treat NaT as timedelta64
+ # for this to be meaningful, and the result is timedelta64
+ result = np.empty(other.shape, dtype="timedelta64[ns]")
+ result.fill("NaT")
+ return result
+
+ elif other.dtype.kind == 'M':
+ # We treat NaT as a datetime, so regardless of whether this is
+ # NaT - other or other - NaT, the result is timedelta64
+ result = np.empty(other.shape, dtype="timedelta64[ns]")
+ result.fill("NaT")
+ return result
+
+ raise TypeError(
+ "Cannot subtract NaT from ndarray with dtype {dtype}"
+ .format(dtype=other.dtype))
+
+ return NotImplemented
def __pos__(self):
return NaT
@@ -498,7 +551,7 @@ class NaTType(_NaT):
""")
round = _make_nat_func('round', # noqa:E128
"""
- Round the Timestamp to the specified resolution
+ Round the Timestamp to the specified resolution.
Parameters
----------
@@ -536,7 +589,7 @@ default 'raise'
""")
floor = _make_nat_func('floor', # noqa:E128
"""
- return a new Timestamp floored to this resolution
+ return a new Timestamp floored to this resolution.
Parameters
----------
@@ -570,7 +623,7 @@ default 'raise'
""")
ceil = _make_nat_func('ceil', # noqa:E128
"""
- return a new Timestamp ceiled to this resolution
+ return a new Timestamp ceiled to this resolution.
Parameters
----------
@@ -682,7 +735,7 @@ default 'raise'
""")
replace = _make_nat_func('replace', # noqa:E128
"""
- implements datetime.replace, handles nanoseconds
+ implements datetime.replace, handles nanoseconds.
Parameters
----------
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 7d362708015ce..e76f84265a327 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,4 +1,4 @@
-from cpython cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
+from cpython.object cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
from cpython.datetime cimport (datetime, date,
PyDateTime_IMPORT,
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index eb99f090e8565..ca70c8af45f2f 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -10,7 +10,8 @@ from libc.string cimport strchr
import cython
from cython import Py_ssize_t
-from cpython cimport PyObject_Str, PyUnicode_Join
+from cpython.object cimport PyObject_Str
+from cpython.unicode cimport PyUnicode_Join
from cpython.datetime cimport datetime, datetime_new, import_datetime
from cpython.version cimport PY_VERSION_HEX
@@ -308,9 +309,9 @@ cdef parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False,
parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME,
dayfirst=dayfirst, yearfirst=yearfirst,
ignoretz=False, tzinfos=None)
- except Exception as e:
+ except (ValueError, OverflowError) as err:
# TODO: allow raise of errors within instead
- raise DateParseError(e)
+ raise DateParseError(err)
if parsed is None:
raise DateParseError("Could not parse {dstr}".format(dstr=date_string))
return parsed, parsed, reso
@@ -587,15 +588,11 @@ def try_parse_dates(object[:] values, parser=None,
else:
parse_date = parser
- try:
- for i in range(n):
- if values[i] == '':
- result[i] = np.nan
- else:
- result[i] = parse_date(values[i])
- except Exception:
- # raise if passed parser and it failed
- raise
+ for i in range(n):
+ if values[i] == '':
+ result[i] = np.nan
+ else:
+ result[i] = parse_date(values[i])
return result.base # .base to access underlying ndarray
@@ -814,7 +811,7 @@ def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
if dt_str_parse is None or dt_str_split is None:
return None
- if not isinstance(dt_str, (str, unicode)):
+ if not isinstance(dt_str, str):
return None
day_attribute_and_format = (('day',), '%d', 2)
@@ -840,19 +837,16 @@ def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
try:
parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
- except:
+ except (ValueError, OverflowError):
# In case the datetime can't be parsed, its format cannot be guessed
return None
if parsed_datetime is None:
return None
- try:
- tokens = dt_str_split(dt_str)
- except:
- # In case the datetime string can't be split, its format cannot
- # be guessed
- return None
+ # the default dt_str_split from dateutil will never raise here; we assume
+ # that any user-provided function will not either.
+ tokens = dt_str_split(dt_str)
format_guess = [None] * len(tokens)
found_attrs = set()
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index c68d686ff2bf2..697e97e518b13 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,6 +1,6 @@
from datetime import datetime
-from cpython cimport (
+from cpython.object cimport (
PyObject_RichCompareBool,
Py_EQ, Py_NE)
@@ -21,7 +21,8 @@ PyDateTime_IMPORT
from pandas._libs.tslibs.np_datetime cimport (
npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct,
- pandas_datetime_to_datetimestruct, NPY_DATETIMEUNIT, NPY_FR_D)
+ pandas_datetime_to_datetimestruct, check_dts_bounds,
+ NPY_DATETIMEUNIT, NPY_FR_D)
cdef extern from "src/datetime/np_datetime.h":
int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
@@ -1011,7 +1012,7 @@ def dt64arr_to_periodarr(int64_t[:] dtarr, int freq, tz=None):
@cython.wraparound(False)
@cython.boundscheck(False)
-def periodarr_to_dt64arr(int64_t[:] periodarr, int freq):
+def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq):
"""
Convert array to datetime64 values from a set of ordinals corresponding to
periods per period convention.
@@ -1024,9 +1025,8 @@ def periodarr_to_dt64arr(int64_t[:] periodarr, int freq):
out = np.empty(l, dtype='i8')
- with nogil:
- for i in range(l):
- out[i] = period_ordinal_to_dt64(periodarr[i], freq)
+ for i in range(l):
+ out[i] = period_ordinal_to_dt64(periodarr[i], freq)
return out.base # .base to access underlying np.ndarray
@@ -1179,7 +1179,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min,
return get_period_ordinal(&dts, freq)
-cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil:
+cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1:
cdef:
npy_datetimestruct dts
@@ -1187,6 +1187,7 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil:
return NPY_NAT
get_date_info(ordinal, freq, &dts)
+ check_dts_bounds(&dts)
return dtstruct_to_dt64(&dts)
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index d93858cff5e05..fbda5f178e164 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -341,7 +341,8 @@ def array_strptime(object[:] values, object fmt,
return result, result_timezone.base
-"""_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored
+"""
+_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored
from the standard library, see
https://github.com/python/cpython/blob/master/Lib/_strptime.py
The original module-level docstring follows.
@@ -363,7 +364,8 @@ def _getlang():
class LocaleTime:
- """Stores and handles locale-specific information related to time.
+ """
+ Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
@@ -382,7 +384,8 @@ class LocaleTime:
"""
def __init__(self):
- """Set all attributes.
+ """
+ Set all attributes.
Order of methods called matters for dependency reasons.
@@ -399,7 +402,6 @@ class LocaleTime:
Only other possible issue is if someone changed the timezone and did
not call tz.tzset . That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
-
"""
self.lang = _getlang()
self.__calc_weekday()
@@ -518,15 +520,16 @@ class TimeRE(dict):
"""
def __init__(self, locale_time=None):
- """Create keys/values.
+ """
+ Create keys/values.
Order of execution is important for dependency reasons.
-
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
+ self._Z = None
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
@@ -555,21 +558,29 @@ class TimeRE(dict):
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
- 'Z': self.__seqToRE(pytz.all_timezones, 'Z'),
+ # 'Z' key is generated lazily via __getitem__
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+ def __getitem__(self, key):
+ if key == "Z":
+ # lazy computation
+ if self._Z is None:
+ self._Z = self.__seqToRE(pytz.all_timezones, 'Z')
+ return self._Z
+ return super().__getitem__(key)
+
def __seqToRE(self, to_convert, directive):
- """Convert a list to a regex string for matching a directive.
+ """
+ Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
-
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
@@ -582,11 +593,11 @@ class TimeRE(dict):
return '%s)' % regex
def pattern(self, format):
- """Return regex pattern for the format string.
+ """
+ Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
-
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
@@ -619,7 +630,8 @@ _regex_cache = {}
cdef int _calc_julian_from_U_or_W(int year, int week_of_year,
int day_of_week, int week_starts_Mon):
- """Calculate the Julian day based on the year, week of the year, and day of
+ """
+ Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0).
@@ -660,8 +672,10 @@ cdef int _calc_julian_from_U_or_W(int year, int week_of_year,
return 1 + days_to_week + day_of_week
-cdef object _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday):
- """Calculate the Julian day based on the ISO 8601 year, week, and weekday.
+cdef (int, int) _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday):
+ """
+ Calculate the Julian day based on the ISO 8601 year, week, and weekday.
+
ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
ISO week days range from 1 (Monday) to 7 (Sunday).
@@ -694,7 +708,7 @@ cdef object _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday):
return iso_year, ordinal
-cdef parse_timezone_directive(object z):
+cdef parse_timezone_directive(str z):
"""
Parse the '%z' directive and return a pytz.FixedOffset
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 6a32553fe2d38..b232042c70eac 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -4,7 +4,7 @@ import warnings
import cython
-from cpython cimport Py_NE, Py_EQ, PyObject_RichCompare
+from cpython.object cimport Py_NE, Py_EQ, PyObject_RichCompare
import numpy as np
cimport numpy as cnp
@@ -228,8 +228,13 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
# this is where all of the error handling will take place.
try:
for i in range(n):
- result[i] = parse_timedelta_string(values[i])
- except:
+ if values[i] is NaT:
+ # we allow this check in the fast-path because NaT is a C-object
+ # so this is an inexpensive check
+ iresult[i] = NPY_NAT
+ else:
+ result[i] = parse_timedelta_string(values[i])
+ except (TypeError, ValueError):
unit = parse_timedelta_unit(unit)
for i in range(n):
try:
@@ -309,7 +314,7 @@ cdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
return <int64_t>(base * m) + <int64_t>(frac * m)
-cdef inline parse_timedelta_string(object ts):
+cdef inline int64_t parse_timedelta_string(str ts) except? -1:
"""
Parse a regular format timedelta string. Return an int64_t (in ns)
or raise a ValueError on an invalid parse.
@@ -1150,7 +1155,7 @@ cdef class _Timedelta(timedelta):
"""
Format Timedelta as ISO 8601 Duration like
``P[n]Y[n]M[n]DT[n]H[n]M[n]S``, where the ``[n]`` s are replaced by the
- values. See https://en.wikipedia.org/wiki/ISO_8601#Durations
+ values. See https://en.wikipedia.org/wiki/ISO_8601#Durations.
.. versionadded:: 0.20.0
@@ -1280,7 +1285,8 @@ class Timedelta(_Timedelta):
else:
raise ValueError(
"Value must be Timedelta, string, integer, "
- "float, timedelta or convertible")
+ "float, timedelta or convertible, not {typ}"
+ .format(typ=type(value).__name__))
if is_timedelta64_object(value):
value = value.view('i8')
@@ -1313,7 +1319,7 @@ class Timedelta(_Timedelta):
def round(self, freq):
"""
- Round the Timedelta to the specified resolution
+ Round the Timedelta to the specified resolution.
Parameters
----------
@@ -1331,7 +1337,7 @@ class Timedelta(_Timedelta):
def floor(self, freq):
"""
- return a new Timedelta floored to this resolution
+ Return a new Timedelta floored to this resolution.
Parameters
----------
@@ -1341,7 +1347,7 @@ class Timedelta(_Timedelta):
def ceil(self, freq):
"""
- return a new Timedelta ceiled to this resolution
+ Return a new Timedelta ceiled to this resolution.
Parameters
----------
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index c8c6efda30fae..6ca39d83afd25 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -441,7 +441,7 @@ class Timestamp(_Timestamp):
def round(self, freq, ambiguous='raise', nonexistent='raise'):
"""
- Round the Timestamp to the specified resolution
+ Round the Timestamp to the specified resolution.
Parameters
----------
@@ -483,7 +483,7 @@ default 'raise'
def floor(self, freq, ambiguous='raise', nonexistent='raise'):
"""
- return a new Timestamp floored to this resolution
+ Return a new Timestamp floored to this resolution.
Parameters
----------
@@ -519,7 +519,7 @@ default 'raise'
def ceil(self, freq, ambiguous='raise', nonexistent='raise'):
"""
- return a new Timestamp ceiled to this resolution
+ Return a new Timestamp ceiled to this resolution.
Parameters
----------
@@ -556,7 +556,7 @@ default 'raise'
@property
def tz(self):
"""
- Alias for tzinfo
+ Alias for tzinfo.
"""
return self.tzinfo
@@ -754,7 +754,7 @@ default 'raise'
def resolution(self):
"""
Return resolution describing the smallest difference between two
- times that can be represented by Timestamp object_state
+ times that can be represented by Timestamp object_state.
"""
# GH#21336, GH#21365
return Timedelta(nanoseconds=1)
@@ -893,7 +893,7 @@ default 'raise'
hour=None, minute=None, second=None, microsecond=None,
nanosecond=None, tzinfo=object, fold=0):
"""
- implements datetime.replace, handles nanoseconds
+ Implement datetime.replace, handling nanoseconds.
Parameters
----------
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index 07c2805dd0ef6..63cbd36f9cd1d 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -1,14 +1,10 @@
-from cpython cimport PyTypeObject
+from cpython.object cimport PyTypeObject
cdef extern from *:
"""
PyObject* char_to_string(const char* data) {
- #if PY_VERSION_HEX >= 0x03000000
return PyUnicode_FromString(data);
- #else
- return PyString_FromString(data);
- #endif
}
"""
object char_to_string(const char* data)
@@ -18,7 +14,6 @@ cdef extern from "Python.h":
# Note: importing extern-style allows us to declare these as nogil
# functions, whereas `from cpython cimport` does not.
bint PyUnicode_Check(object obj) nogil
- bint PyString_Check(object obj) nogil
bint PyBool_Check(object obj) nogil
bint PyFloat_Check(object obj) nogil
bint PyComplex_Check(object obj) nogil
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index 0a986942d2a09..8de593ce36c86 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -1675,9 +1675,22 @@ def roll_generic(object obj,
return output
-def roll_window(ndarray[float64_t, ndim=1, cast=True] values,
- ndarray[float64_t, ndim=1, cast=True] weights,
- int minp, bint avg=True):
+# ----------------------------------------------------------------------
+# Rolling sum and mean for weighted window
+
+
+def roll_weighted_sum(float64_t[:] values, float64_t[:] weights,
+ int minp):
+ return _roll_weighted_sum_mean(values, weights, minp, avg=0)
+
+
+def roll_weighted_mean(float64_t[:] values, float64_t[:] weights,
+ int minp):
+ return _roll_weighted_sum_mean(values, weights, minp, avg=1)
+
+
+def _roll_weighted_sum_mean(float64_t[:] values, float64_t[:] weights,
+ int minp, bint avg):
"""
Assume len(weights) << len(values)
"""
@@ -1688,6 +1701,7 @@ def roll_window(ndarray[float64_t, ndim=1, cast=True] values,
in_n = len(values)
win_n = len(weights)
+
output = np.zeros(in_n, dtype=float)
counts = np.zeros(in_n, dtype=float)
if avg:
@@ -1739,6 +1753,7 @@ def roll_window(ndarray[float64_t, ndim=1, cast=True] values,
return output
+
# ----------------------------------------------------------------------
# Exponentially weighted moving average
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index d1aecf0a9d294..1775893b9f2bf 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -1,12 +1,8 @@
import cython
from cython import Py_ssize_t
-from cpython cimport PyBytes_GET_SIZE, PyUnicode_GET_SIZE
-
-try:
- from cpython cimport PyString_GET_SIZE
-except ImportError:
- from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE
+from cpython.bytes cimport PyBytes_GET_SIZE
+from cpython.unicode cimport PyUnicode_GET_SIZE
import numpy as np
from numpy cimport ndarray, uint8_t
@@ -125,12 +121,7 @@ def max_len_string_array(pandas_string[:] arr) -> Py_ssize_t:
for i in range(length):
val = arr[i]
- if isinstance(val, str):
- l = PyString_GET_SIZE(val)
- elif isinstance(val, bytes):
- l = PyBytes_GET_SIZE(val)
- elif isinstance(val, unicode):
- l = PyUnicode_GET_SIZE(val)
+ l = word_len(val)
if l > m:
m = l
@@ -138,6 +129,18 @@ def max_len_string_array(pandas_string[:] arr) -> Py_ssize_t:
return m
+cpdef inline Py_ssize_t word_len(object val):
+ """ return the maximum length of a string or bytes value """
+ cdef:
+ Py_ssize_t l = 0
+
+ if isinstance(val, str):
+ l = PyUnicode_GET_SIZE(val)
+ elif isinstance(val, bytes):
+ l = PyBytes_GET_SIZE(val)
+
+ return l
+
# ------------------------------------------------------------------
# PyTables Helpers
diff --git a/pandas/_typing.py b/pandas/_typing.py
index a1224a609579e..5afe64f719b8a 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,34 +1,33 @@
from pathlib import Path
-from typing import IO, AnyStr, TypeVar, Union
+from typing import IO, TYPE_CHECKING, AnyStr, Iterable, Optional, TypeVar, Union
import numpy as np
-from pandas._libs import Timestamp
-from pandas._libs.tslibs.period import Period
-from pandas._libs.tslibs.timedeltas import Timedelta
+# To prevent import cycles place any internal imports in the branch below
+# and use a string literal forward reference to it in subsequent types
+# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
+if TYPE_CHECKING:
+ from pandas._libs import Period, Timedelta, Timestamp # noqa: F401
+ from pandas.core.arrays.base import ExtensionArray # noqa: F401
+ from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401
+ from pandas.core.indexes.base import Index # noqa: F401
+ from pandas.core.series import Series # noqa: F401
+ from pandas.core.generic import NDFrame # noqa: F401
-from pandas.core.dtypes.dtypes import ExtensionDtype
-from pandas.core.dtypes.generic import (
- ABCDataFrame,
- ABCExtensionArray,
- ABCIndexClass,
- ABCSeries,
- ABCSparseSeries,
-)
-AnyArrayLike = TypeVar(
- "AnyArrayLike",
- ABCExtensionArray,
- ABCIndexClass,
- ABCSeries,
- ABCSparseSeries,
- np.ndarray,
-)
-ArrayLike = TypeVar("ArrayLike", ABCExtensionArray, np.ndarray)
-DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", Period, Timestamp, Timedelta)
-Dtype = Union[str, np.dtype, ExtensionDtype]
+AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
+ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray)
+DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta")
+Dtype = Union[str, np.dtype, "ExtensionDtype"]
FilePathOrBuffer = Union[str, Path, IO[AnyStr]]
-FrameOrSeries = TypeVar("FrameOrSeries", ABCSeries, ABCDataFrame)
-Scalar = Union[str, int, float]
+FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame")
+Scalar = Union[str, int, float, bool]
Axis = Union[str, int]
+Ordered = Optional[bool]
+
+# use Collection after we drop support for py35
+Axes = Iterable
+
+# to maintain type information across generic functions and parametrization
+_T = TypeVar("_T")
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index c9597505fa596..9c778f68727c6 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -10,9 +10,12 @@
import platform
import struct
import sys
+import warnings
+PY35 = sys.version_info[:2] == (3, 5)
PY36 = sys.version_info >= (3, 6)
PY37 = sys.version_info >= (3, 7)
+PY38 = sys.version_info >= (3, 8)
PYPY = platform.python_implementation() == "PyPy"
@@ -64,3 +67,32 @@ def is_platform_mac():
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
+
+
+def _import_lzma():
+ """Attempts to import lzma, warning the user when lzma is not available.
+ """
+ try:
+ import lzma
+
+ return lzma
+ except ImportError:
+ msg = (
+ "Could not import the lzma module. "
+ "Your installed Python is incomplete. "
+ "Attempting to use lzma compression will result in a RuntimeError."
+ )
+ warnings.warn(msg)
+
+
+def _get_lzma_file(lzma):
+ """Returns the lzma method LZMAFile when the module was correctly imported.
+ Otherwise, raises a RuntimeError.
+ """
+ if lzma is None:
+ raise RuntimeError(
+ "lzma module not available. "
+ "A Python re-install with the proper "
+ "dependencies might be required to solve this issue."
+ )
+ return lzma.LZMAFile
diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py
index 83f1da597d6a6..84824207de2a9 100644
--- a/pandas/compat/chainmap.py
+++ b/pandas/compat/chainmap.py
@@ -15,9 +15,3 @@ def __delitem__(self, key):
del mapping[key]
return
raise KeyError(key)
-
- # override because the m parameter is introduced in Python 3.4
- def new_child(self, m=None):
- if m is None:
- m = {}
- return self.__class__(m, *self.maps)
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 89f7d71e21e9d..c2fe7d1dd12f4 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -59,7 +59,7 @@ def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=No
)
else:
raise ValueError(
- "invalid validation method " "'{method}'".format(method=method)
+ "invalid validation method '{method}'".format(method=method)
)
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 0934d8529fdf7..b3c7b8a7c8b9f 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -5,10 +5,14 @@
import copy
import pickle as pkl
import sys
+from typing import TYPE_CHECKING
+import warnings
-import pandas # noqa
from pandas import Index
+if TYPE_CHECKING:
+ from pandas import Series, DataFrame
+
def load_reduce(self):
stack = self.stack
@@ -55,6 +59,41 @@ def load_reduce(self):
raise
+_sparse_msg = """\
+
+Loading a saved '{cls}' as a {new} with sparse values.
+'{cls}' is now removed. You should re-save this dataset in its new format.
+"""
+
+
+class _LoadSparseSeries:
+ # To load a SparseSeries as a Series[Sparse]
+ def __new__(cls) -> "Series":
+ from pandas import Series
+
+ warnings.warn(
+ _sparse_msg.format(cls="SparseSeries", new="Series"),
+ FutureWarning,
+ stacklevel=6,
+ )
+
+ return Series()
+
+
+class _LoadSparseFrame:
+ # To load a SparseDataFrame as a DataFrame[Sparse]
+ def __new__(cls) -> "DataFrame":
+ from pandas import DataFrame
+
+ warnings.warn(
+ _sparse_msg.format(cls="SparseDataFrame", new="DataFrame"),
+ FutureWarning,
+ stacklevel=6,
+ )
+
+ return DataFrame()
+
+
# If classes are moved, provide compat here.
_class_locations_map = {
("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
@@ -102,12 +141,12 @@ def load_reduce(self):
"SparseArray",
),
("pandas.sparse.series", "SparseSeries"): (
- "pandas.core.sparse.series",
- "SparseSeries",
+ "pandas.compat.pickle_compat",
+ "_LoadSparseSeries",
),
("pandas.sparse.frame", "SparseDataFrame"): (
"pandas.core.sparse.frame",
- "SparseDataFrame",
+ "_LoadSparseFrame",
),
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
@@ -140,6 +179,14 @@ def load_reduce(self):
"pandas.core.indexes.numeric",
"Float64Index",
),
+ ("pandas.core.sparse.series", "SparseSeries"): (
+ "pandas.compat.pickle_compat",
+ "_LoadSparseSeries",
+ ),
+ ("pandas.core.sparse.frame", "SparseDataFrame"): (
+ "pandas.compat.pickle_compat",
+ "_LoadSparseFrame",
+ ),
}
@@ -197,10 +244,6 @@ def load_newobj_ex(self):
def load(fh, encoding=None, is_verbose=False):
"""load a pickle, with a provided encoding
- if compat is True:
- fake the old class hierarchy
- if it works, then return the new type objects
-
Parameters
----------
fh : a filelike object
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2cf7bf6a6df41..b032e14d8f7e1 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -123,18 +123,22 @@ def ip():
@pytest.fixture(params=[True, False, None])
def observed(request):
- """ pass in the observed keyword to groupby for [True, False]
+ """
+ Pass in the observed keyword to groupby for [True, False].
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
- parameter is not passed)"""
+ parameter is not passed).
+ """
return request.param
@pytest.fixture(params=[True, False, None])
def ordered_fixture(request):
- """Boolean 'ordered' parameter for Categorical."""
+ """
+ Boolean 'ordered' parameter for Categorical.
+ """
return request.param
@@ -234,7 +238,8 @@ def cython_table_items(request):
def _get_cython_table_params(ndframe, func_names_and_expected):
- """combine frame, functions from SelectionMixin._cython_table
+ """
+ Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
@@ -242,7 +247,7 @@ def _get_cython_table_params(ndframe, func_names_and_expected):
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
- The second item is the expected return value
+ The second item is the expected return value.
Returns
-------
@@ -341,7 +346,8 @@ def strict_data_files(pytestconfig):
@pytest.fixture
def datapath(strict_data_files):
- """Get the path to a data file.
+ """
+ Get the path to a data file.
Parameters
----------
@@ -375,7 +381,9 @@ def deco(*args):
@pytest.fixture
def iris(datapath):
- """The iris dataset as a DataFrame."""
+ """
+ The iris dataset as a DataFrame.
+ """
return pd.read_csv(datapath("data", "iris.csv"))
@@ -504,7 +512,8 @@ def tz_aware_fixture(request):
@pytest.fixture(params=STRING_DTYPES)
def string_dtype(request):
- """Parametrized fixture for string dtypes.
+ """
+ Parametrized fixture for string dtypes.
* str
* 'str'
@@ -515,7 +524,8 @@ def string_dtype(request):
@pytest.fixture(params=BYTES_DTYPES)
def bytes_dtype(request):
- """Parametrized fixture for bytes dtypes.
+ """
+ Parametrized fixture for bytes dtypes.
* bytes
* 'bytes'
@@ -525,7 +535,8 @@ def bytes_dtype(request):
@pytest.fixture(params=OBJECT_DTYPES)
def object_dtype(request):
- """Parametrized fixture for object dtypes.
+ """
+ Parametrized fixture for object dtypes.
* object
* 'object'
@@ -535,7 +546,8 @@ def object_dtype(request):
@pytest.fixture(params=DATETIME64_DTYPES)
def datetime64_dtype(request):
- """Parametrized fixture for datetime64 dtypes.
+ """
+ Parametrized fixture for datetime64 dtypes.
* 'datetime64[ns]'
* 'M8[ns]'
@@ -545,7 +557,8 @@ def datetime64_dtype(request):
@pytest.fixture(params=TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
- """Parametrized fixture for timedelta64 dtypes.
+ """
+ Parametrized fixture for timedelta64 dtypes.
* 'timedelta64[ns]'
* 'm8[ns]'
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index f84033e9c3c90..2d4ded9e2e6ba 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -51,7 +51,7 @@ class PandasDelegate:
"""
def _delegate_property_get(self, name, *args, **kwargs):
- raise TypeError("You cannot access the " "property {name}".format(name=name))
+ raise TypeError("You cannot access the property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
@@ -271,8 +271,7 @@ def plot(self):
@Appender(
_doc
% dict(
- klass="DataFrame",
- others=("register_series_accessor, " "register_index_accessor"),
+ klass="DataFrame", others=("register_series_accessor, register_index_accessor")
)
)
def register_dataframe_accessor(name):
@@ -284,8 +283,7 @@ def register_dataframe_accessor(name):
@Appender(
_doc
% dict(
- klass="Series",
- others=("register_dataframe_accessor, " "register_index_accessor"),
+ klass="Series", others=("register_dataframe_accessor, register_index_accessor")
)
)
def register_series_accessor(name):
@@ -297,8 +295,7 @@ def register_series_accessor(name):
@Appender(
_doc
% dict(
- klass="Index",
- others=("register_dataframe_accessor, " "register_series_accessor"),
+ klass="Index", others=("register_dataframe_accessor, register_series_accessor")
)
)
def register_index_accessor(name):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 2c38e071d3d44..6e73e1636a75b 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -28,20 +28,17 @@
is_complex_dtype,
is_datetime64_any_dtype,
is_datetime64_ns_dtype,
- is_datetime64tz_dtype,
is_datetimelike,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
- is_interval_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_period_dtype,
is_scalar,
is_signed_integer_dtype,
- is_sparse,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
@@ -50,6 +47,7 @@
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas.core import common as com
+from pandas.core.construction import array
from pandas.core.indexers import validate_indices
_shared_docs = {} # type: Dict[str, str]
@@ -182,8 +180,6 @@ def _reconstruct_data(values, dtype, original):
if is_extension_array_dtype(dtype):
values = dtype.construct_array_type()._from_sequence(values)
- elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
- values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
@@ -746,7 +742,7 @@ def value_counts(
else:
- if is_extension_array_dtype(values) or is_sparse(values):
+ if is_extension_array_dtype(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
@@ -816,8 +812,6 @@ def duplicated(values, keep="first"):
"""
Return boolean ndarray denoting duplicate values.
- .. versionadded:: 0.19.0
-
Parameters
----------
values : ndarray-like
@@ -1102,7 +1096,9 @@ def _get_score(at):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
- return algos.arrmap_float64(q, _get_score)
+ result = [_get_score(x) for x in q]
+ result = np.array(result, dtype=np.float64)
+ return result
# --------------- #
@@ -1626,7 +1622,7 @@ def take_nd(
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
- _maybe_promote to determine this type for any fill_value
+ maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
@@ -1644,19 +1640,11 @@ def take_nd(
May be the same type as the input, or cast to an ndarray.
"""
- # TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs
- # dispatch to internal type takes
if is_extension_array_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
- elif is_datetime64tz_dtype(arr):
- return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
- elif is_interval_dtype(arr):
- return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
- if is_sparse(arr):
- arr = arr.to_dense()
- elif isinstance(arr, (ABCIndexClass, ABCSeries)):
- arr = arr.values
+ if isinstance(arr, (ABCIndexClass, ABCSeries)):
+ arr = arr._values
arr = np.asarray(arr)
@@ -1855,8 +1843,6 @@ def searchsorted(arr, value, side="left", sorter=None):
and is_integer_dtype(arr)
and (is_integer(value) or is_integer_dtype(value))
):
- from .arrays.array_ import array
-
# if `arr` and `value` have different dtypes, `arr` would be
# recast by numpy, causing a slow search.
# Before searching below, we therefore try to give `value` the
@@ -1977,12 +1963,6 @@ def diff(arr, n, axis=0):
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
- from pandas import TimedeltaIndex
-
- out_arr = (
- TimedeltaIndex(out_arr.ravel().astype("int64"))
- .asi8.reshape(out_arr.shape)
- .astype("timedelta64[ns]")
- )
+ out_arr = out_arr.astype("int64").view("timedelta64[ns]")
return out_arr
diff --git a/pandas/core/api.py b/pandas/core/api.py
index f3ea0976a2869..bd2a57a15bdd2 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -2,6 +2,16 @@
import numpy as np
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ DatetimeTZDtype,
+ IntervalDtype,
+ PeriodDtype,
+)
+from pandas.core.dtypes.missing import isna, isnull, notna, notnull
+
+from pandas.core.algorithms import factorize, unique, value_counts
+from pandas.core.arrays import Categorical
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
@@ -12,44 +22,38 @@
UInt32Dtype,
UInt64Dtype,
)
-from pandas.core.algorithms import factorize, unique, value_counts
-from pandas.core.dtypes.missing import isna, isnull, notna, notnull
-from pandas.core.dtypes.dtypes import (
- CategoricalDtype,
- PeriodDtype,
- IntervalDtype,
- DatetimeTZDtype,
-)
-from pandas.core.arrays import Categorical, array
+from pandas.core.construction import array
+
from pandas.core.groupby import Grouper, NamedAgg
-from pandas.io.formats.format import set_eng_float_format
+
+# DataFrame needs to be imported after NamedAgg to avoid a circular import
+from pandas.core.frame import DataFrame # isort:skip
from pandas.core.index import (
- Index,
CategoricalIndex,
- Int64Index,
- UInt64Index,
- RangeIndex,
+ DatetimeIndex,
Float64Index,
- MultiIndex,
+ Index,
+ Int64Index,
IntervalIndex,
- TimedeltaIndex,
- DatetimeIndex,
- PeriodIndex,
+ MultiIndex,
NaT,
+ PeriodIndex,
+ RangeIndex,
+ TimedeltaIndex,
+ UInt64Index,
)
+from pandas.core.indexes.datetimes import Timestamp, bdate_range, date_range
+from pandas.core.indexes.interval import Interval, interval_range
from pandas.core.indexes.period import Period, period_range
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
-from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
-from pandas.core.indexes.interval import Interval, interval_range
-
-from pandas.core.series import Series
-from pandas.core.frame import DataFrame
-
-# TODO: Remove import when statsmodels updates #18264
-from pandas.core.reshape.reshape import get_dummies
-
from pandas.core.indexing import IndexSlice
-from pandas.core.tools.numeric import to_numeric
-from pandas.tseries.offsets import DateOffset
+from pandas.core.reshape.reshape import (
+ get_dummies,
+) # TODO: Remove get_dummies import when statsmodels updates #18264
+from pandas.core.series import Series
from pandas.core.tools.datetimes import to_datetime
+from pandas.core.tools.numeric import to_numeric
from pandas.core.tools.timedeltas import to_timedelta
+
+from pandas.io.formats.format import set_eng_float_format
+from pandas.tseries.offsets import DateOffset
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 2246bbfde636d..d093d7a145382 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -3,7 +3,7 @@
import numpy as np
-from pandas._libs import reduction
+from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
@@ -199,21 +199,25 @@ def apply_empty_result(self):
return self.obj.copy()
# we may need to infer
- reduce = self.result_type == "reduce"
+ should_reduce = self.result_type == "reduce"
from pandas import Series
- if not reduce:
-
- EMPTY_SERIES = Series([])
+ if not should_reduce:
try:
- r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
- reduce = not isinstance(r, Series)
+ r = self.f(Series([]))
except Exception:
pass
+ else:
+ should_reduce = not isinstance(r, Series)
+
+ if should_reduce:
+ if len(self.agg_axis):
+ r = self.f(Series([]))
+ else:
+ r = np.nan
- if reduce:
- return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
+ return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
@@ -221,7 +225,7 @@ def apply_raw(self):
""" apply to the values as a numpy array """
try:
- result = reduction.reduce(self.values, self.f, axis=self.axis)
+ result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
@@ -281,7 +285,7 @@ def apply_standard(self):
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
- result = reduction.reduce(
+ result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
return self.obj._constructor_sliced(result, index=labels)
@@ -306,10 +310,11 @@ def apply_series_generator(self):
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
- keys.append(v.name)
- successes.append(i)
except Exception:
pass
+ else:
+ keys.append(v.name)
+ successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
@@ -337,7 +342,7 @@ def wrap_results(self):
results = self.results
# see if we can infer the results
- if len(results) > 0 and is_sequence(results[0]):
+ if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py
index dab29e9ce71d3..5c83ed8cf5e24 100644
--- a/pandas/core/arrays/__init__.py
+++ b/pandas/core/arrays/__init__.py
@@ -1,4 +1,3 @@
-from .array_ import array # noqa: F401
from .base import ( # noqa: F401
ExtensionArray,
ExtensionOpsMixin,
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py
deleted file mode 100644
index 93ee570c1f971..0000000000000
--- a/pandas/core/arrays/array_.py
+++ /dev/null
@@ -1,281 +0,0 @@
-from typing import Optional, Sequence, Union, cast
-
-import numpy as np
-
-from pandas._libs import lib, tslibs
-
-from pandas.core.dtypes.common import (
- is_datetime64_ns_dtype,
- is_extension_array_dtype,
- is_timedelta64_ns_dtype,
-)
-from pandas.core.dtypes.dtypes import ExtensionDtype, registry
-from pandas.core.dtypes.generic import ABCExtensionArray
-
-
-def array(
- data: Sequence[object],
- dtype: Optional[Union[str, np.dtype, ExtensionDtype]] = None,
- copy: bool = True,
-) -> ABCExtensionArray:
- """
- Create an array.
-
- .. versionadded:: 0.24.0
-
- Parameters
- ----------
- data : Sequence of objects
- The scalars inside `data` should be instances of the
- scalar type for `dtype`. It's expected that `data`
- represents a 1-dimensional array of data.
-
- When `data` is an Index or Series, the underlying array
- will be extracted from `data`.
-
- dtype : str, np.dtype, or ExtensionDtype, optional
- The dtype to use for the array. This may be a NumPy
- dtype or an extension type registered with pandas using
- :meth:`pandas.api.extensions.register_extension_dtype`.
-
- If not specified, there are two possibilities:
-
- 1. When `data` is a :class:`Series`, :class:`Index`, or
- :class:`ExtensionArray`, the `dtype` will be taken
- from the data.
- 2. Otherwise, pandas will attempt to infer the `dtype`
- from the data.
-
- Note that when `data` is a NumPy array, ``data.dtype`` is
- *not* used for inferring the array type. This is because
- NumPy cannot represent all the types of data that can be
- held in extension arrays.
-
- Currently, pandas will infer an extension dtype for sequences of
-
- ============================== =====================================
- Scalar Type Array Type
- ============================== =====================================
- :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
- :class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
- :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
- :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
- ============================== =====================================
-
- For all other cases, NumPy's usual inference rules will be used.
-
- copy : bool, default True
- Whether to copy the data, even if not necessary. Depending
- on the type of `data`, creating the new array may require
- copying data, even if ``copy=False``.
-
- Returns
- -------
- ExtensionArray
- The newly created array.
-
- Raises
- ------
- ValueError
- When `data` is not 1-dimensional.
-
- See Also
- --------
- numpy.array : Construct a NumPy array.
- Series : Construct a pandas Series.
- Index : Construct a pandas Index.
- arrays.PandasArray : ExtensionArray wrapping a NumPy array.
- Series.array : Extract the array stored within a Series.
-
- Notes
- -----
- Omitting the `dtype` argument means pandas will attempt to infer the
- best array type from the values in the data. As new array types are
- added by pandas and 3rd party libraries, the "best" array type may
- change. We recommend specifying `dtype` to ensure that
-
- 1. the correct array type for the data is returned
- 2. the returned array type doesn't change as new extension types
- are added by pandas and third-party libraries
-
- Additionally, if the underlying memory representation of the returned
- array matters, we recommend specifying the `dtype` as a concrete object
- rather than a string alias or allowing it to be inferred. For example,
- a future version of pandas or a 3rd-party library may include a
- dedicated ExtensionArray for string data. In this event, the following
- would no longer return a :class:`arrays.PandasArray` backed by a NumPy
- array.
-
- >>> pd.array(['a', 'b'], dtype=str)
- <PandasArray>
- ['a', 'b']
- Length: 2, dtype: str32
-
- This would instead return the new ExtensionArray dedicated for string
- data. If you really need the new array to be backed by a NumPy array,
- specify that in the dtype.
-
- >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
- <PandasArray>
- ['a', 'b']
- Length: 2, dtype: str32
-
- Or use the dedicated constructor for the array you're expecting, and
- wrap that in a PandasArray
-
- >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
- <PandasArray>
- ['a', 'b']
- Length: 2, dtype: str32
-
- Finally, Pandas has arrays that mostly overlap with NumPy
-
- * :class:`arrays.DatetimeArray`
- * :class:`arrays.TimedeltaArray`
-
- When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
- passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
- rather than a ``PandasArray``. This is for symmetry with the case of
- timezone-aware data, which NumPy does not natively support.
-
- >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
- <DatetimeArray>
- ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
- Length: 2, dtype: datetime64[ns]
-
- >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
- <TimedeltaArray>
- ['01:00:00', '02:00:00']
- Length: 2, dtype: timedelta64[ns]
-
- Examples
- --------
- If a dtype is not specified, `data` is passed through to
- :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.
-
- >>> pd.array([1, 2])
- <PandasArray>
- [1, 2]
- Length: 2, dtype: int64
-
- Or the NumPy dtype can be specified
-
- >>> pd.array([1, 2], dtype=np.dtype("int32"))
- <PandasArray>
- [1, 2]
- Length: 2, dtype: int32
-
- You can use the string alias for `dtype`
-
- >>> pd.array(['a', 'b', 'a'], dtype='category')
- [a, b, a]
- Categories (2, object): [a, b]
-
- Or specify the actual dtype
-
- >>> pd.array(['a', 'b', 'a'],
- ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
- [a, b, a]
- Categories (3, object): [a < b < c]
-
- Because omitting the `dtype` passes the data through to NumPy,
- a mixture of valid integers and NA will return a floating-point
- NumPy array.
-
- >>> pd.array([1, 2, np.nan])
- <PandasArray>
- [1.0, 2.0, nan]
- Length: 3, dtype: float64
-
- To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
- the dtype:
-
- >>> pd.array([1, 2, np.nan], dtype='Int64')
- <IntegerArray>
- [1, 2, NaN]
- Length: 3, dtype: Int64
-
- Pandas will infer an ExtensionArray for some types of data:
-
- >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
- <PeriodArray>
- ['2000-01-01', '2000-01-01']
- Length: 2, dtype: period[D]
-
- `data` must be 1-dimensional. A ValueError is raised when the input
- has the wrong dimensionality.
-
- >>> pd.array(1)
- Traceback (most recent call last):
- ...
- ValueError: Cannot pass scalar '1' to 'pandas.array'.
- """
- from pandas.core.arrays import (
- period_array,
- ExtensionArray,
- IntervalArray,
- PandasArray,
- DatetimeArray,
- TimedeltaArray,
- )
- from pandas.core.internals.arrays import extract_array
-
- if lib.is_scalar(data):
- msg = "Cannot pass scalar '{}' to 'pandas.array'."
- raise ValueError(msg.format(data))
-
- data = extract_array(data, extract_numpy=True)
-
- if dtype is None and isinstance(data, ExtensionArray):
- dtype = data.dtype
-
- # this returns None for not-found dtypes.
- if isinstance(dtype, str):
- dtype = registry.find(dtype) or dtype
-
- if is_extension_array_dtype(dtype):
- cls = cast(ExtensionDtype, dtype).construct_array_type()
- return cls._from_sequence(data, dtype=dtype, copy=copy)
-
- if dtype is None:
- inferred_dtype = lib.infer_dtype(data, skipna=False)
- if inferred_dtype == "period":
- try:
- return period_array(data, copy=copy)
- except tslibs.IncompatibleFrequency:
- # We may have a mixture of frequencies.
- # We choose to return an ndarray, rather than raising.
- pass
- elif inferred_dtype == "interval":
- try:
- return IntervalArray(data, copy=copy)
- except ValueError:
- # We may have a mixture of `closed` here.
- # We choose to return an ndarray, rather than raising.
- pass
-
- elif inferred_dtype.startswith("datetime"):
- # datetime, datetime64
- try:
- return DatetimeArray._from_sequence(data, copy=copy)
- except ValueError:
- # Mixture of timezones, fall back to PandasArray
- pass
-
- elif inferred_dtype.startswith("timedelta"):
- # timedelta, timedelta64
- return TimedeltaArray._from_sequence(data, copy=copy)
-
- # TODO(BooleanArray): handle this type
-
- # Pandas overrides NumPy for
- # 1. datetime64[ns]
- # 2. timedelta64[ns]
- # so that a DatetimeArray is returned.
- if is_datetime64_ns_dtype(dtype):
- return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
- elif is_timedelta64_ns_dtype(dtype):
- return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
-
- result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
- return result
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index ee796f9896b52..0778b6726d104 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -14,14 +14,17 @@
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
+from pandas.util._validators import validate_fillna_kwargs
-from pandas.core.dtypes.common import is_list_like
+from pandas.core.dtypes.common import is_array_like, is_list_like
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas._typing import ArrayLike
from pandas.core import ops
+from pandas.core.algorithms import _factorize_array, unique
+from pandas.core.missing import backfill_1d, pad_1d
from pandas.core.sorting import nargsort
_not_implemented_message = "{} does not implement {}."
@@ -61,9 +64,9 @@ class ExtensionArray:
shift
take
unique
+ view
_concat_same_type
_formatter
- _formatting_values
_from_factorized
_from_sequence
_from_sequence_of_strings
@@ -144,7 +147,7 @@ class ExtensionArray:
If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
that
- 1. You defer by raising ``NotImplemented`` when any Series are present
+ 1. You defer by returning ``NotImplemented`` when any Series are present
in `inputs`. Pandas will extract the arrays and call the ufunc again.
2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
Pandas inspect this to determine whether the ufunc is valid for the
@@ -484,10 +487,6 @@ def fillna(self, value=None, method=None, limit=None):
-------
filled : ExtensionArray with NA/NaN filled
"""
- from pandas.api.types import is_array_like
- from pandas.util._validators import validate_fillna_kwargs
- from pandas.core.missing import pad_1d, backfill_1d
-
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
@@ -515,7 +514,7 @@ def fillna(self, value=None, method=None, limit=None):
def dropna(self):
"""
- Return ExtensionArray without NA values
+ Return ExtensionArray without NA values.
Returns
-------
@@ -584,8 +583,6 @@ def unique(self):
-------
uniques : ExtensionArray
"""
- from pandas import unique
-
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
@@ -700,8 +697,6 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ABCExtensionArra
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
- from pandas.core.algorithms import _factorize_array
-
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(
@@ -867,6 +862,27 @@ def copy(self) -> ABCExtensionArray:
"""
raise AbstractMethodError(self)
+ def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]:
+ """
+ Return a view on the array.
+
+ Parameters
+ ----------
+ dtype : str, np.dtype, or ExtensionDtype, optional
+ Default None
+
+ Returns
+ -------
+ ExtensionArray
+ """
+ # NB:
+ # - This must return a *new* object referencing the same data, not self.
+ # - The only case that *must* be implemented is with dtype=None,
+ # giving a view with the same dtype as self.
+ if dtype is not None:
+ raise NotImplementedError(dtype)
+ return self[:]
+
# ------------------------------------------------------------------------
# Printing
# ------------------------------------------------------------------------
@@ -874,7 +890,7 @@ def copy(self) -> ABCExtensionArray:
def __repr__(self):
from pandas.io.formats.printing import format_object_summary
- template = "{class_name}" "{data}\n" "Length: {length}, dtype: {dtype}"
+ template = "{class_name}{data}\nLength: {length}, dtype: {dtype}"
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
@@ -913,21 +929,6 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]:
return str
return repr
- def _formatting_values(self) -> np.ndarray:
- # At the moment, this has to be an array since we use result.dtype
- """
- An array of values to be printed in, e.g. the Series repr
-
- .. deprecated:: 0.24.0
-
- Use :meth:`ExtensionArray._formatter` instead.
-
- Returns
- -------
- array : ndarray
- """
- return np.array(self)
-
# ------------------------------------------------------------------------
# Reshaping
# ------------------------------------------------------------------------
@@ -956,7 +957,7 @@ def _concat_same_type(
cls, to_concat: Sequence[ABCExtensionArray]
) -> ABCExtensionArray:
"""
- Concatenate multiple array
+ Concatenate multiple array.
Parameters
----------
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index df5cd12a479f0..870628500af21 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1,12 +1,14 @@
+import operator
from shutil import get_terminal_size
import textwrap
+from typing import Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
-from pandas._libs import algos as libalgos, lib
+from pandas._libs import algos as libalgos, hashtable as htable, lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
@@ -21,7 +23,6 @@
ensure_int64,
ensure_object,
ensure_platform_int,
- is_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_datetimelike,
@@ -38,21 +39,25 @@
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.generic import (
- ABCCategoricalIndex,
- ABCDataFrame,
- ABCIndexClass,
- ABCSeries,
-)
+from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
+from pandas._typing import ArrayLike, Dtype, Ordered
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
-from pandas.core.algorithms import factorize, take, take_1d, unique1d
+from pandas.core.algorithms import (
+ _get_data_algo,
+ _hashtables,
+ factorize,
+ take,
+ take_1d,
+ unique1d,
+)
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
+from pandas.core.construction import extract_array, sanitize_array
from pandas.core.missing import interpolate_2d
from pandas.core.sorting import nargsort
@@ -74,6 +79,8 @@
def _cat_compare_op(op):
+ opname = "__{op}__".format(op=op.__name__)
+
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
@@ -84,17 +91,20 @@ def f(self, other):
return NotImplemented
other = lib.item_from_zerodim(other)
+ if is_list_like(other) and len(other) != len(self):
+ # TODO: Could this fail if the categories are listlike objects?
+ raise ValueError("Lengths must match.")
if not self.ordered:
- if op in ["__lt__", "__gt__", "__le__", "__ge__"]:
+ if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
- "Unordered Categoricals can only compare " "equality or not"
+ "Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
- msg = "Categoricals can only be compared if " "'categories' are the same."
+ msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
@@ -104,7 +114,7 @@ def f(self, other):
if not (self.ordered == other.ordered):
raise TypeError(
- "Categoricals can only be compared if " "'ordered' is the same"
+ "Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
@@ -113,7 +123,7 @@ def f(self, other):
other_codes = other._codes
mask = (self._codes == -1) | (other_codes == -1)
- f = getattr(self._codes, op)
+ f = getattr(self._codes, opname)
ret = f(other_codes)
if mask.any():
# In other series, the leads to False, so do that here too
@@ -123,55 +133,42 @@ def f(self, other):
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
- ret = getattr(self._codes, op)(i)
+ ret = getattr(self._codes, opname)(i)
# check for NaN in self
mask = self._codes == -1
ret[mask] = False
return ret
else:
- if op == "__eq__":
+ if opname == "__eq__":
return np.repeat(False, len(self))
- elif op == "__ne__":
+ elif opname == "__ne__":
return np.repeat(True, len(self))
else:
msg = (
"Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category."
)
- raise TypeError(msg.format(op=op))
+ raise TypeError(msg.format(op=opname))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
- if op in ["__eq__", "__ne__"]:
- return getattr(np.array(self), op)(np.array(other))
+ if opname in ["__eq__", "__ne__"]:
+ return getattr(np.array(self), opname)(np.array(other))
msg = (
"Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'."
)
- raise TypeError(msg.format(op=op, typ=type(other)))
+ raise TypeError(msg.format(op=opname, typ=type(other)))
- f.__name__ = op
+ f.__name__ = opname
return f
-def _maybe_to_categorical(array):
- """
- Coerce to a categorical if a series is given.
-
- Internal use ONLY.
- """
- if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
- return array._values
- elif isinstance(array, np.ndarray):
- return Categorical(array)
- return array
-
-
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
@@ -367,7 +364,6 @@ def __init__(
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
- from pandas.core.internals.construction import sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
@@ -396,7 +392,7 @@ def __init__(
# FIXME
raise NotImplementedError(
- "> 1 ndim Categorical are not " "supported at this time"
+ "> 1 ndim Categorical are not supported at this time"
)
# we're inferring from values
@@ -466,7 +462,7 @@ def categories(self, categories):
self._dtype = new_dtype
@property
- def ordered(self):
+ def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
@@ -475,16 +471,16 @@ def ordered(self):
@property
def dtype(self) -> CategoricalDtype:
"""
- The :class:`~pandas.api.types.CategoricalDtype` for this instance
+ The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
- def _ndarray_values(self):
+ def _ndarray_values(self) -> np.ndarray:
return self.codes
@property
- def _constructor(self):
+ def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
@@ -495,7 +491,7 @@ def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
- def copy(self):
+ def copy(self) -> "Categorical":
"""
Copy constructor.
"""
@@ -503,7 +499,7 @@ def copy(self):
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
- def astype(self, dtype, copy=True):
+ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
@@ -514,41 +510,36 @@ def astype(self, dtype, copy=True):
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
-
- .. versionadded:: 0.19.0
-
"""
if is_categorical_dtype(dtype):
+ dtype = cast(Union[str, CategoricalDtype], dtype)
+
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
+ if is_integer_dtype(dtype) and self.isna().any():
+ msg = "Cannot convert float NaN to integer"
+ raise ValueError(msg)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
- def ndim(self):
- """
- Number of dimensions of the Categorical
- """
- return self._codes.ndim
-
- @cache_readonly
- def size(self):
+ def size(self) -> int:
"""
return the len of myself
"""
- return len(self)
+ return self._codes.size
@cache_readonly
- def itemsize(self):
+ def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
- def tolist(self):
+ def tolist(self) -> list:
"""
Return a list of the values.
@@ -561,7 +552,7 @@ def tolist(self):
to_list = tolist
@property
- def base(self):
+ def base(self) -> None:
"""
compat, we are always our own object
"""
@@ -704,12 +695,10 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
raise ValueError(msg)
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
- raise ValueError("codes need to be between -1 and " "len(categories)-1")
+ raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
- _codes = None
-
def _get_codes(self):
"""
Get the codes.
@@ -769,7 +758,7 @@ def _set_categories(self, categories, fastpath=False):
self._dtype = new_dtype
- def _set_dtype(self, dtype):
+ def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
@@ -1031,7 +1020,7 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
- "items in new_categories are not the same as in " "old categories"
+ "items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
@@ -1260,12 +1249,12 @@ def map(self, mapper):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
- __eq__ = _cat_compare_op("__eq__")
- __ne__ = _cat_compare_op("__ne__")
- __lt__ = _cat_compare_op("__lt__")
- __gt__ = _cat_compare_op("__gt__")
- __le__ = _cat_compare_op("__le__")
- __ge__ = _cat_compare_op("__ge__")
+ __eq__ = _cat_compare_op(operator.eq)
+ __ne__ = _cat_compare_op(operator.ne)
+ __lt__ = _cat_compare_op(operator.lt)
+ __gt__ = _cat_compare_op(operator.gt)
+ __le__ = _cat_compare_op(operator.le)
+ __ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
@@ -1364,24 +1353,7 @@ def __setstate__(self, state):
if not isinstance(state, dict):
raise Exception("invalid pickle state")
- # Provide compatibility with pre-0.15.0 Categoricals.
- if "_categories" not in state and "_levels" in state:
- state["_categories"] = self.dtype.validate_categories(state.pop("_levels"))
- if "_codes" not in state and "labels" in state:
- state["_codes"] = coerce_indexer_dtype(
- state.pop("labels"), state["_categories"]
- )
-
- # 0.16.0 ordered change
- if "_ordered" not in state:
-
- # >=15.0 < 0.16.0
- if "ordered" in state:
- state["_ordered"] = state.pop("ordered")
- else:
- state["_ordered"] = False
-
- # 0.21.0 CategoricalDtype change
+ # compat with pre 0.21.0 CategoricalDtype change
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
@@ -1493,7 +1465,7 @@ def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
- raise NotImplementedError(("'put' is not yet implemented " "for Categorical"))
+ raise NotImplementedError(("'put' is not yet implemented for Categorical"))
def dropna(self):
"""
@@ -1527,9 +1499,7 @@ def value_counts(self, dropna=True):
See Also
--------
Series.value_counts
-
"""
- from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
@@ -1538,9 +1508,9 @@ def value_counts(self, dropna=True):
if dropna or clean:
obs = code if clean else code[mask]
- count = bincount(obs, minlength=ncat or 0)
+ count = np.bincount(obs, minlength=ncat or 0)
else:
- count = bincount(np.where(mask, code, ncat))
+ count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
@@ -1778,18 +1748,10 @@ def ravel(self, order="C"):
)
return np.array(self)
- def view(self):
- """
- Return a view of myself.
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- view : Categorical
- Returns `self`!
- """
- return self
+ def view(self, dtype=None):
+ if dtype is not None:
+ raise NotImplementedError(dtype)
+ return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
@@ -1841,7 +1803,7 @@ def fillna(self, value=None, method=None, limit=None):
value = np.nan
if limit is not None:
raise NotImplementedError(
- "specifying a limit for fillna has not " "been implemented yet"
+ "specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
@@ -1864,8 +1826,8 @@ def fillna(self, value=None, method=None, limit=None):
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
- indexer = np.where(values_codes != -1)
- codes[indexer] = values_codes[values_codes != -1]
+ indexer = np.where(codes == -1)
+ codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
@@ -1977,7 +1939,7 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
if fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
- msg = "'fill_value' ('{}') is not in this Categorical's " "categories."
+ msg = "'fill_value' ('{}') is not in this Categorical's categories."
raise TypeError(msg.format(fill_value))
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
@@ -1986,23 +1948,6 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
take = take_nd
- def _slice(self, slicer):
- """
- Return a slice of myself.
-
- For internal compatibility with numpy arrays.
- """
-
- # only allow 1 dimensional slicing, but can
- # in a 2-d case be passd (slice(None),....)
- if isinstance(slicer, tuple) and len(slicer) == 2:
- if not com.is_null_slice(slicer[0]):
- raise AssertionError("invalid slicing for a 1-ndim " "categorical")
- slicer = slicer[1]
-
- codes = self._codes[slicer]
- return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
-
def __len__(self):
"""
The length of this Categorical.
@@ -2160,8 +2105,6 @@ def __setitem__(self, key, value):
If (one or more) Value is not in categories or if a assigned
`Categorical` does not have the same categories
"""
- from pandas.core.internals.arrays import extract_array
-
value = extract_array(value, extract_numpy=True)
# require identical categories set
@@ -2201,12 +2144,12 @@ def __setitem__(self, key, value):
# in a 2-d case be passd (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
- raise AssertionError("invalid slicing for a 1-ndim " "categorical")
+ raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
- raise AssertionError("invalid slicing for a 1-ndim " "categorical")
+ raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
@@ -2329,9 +2272,6 @@ def mode(self, dropna=True):
-------
modes : `Categorical` (sorted)
"""
-
- import pandas._libs.hashtable as htable
-
codes = self._codes
if dropna:
good = self._codes != -1
@@ -2481,9 +2421,9 @@ def _can_hold_na(self):
@classmethod
def _concat_same_type(self, to_concat):
- from pandas.core.dtypes.concat import _concat_categorical
+ from pandas.core.dtypes.concat import concat_categorical
- return _concat_categorical(to_concat)
+ return concat_categorical(to_concat)
def isin(self, values):
"""
@@ -2527,8 +2467,6 @@ def isin(self, values):
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
- from pandas.core.internals.construction import sanitize_array
-
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
@@ -2599,9 +2537,7 @@ def __init__(self, data):
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
- raise AttributeError(
- "Can only use .cat accessor with a " "'category' dtype"
- )
+ raise AttributeError("Can only use .cat accessor with a 'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
@@ -2645,7 +2581,7 @@ def name(self):
# need to be updated. `name` will need to be removed from
# `ok_for_cat`.
warn(
- "`Series.cat.name` has been deprecated. Use `Series.name` " "instead.",
+ "`Series.cat.name` has been deprecated. Use `Series.name` instead.",
FutureWarning,
stacklevel=2,
)
@@ -2657,7 +2593,7 @@ def index(self):
# need to be updated. `index` will need to be removed from
# ok_for_cat`.
warn(
- "`Series.cat.index` has been deprecated. Use `Series.index` " "instead.",
+ "`Series.cat.index` has been deprecated. Use `Series.index` instead.",
FutureWarning,
stacklevel=2,
)
@@ -2671,8 +2607,6 @@ def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
- from pandas.core.algorithms import _get_data_algo, _hashtables
-
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
if dtype_equal:
@@ -2701,18 +2635,18 @@ def _get_codes_for_values(values, categories):
return coerce_indexer_dtype(t.lookup(vals), cats)
-def _recode_for_categories(codes, old_categories, new_categories):
+def _recode_for_categories(codes: np.ndarray, old_categories, new_categories):
"""
Convert a set of codes for to a new set of categories
Parameters
----------
- codes : array
+ codes : np.ndarray
old_categories, new_categories : Index
Returns
-------
- new_codes : array
+ new_codes : np.ndarray[np.int64]
Examples
--------
@@ -2722,8 +2656,6 @@ def _recode_for_categories(codes, old_categories, new_categories):
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
- from pandas.core.algorithms import take_1d
-
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
@@ -2747,7 +2679,7 @@ def _convert_to_list_like(list_like):
elif is_scalar(list_like):
return [list_like]
else:
- # is this reached?
+ # TODO: is this reached?
return [list_like]
@@ -2769,17 +2701,15 @@ def _factorize_from_iterable(values):
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
- from pandas.core.indexes.category import CategoricalIndex
-
if not is_list_like(values):
raise TypeError("Input must be list-like")
- if is_categorical(values):
- values = CategoricalIndex(values)
- # The CategoricalIndex level we want to build has the same categories
+ if is_categorical_dtype(values):
+ values = extract_array(values)
+ # The Categorical we want to build has the same categories
# as values but its codes are by def [0, ..., len(n_categories) - 1]
cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype)
- categories = values._create_from_codes(cat_codes)
+ categories = Categorical.from_codes(cat_codes, dtype=values.dtype)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index df17388856117..bda5f8f4326f1 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -22,7 +22,6 @@
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
- is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like,
@@ -34,7 +33,12 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCDataFrame,
+ ABCIndexClass,
+ ABCPeriodArray,
+ ABCSeries,
+)
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
@@ -42,6 +46,7 @@
from pandas.core import missing, nanops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
import pandas.core.common as com
+from pandas.core.ops.invalid import make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
@@ -52,21 +57,10 @@
class AttributesMixin:
_data = None # type: np.ndarray
- @property
- def _attributes(self):
- # Inheriting subclass should implement _attributes as a list of strings
- raise AbstractMethodError(self)
-
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
- def _get_attributes_dict(self):
- """
- return an attributes dict for my class
- """
- return {k: getattr(self, k, None) for k in self._attributes}
-
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
@@ -166,8 +160,8 @@ def strftime(self, date_format):
Returns
-------
- Index
- Index of formatted strings.
+ ndarray
+ NumPy ndarray of formatted strings.
See Also
--------
@@ -185,9 +179,7 @@ def strftime(self, date_format):
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
- from pandas import Index
-
- return Index(self._format_native_types(date_format=date_format))
+ return self._format_native_types(date_format=date_format).astype(object)
class TimelikeOps:
@@ -468,6 +460,8 @@ def __setitem__(
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
+ if lib.is_scalar(value) and not isna(value):
+ value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
@@ -494,9 +488,6 @@ def __setitem__(
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
- elif not isna(value) and lib.is_integer(value) and value == iNaT:
- # exclude misc e.g. object() and any NAs not allowed above
- value = iNaT
else:
msg = (
"'value' should be a '{scalar}', 'NaT', or array of those. "
@@ -552,18 +543,8 @@ def astype(self, dtype, copy=True):
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
- """
- New view on this array with the same data.
-
- Parameters
- ----------
- dtype : numpy dtype, optional
-
- Returns
- -------
- ndarray
- With the specified `dtype`.
- """
+ if dtype is None or dtype is self.dtype:
+ return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
@@ -926,6 +907,21 @@ def _is_unique(self):
# ------------------------------------------------------------------
# Arithmetic Methods
+ # pow is invalid for all three subclasses; TimedeltaArray will override
+ # the multiplication and division ops
+ __pow__ = make_invalid_op("__pow__")
+ __rpow__ = make_invalid_op("__rpow__")
+ __mul__ = make_invalid_op("__mul__")
+ __rmul__ = make_invalid_op("__rmul__")
+ __truediv__ = make_invalid_op("__truediv__")
+ __rtruediv__ = make_invalid_op("__rtruediv__")
+ __floordiv__ = make_invalid_op("__floordiv__")
+ __rfloordiv__ = make_invalid_op("__rfloordiv__")
+ __mod__ = make_invalid_op("__mod__")
+ __rmod__ = make_invalid_op("__rmod__")
+ __divmod__ = make_invalid_op("__divmod__")
+ __rdivmod__ = make_invalid_op("__rdivmod__")
+
def _add_datetimelike_scalar(self, other):
# Overriden by TimedeltaArray
raise TypeError(
@@ -1009,9 +1005,9 @@ def _add_delta_tdi(self, other):
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
- from pandas import TimedeltaIndex
+ from pandas.core.arrays import TimedeltaArray
- other = TimedeltaIndex(other)
+ other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
@@ -1077,7 +1073,7 @@ def _sub_period_array(self, other):
)
if len(self) != len(other):
- raise ValueError("cannot subtract arrays/indices of " "unequal length")
+ raise ValueError("cannot subtract arrays/indices of unequal length")
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
@@ -1199,7 +1195,7 @@ def _time_shift(self, periods, freq=None):
def __add__(self, other):
other = lib.item_from_zerodim(other)
- if isinstance(other, (ABCSeries, ABCDataFrame)):
+ if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
return NotImplemented
# scalar others
@@ -1233,29 +1229,17 @@ def __add__(self, other):
if not is_period_dtype(self):
maybe_integer_op_deprecated(self)
result = self._addsub_int_array(other, operator.add)
- elif is_float_dtype(other):
- # Explicitly catch invalid dtypes
- raise TypeError(
- "cannot add {dtype}-dtype to {cls}".format(
- dtype=other.dtype, cls=type(self).__name__
- )
- )
- elif is_period_dtype(other):
- # if self is a TimedeltaArray and other is a PeriodArray with
- # a timedelta-like (i.e. Tick) freq, this operation is valid.
- # Defer to the PeriodArray implementation.
- # In remaining cases, this will end up raising TypeError.
- return NotImplemented
- elif is_extension_array_dtype(other):
- # Categorical op will raise; defer explicitly
- return NotImplemented
- else: # pragma: no cover
+ else:
+ # Includes Categorical, other ExtensionArrays
+ # For PeriodDtype, if self is a TimedeltaArray and other is a
+ # PeriodArray with a timedelta-like (i.e. Tick) freq, this
+ # operation is valid. Defer to the PeriodArray implementation.
+ # In remaining cases, this will end up raising TypeError.
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
- # TODO: infer freq?
return TimedeltaArray(result)
return result
@@ -1265,7 +1249,7 @@ def __radd__(self, other):
def __sub__(self, other):
other = lib.item_from_zerodim(other)
- if isinstance(other, (ABCSeries, ABCDataFrame)):
+ if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
return NotImplemented
# scalar others
@@ -1305,34 +1289,18 @@ def __sub__(self, other):
if not is_period_dtype(self):
maybe_integer_op_deprecated(self)
result = self._addsub_int_array(other, operator.sub)
- elif isinstance(other, ABCIndexClass):
- raise TypeError(
- "cannot subtract {cls} and {typ}".format(
- cls=type(self).__name__, typ=type(other).__name__
- )
- )
- elif is_float_dtype(other):
- # Explicitly catch invalid dtypes
- raise TypeError(
- "cannot subtract {dtype}-dtype from {cls}".format(
- dtype=other.dtype, cls=type(self).__name__
- )
- )
- elif is_extension_array_dtype(other):
- # Categorical op will raise; defer explicitly
- return NotImplemented
- else: # pragma: no cover
+ else:
+ # Includes ExtensionArrays, float_dtype
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
- # TODO: infer freq?
return TimedeltaArray(result)
return result
def __rsub__(self, other):
- if is_datetime64_dtype(other) and is_timedelta64_dtype(self):
+ if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self.dtype):
# ndarray[datetime64] cannot be subtracted from self, so
# we need to wrap in DatetimeArray/Index and flip the operation
if not isinstance(other, DatetimeLikeArrayMixin):
@@ -1342,9 +1310,9 @@ def __rsub__(self, other):
other = DatetimeArray(other)
return other - self
elif (
- is_datetime64_any_dtype(self)
+ is_datetime64_any_dtype(self.dtype)
and hasattr(other, "dtype")
- and not is_datetime64_any_dtype(other)
+ and not is_datetime64_any_dtype(other.dtype)
):
# GH#19959 datetime - datetime is well-defined as timedelta,
# but any other type - datetime is not well-defined.
@@ -1353,13 +1321,21 @@ def __rsub__(self, other):
cls=type(self).__name__, typ=type(other).__name__
)
)
- elif is_period_dtype(self) and is_timedelta64_dtype(other):
+ elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(
"cannot subtract {cls} from {dtype}".format(
cls=type(self).__name__, dtype=other.dtype
)
)
+ elif is_timedelta64_dtype(self.dtype):
+ if lib.is_integer(other) or is_integer_dtype(other):
+ # need to subtract before negating, since that flips freq
+ # -self flips self.freq, messing up results
+ return -(self - other)
+
+ return (-self) + other
+
return -(self - other)
# FIXME: DTA/TDA/PA inplace methods should actually be inplace, GH#24115
@@ -1664,11 +1640,10 @@ def _ensure_datetimelike_to_i8(other, to_utc=False):
i8 1d array
"""
from pandas import Index
- from pandas.core.arrays import PeriodArray
if lib.is_scalar(other) and isna(other):
return iNaT
- elif isinstance(other, (PeriodArray, ABCIndexClass, DatetimeLikeArrayMixin)):
+ elif isinstance(other, (ABCPeriodArray, ABCIndexClass, DatetimeLikeArrayMixin)):
# convert tz if needed
if getattr(other, "tz", None) is not None:
if to_utc:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5b540dcce53c8..0335058a69c63 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -53,6 +53,7 @@
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
+from pandas.core.ops.invalid import invalid_comparison
from pandas.tseries.frequencies import get_period_alias, to_offset
from pandas.tseries.offsets import Day, Tick
@@ -171,13 +172,13 @@ def wrapper(self, other):
other = _to_M8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
result = op(self.asi8, other.view("i8"))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other) or np.ndim(other) == 0:
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
@@ -191,20 +192,20 @@ def wrapper(self, other):
):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
if is_object_dtype(other):
- # We have to use _comp_method_OBJECT_ARRAY instead of numpy
+ # We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
- result = ops._comp_method_OBJECT_ARRAY(
+ result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if isinstance(other, (ABCIndexClass, ABCSeries)):
@@ -222,8 +223,6 @@ def wrapper(self, other):
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
- result = com.values_from_object(result)
-
if o_mask.any():
result[o_mask] = nat_result
@@ -328,7 +327,6 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps
# -----------------------------------------------------------------
# Constructors
- _attributes = ["freq", "tz"]
_dtype = None # type: Union[np.dtype, DatetimeTZDtype]
_freq = None
@@ -478,7 +476,7 @@ def _generate_range(
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
- raise ValueError("Must provide freq argument if no data is " "supplied")
+ raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
@@ -496,7 +494,7 @@ def _generate_range(
if start is None and end is None:
if closed is not None:
raise ValueError(
- "Closed has to be None if not both of start" "and end are defined"
+ "Closed has to be None if not both of start and end are defined"
)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
@@ -786,11 +784,11 @@ def _assert_tzawareness_compat(self, other):
elif self.tz is None:
if other_tz is not None:
raise TypeError(
- "Cannot compare tz-naive and tz-aware " "datetime-like objects."
+ "Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
- "Cannot compare tz-naive and tz-aware " "datetime-like objects"
+ "Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
@@ -833,7 +831,7 @@ def _add_offset(self, offset):
except NotImplementedError:
warnings.warn(
- "Non-vectorized DateOffset being applied to Series " "or DatetimeIndex",
+ "Non-vectorized DateOffset being applied to Series or DatetimeIndex",
PerformanceWarning,
)
result = self.astype("O") + offset
@@ -851,7 +849,7 @@ def _sub_datetimelike_scalar(self, other):
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
- "Timestamp subtraction must have the same " "timezones or no timezones"
+ "Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
@@ -957,7 +955,7 @@ def tz_convert(self, tz):
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
- "Cannot convert tz-naive timestamps, use " "tz_localize to localize"
+ "Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
@@ -1065,6 +1063,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
+
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
@@ -1096,6 +1095,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
+
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
@@ -1125,7 +1125,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
nonexistent = "raise"
else:
raise ValueError(
- "The errors argument must be either 'coerce' " "or 'raise'."
+ "The errors argument must be either 'coerce' or 'raise'."
)
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
@@ -1160,7 +1160,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
def to_pydatetime(self):
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
- objects
+ objects.
Returns
-------
@@ -1274,7 +1274,7 @@ def to_period(self, freq=None):
if freq is None:
raise ValueError(
- "You must pass a freq argument as " "current index has none."
+ "You must pass a freq argument as current index has none."
)
freq = get_period_alias(freq)
@@ -1285,7 +1285,7 @@ def to_perioddelta(self, freq):
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
- freq. Used for vectorized offsets
+ freq. Used for vectorized offsets.
Parameters
----------
@@ -1414,17 +1414,69 @@ def date(self):
return tslib.ints_to_pydatetime(timestamps, box="date")
- year = _field_accessor("year", "Y", "The year of the datetime.")
- month = _field_accessor("month", "M", "The month as January=1, December=12. ")
- day = _field_accessor("day", "D", "The days of the datetime.")
- hour = _field_accessor("hour", "h", "The hours of the datetime.")
- minute = _field_accessor("minute", "m", "The minutes of the datetime.")
- second = _field_accessor("second", "s", "The seconds of the datetime.")
+ year = _field_accessor(
+ "year",
+ "Y",
+ """
+ The year of the datetime.
+ """,
+ )
+ month = _field_accessor(
+ "month",
+ "M",
+ """
+ The month as January=1, December=12.
+ """,
+ )
+ day = _field_accessor(
+ "day",
+ "D",
+ """
+        The days of the datetime.
+ """,
+ )
+ hour = _field_accessor(
+ "hour",
+ "h",
+ """
+ The hours of the datetime.
+ """,
+ )
+ minute = _field_accessor(
+ "minute",
+ "m",
+ """
+ The minutes of the datetime.
+ """,
+ )
+ second = _field_accessor(
+ "second",
+ "s",
+ """
+ The seconds of the datetime.
+ """,
+ )
microsecond = _field_accessor(
- "microsecond", "us", "The microseconds of the datetime."
+ "microsecond",
+ "us",
+ """
+ The microseconds of the datetime.
+ """,
+ )
+ nanosecond = _field_accessor(
+ "nanosecond",
+ "ns",
+ """
+ The nanoseconds of the datetime.
+ """,
+ )
+ weekofyear = _field_accessor(
+ "weekofyear",
+ "woy",
+ """
+ The week ordinal of the year.
+ """,
)
- nanosecond = _field_accessor("nanosecond", "ns", "The nanoseconds of the datetime.")
- weekofyear = _field_accessor("weekofyear", "woy", "The week ordinal of the year.")
week = weekofyear
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
@@ -1466,13 +1518,31 @@ def date(self):
weekday_name = _field_accessor(
"weekday_name",
"weekday_name",
- "The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0",
+ """
+ The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0
+ """,
)
- dayofyear = _field_accessor("dayofyear", "doy", "The ordinal day of the year.")
- quarter = _field_accessor("quarter", "q", "The quarter of the date.")
+ dayofyear = _field_accessor(
+ "dayofyear",
+ "doy",
+ """
+ The ordinal day of the year.
+ """,
+ )
+ quarter = _field_accessor(
+ "quarter",
+ "q",
+ """
+ The quarter of the date.
+ """,
+ )
days_in_month = _field_accessor(
- "days_in_month", "dim", "The number of days in the month."
+ "days_in_month",
+ "dim",
+ """
+ The number of days in the month.
+ """,
)
daysinmonth = days_in_month
_is_month_doc = """
@@ -1848,6 +1918,9 @@ def sequence_to_dt64ns(
tz = validate_tz_from_dtype(dtype, tz)
if isinstance(data, ABCIndexClass):
+ if data.nlevels > 1:
+ # Without this check, data._data below is None
+ raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
data = data._data
# By this point we are assured to have either a numpy array or Index
@@ -2047,7 +2120,7 @@ def maybe_convert_dtype(data, copy):
# Note: without explicitly raising here, PeriodIndex
# test_setops.test_join_does_not_recur fails
raise TypeError(
- "Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead"
+ "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
)
elif is_categorical_dtype(data):
@@ -2177,7 +2250,7 @@ def validate_tz_from_dtype(dtype, tz):
dtz = getattr(dtype, "tz", None)
if dtz is not None:
if tz is not None and not timezones.tz_compare(tz, dtz):
- raise ValueError("cannot supply both a tz and a dtype" " with a tz")
+ raise ValueError("cannot supply both a tz and a dtype with a tz")
tz = dtz
if tz is not None and is_datetime64_dtype(dtype):
@@ -2214,9 +2287,10 @@ def _infer_tz_from_endpoints(start, end, tz):
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end)
- except Exception:
+ except AssertionError:
+ # infer_tzinfo raises AssertionError if passed mismatched timezones
raise TypeError(
- "Start and end cannot both be tz-aware with " "different timezones"
+ "Start and end cannot both be tz-aware with different timezones"
)
inferred_tz = timezones.maybe_get_tz(inferred_tz)
@@ -2224,7 +2298,7 @@ def _infer_tz_from_endpoints(start, end, tz):
if tz is not None and inferred_tz is not None:
if not timezones.tz_compare(inferred_tz, tz):
- raise AssertionError("Inferred time zone not equal to passed " "time zone")
+ raise AssertionError("Inferred time zone not equal to passed time zone")
elif inferred_tz is not None:
tz = inferred_tz
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 867122964fe59..7b03bf35faf25 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -21,10 +21,11 @@
is_scalar,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
-from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops, ops
+from pandas.core.algorithms import take
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.tools.numeric import to_numeric
@@ -186,6 +187,7 @@ def coerce_to_array(values, dtype, mask=None, copy=False):
"floating",
"integer",
"mixed-integer",
+ "integer-na",
"mixed-integer-float",
]:
raise TypeError(
@@ -365,6 +367,14 @@ def __array__(self, dtype=None):
"""
return self._coerce_to_ndarray()
+ def __arrow_array__(self, type=None):
+ """
+ Convert myself into a pyarrow Array.
+ """
+ import pyarrow as pa
+
+ return pa.array(self._data, mask=self._mask, type=type)
+
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
@@ -420,8 +430,6 @@ def __iter__(self):
yield self._data[i]
def take(self, indexer, allow_fill=False, fill_value=None):
- from pandas.api.extensions import take
-
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = 1 if isna(fill_value) else fill_value
@@ -592,25 +600,29 @@ def _values_for_argsort(self) -> np.ndarray:
@classmethod
def _create_comparison_method(cls, op):
- def cmp_method(self, other):
+ op_name = op.__name__
- op_name = op.__name__
- mask = None
+ def cmp_method(self, other):
- if isinstance(other, (ABCSeries, ABCIndexClass)):
+ if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
+ other = lib.item_from_zerodim(other)
+ mask = None
+
if isinstance(other, IntegerArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
- if other.ndim > 0 and len(self) != len(other):
+ if other.ndim > 1:
+ raise NotImplementedError(
+ "can only perform ops with 1-d structures"
+ )
+ if len(self) != len(other):
raise ValueError("Lengths must match to compare")
- other = lib.item_from_zerodim(other)
-
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
@@ -683,31 +695,31 @@ def _maybe_mask_result(self, result, mask, other, op_name):
@classmethod
def _create_arithmetic_method(cls, op):
- def integer_arithmetic_method(self, other):
+ op_name = op.__name__
- op_name = op.__name__
- mask = None
+ def integer_arithmetic_method(self, other):
- if isinstance(other, (ABCSeries, ABCIndexClass)):
+ if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
- if getattr(other, "ndim", 0) > 1:
- raise NotImplementedError("can only perform ops with 1-d structures")
+ other = lib.item_from_zerodim(other)
+ mask = None
if isinstance(other, IntegerArray):
other, mask = other._data, other._mask
- elif getattr(other, "ndim", None) == 0:
- other = other.item()
-
elif is_list_like(other):
other = np.asarray(other)
- if not other.ndim:
- other = other.item()
- elif other.ndim == 1:
- if not (is_float_dtype(other) or is_integer_dtype(other)):
- raise TypeError("can only perform ops with numeric values")
+ if other.ndim > 1:
+ raise NotImplementedError(
+ "can only perform ops with 1-d structures"
+ )
+ if len(self) != len(other):
+ raise ValueError("Lengths must match")
+ if not (is_float_dtype(other) or is_integer_dtype(other)):
+ raise TypeError("can only perform ops with numeric values")
+
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with numeric values")
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index a0319fe96896a..1f4b76a259f00 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -25,6 +25,7 @@
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
+ ABCIndexClass,
ABCInterval,
ABCIntervalIndex,
ABCPeriodIndex,
@@ -32,10 +33,11 @@
)
from pandas.core.dtypes.missing import isna, notna
+from pandas.core.algorithms import take, value_counts
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
-from pandas.core.indexes.base import Index, ensure_index
+from pandas.core.indexes.base import ensure_index
_VALID_CLOSED = {"left", "right", "both", "neither"}
_interval_shared_docs = {}
@@ -127,9 +129,9 @@
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
- IntervalArray([(0, 1], (1, 5]],
- closed='right',
- dtype='interval[int64]')
+ <IntervalArray>
+ [(0, 1], (1, 5]]
+ Length: 2, closed: right, dtype: interval[int64]
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
@@ -205,7 +207,7 @@ def _simple_new(
left = left.astype(right.dtype)
if type(left) != type(right):
- msg = "must not have differing left [{ltype}] and right " "[{rtype}] types"
+ msg = "must not have differing left [{ltype}] and right [{rtype}] types"
raise ValueError(
msg.format(ltype=type(left).__name__, rtype=type(right).__name__)
)
@@ -246,9 +248,8 @@ def _from_factorized(cls, values, original):
values = values.astype(original.dtype.subtype)
return cls(values, closed=original.closed)
- _interval_shared_docs[
- "from_breaks"
- ] = """
+ _interval_shared_docs["from_breaks"] = textwrap.dedent(
+ """
Construct an %(klass)s from an array of splits.
Parameters
@@ -275,24 +276,34 @@ def _from_factorized(cls, values, original):
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
- Examples
- --------
- >>> pd.%(qualname)s.from_breaks([0, 1, 2, 3])
- %(klass)s([(0, 1], (1, 2], (2, 3]],
- closed='right',
- dtype='interval[int64]')
+ %(examples)s\
"""
+ )
@classmethod
- @Appender(_interval_shared_docs["from_breaks"] % _shared_docs_kwargs)
+ @Appender(
+ _interval_shared_docs["from_breaks"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
+ <IntervalArray>
+ [(0, 1], (1, 2], (2, 3]]
+ Length: 3, closed: right, dtype: interval[int64]
+ """
+ ),
+ )
+ )
def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
- _interval_shared_docs[
- "from_arrays"
- ] = """
+ _interval_shared_docs["from_arrays"] = textwrap.dedent(
+ """
Construct from two arrays defining the left and right bounds.
Parameters
@@ -338,16 +349,25 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
- Examples
- --------
- >>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3])
- %(klass)s([(0, 1], (1, 2], (2, 3]],
- closed='right',
- dtype='interval[int64]')
+ %(examples)s\
"""
+ )
@classmethod
- @Appender(_interval_shared_docs["from_arrays"] % _shared_docs_kwargs)
+ @Appender(
+ _interval_shared_docs["from_arrays"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
+ >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
+ <IntervalArray>
+ [(0, 1], (1, 2], (2, 3]]
+ Length: 3, closed: right, dtype: interval[int64]
+ """
+ ),
+ )
+ )
def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
@@ -356,54 +376,9 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left, right, closed, copy=copy, dtype=dtype, verify_integrity=True
)
- _interval_shared_docs[
- "from_intervals"
- ] = """
- Construct an %(klass)s from a 1d array of Interval objects
-
- .. deprecated:: 0.23.0
-
- Parameters
- ----------
- data : array-like (1-dimensional)
- Array of Interval objects. All intervals must be closed on the same
- sides.
- copy : boolean, default False
- by-default copy the data, this is compat only and ignored
- dtype : dtype or None, default None
- If None, dtype will be inferred
-
- ..versionadded:: 0.23.0
-
- See Also
- --------
- interval_range : Function to create a fixed frequency IntervalIndex.
- %(klass)s.from_arrays : Construct an %(klass)s from a left and
- right array.
- %(klass)s.from_breaks : Construct an %(klass)s from an array of
- splits.
- %(klass)s.from_tuples : Construct an %(klass)s from an
- array-like of tuples.
-
- Examples
- --------
- >>> pd.%(qualname)s.from_intervals([pd.Interval(0, 1),
- ... pd.Interval(1, 2)])
- %(klass)s([(0, 1], (1, 2]],
- closed='right', dtype='interval[int64]')
-
- The generic Index constructor work identically when it infers an array
- of all intervals:
-
- >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)])
- %(klass)s([(0, 1], (1, 2]],
- closed='right', dtype='interval[int64]')
- """
-
- _interval_shared_docs[
- "from_tuples"
- ] = """
- Construct an %(klass)s from an array-like of tuples
+ _interval_shared_docs["from_tuples"] = textwrap.dedent(
+ """
+ Construct an %(klass)s from an array-like of tuples.
Parameters
----------
@@ -431,15 +406,27 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
- Examples
- --------
- >>> pd.%(qualname)s.from_tuples([(0, 1), (1, 2)])
- %(klass)s([(0, 1], (1, 2]],
- closed='right', dtype='interval[int64]')
+ %(examples)s\
"""
+ )
@classmethod
- @Appender(_interval_shared_docs["from_tuples"] % _shared_docs_kwargs)
+ @Appender(
+ _interval_shared_docs["from_tuples"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
+ <IntervalArray>
+ [(0, 1], (1, 2]]
+ Length: 2, closed: right, dtype: interval[int64]
+ """
+ ),
+ )
+ )
def from_tuples(cls, data, closed="right", copy=False, dtype=None):
if len(data):
left, right = [], []
@@ -457,13 +444,13 @@ def from_tuples(cls, data, closed="right", copy=False, dtype=None):
lhs, rhs = d
except ValueError:
msg = (
- "{name}.from_tuples requires tuples of " "length 2, got {tpl}"
+ "{name}.from_tuples requires tuples of length 2, got {tpl}"
).format(name=name, tpl=d)
raise ValueError(msg)
except TypeError:
- msg = (
- "{name}.from_tuples received an invalid " "item, {tpl}"
- ).format(name=name, tpl=d)
+ msg = ("{name}.from_tuples received an invalid item, {tpl}").format(
+ name=name, tpl=d
+ )
raise TypeError(msg)
left.append(lhs)
right.append(rhs)
@@ -510,7 +497,7 @@ def __getitem__(self, value):
right = self.right[value]
# scalar
- if not isinstance(left, Index):
+ if not isinstance(left, ABCIndexClass):
if isna(left):
return self._fill_value
return Interval(left, right, self.closed)
@@ -589,7 +576,7 @@ def fillna(self, value=None, method=None, limit=None):
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
- raise TypeError("Filling by method is not supported for " "IntervalArray.")
+ raise TypeError("Filling by method is not supported for IntervalArray.")
if limit is not None:
raise TypeError("limit is not supported for IntervalArray.")
@@ -737,18 +724,14 @@ def isna(self):
return isna(self.left)
@property
- def nbytes(self):
+ def nbytes(self) -> int:
return self.left.nbytes + self.right.nbytes
@property
- def size(self):
+ def size(self) -> int:
# Avoid materializing self.values
return self.left.size
- @property
- def shape(self):
- return self.left.shape
-
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
@@ -795,8 +778,6 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
- from pandas.core.algorithms import take
-
nv.validate_take(tuple(), kwargs)
fill_left = fill_right = fill_value
@@ -842,8 +823,6 @@ def value_counts(self, dropna=True):
Series.value_counts
"""
# TODO: implement this is a non-naive way!
- from pandas.core.algorithms import value_counts
-
return value_counts(np.asarray(self), dropna=dropna)
# Formatting
@@ -882,16 +861,20 @@ def _format_data(self):
return summary
def __repr__(self):
- tpl = textwrap.dedent(
- """\
- {cls}({data},
- {lead}closed='{closed}',
- {lead}dtype='{dtype}')"""
+ template = (
+ "{class_name}"
+ "{data}\n"
+ "Length: {length}, closed: {closed}, dtype: {dtype}"
)
- return tpl.format(
- cls=self.__class__.__name__,
- data=self._format_data(),
- lead=" " * len(self.__class__.__name__) + " ",
+ # the short repr has no trailing newline, while the truncated
+ # repr does. So we include a newline in our template, and strip
+ # any trailing newlines from format_object_summary
+ data = self._format_data()
+ class_name = "<{}>\n".format(self.__class__.__name__)
+ return template.format(
+ class_name=class_name,
+ data=data,
+ length=len(self),
closed=self.closed,
dtype=self.dtype,
)
@@ -904,7 +887,7 @@ def _format_space(self):
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
- an Index
+ an Index.
"""
return self._left
@@ -912,7 +895,7 @@ def left(self):
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
- an Index
+ an Index.
"""
return self._right
@@ -920,15 +903,14 @@ def right(self):
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
- neither
+ neither.
"""
return self._closed
- _interval_shared_docs[
- "set_closed"
- ] = """
+ _interval_shared_docs["set_closed"] = textwrap.dedent(
+ """
Return an %(klass)s identical to the current one, but closed on the
- specified side
+ specified side.
.. versionadded:: 0.24.0
@@ -942,20 +924,31 @@ def closed(self):
-------
new_index : %(klass)s
+ %(examples)s\
+ """
+ )
+
+ @Appender(
+ _interval_shared_docs["set_closed"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
Examples
--------
- >>> index = pd.interval_range(0, 3)
+ >>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
- IntervalIndex([(0, 1], (1, 2], (2, 3]],
- closed='right',
- dtype='interval[int64]')
+ <IntervalArray>
+ [(0, 1], (1, 2], (2, 3]]
+ Length: 3, closed: right, dtype: interval[int64]
>>> index.set_closed('both')
- IntervalIndex([[0, 1], [1, 2], [2, 3]],
- closed='both',
- dtype='interval[int64]')
+ <IntervalArray>
+ [[0, 1], [1, 2], [2, 3]]
+ Length: 3, closed: both, dtype: interval[int64]
"""
-
- @Appender(_interval_shared_docs["set_closed"] % _shared_docs_kwargs)
+ ),
+ )
+ )
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
@@ -967,7 +960,7 @@ def set_closed(self, closed):
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
- the IntervalArray
+ the IntervalArray.
"""
try:
return self.right - self.left
@@ -995,7 +988,7 @@ def mid(self):
] = """
Return True if the %(klass)s is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
- else False
+ else False.
"""
# https://github.com/python/mypy/issues/1362
# Mypy does not support decorated properties
@@ -1045,7 +1038,7 @@ def __array__(self, dtype=None):
_interval_shared_docs[
"to_tuples"
] = """
- Return an %(return_type)s of tuples of the form (left, right)
+ Return an %(return_type)s of tuples of the form (left, right).
Parameters
----------
@@ -1078,9 +1071,8 @@ def repeat(self, repeats, axis=None):
right_repeat = self.right.repeat(repeats)
return self._shallow_copy(left=left_repeat, right=right_repeat)
- _interval_shared_docs[
- "contains"
- ] = """
+ _interval_shared_docs["contains"] = textwrap.dedent(
+ """
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
@@ -1105,16 +1097,27 @@ def repeat(self, repeats, axis=None):
Examples
--------
- >>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)])
- >>> intervals
- %(klass)s([(0, 1], (1, 3], (2, 4]],
- closed='right',
- dtype='interval[int64]')
+ %(examples)s
>>> intervals.contains(0.5)
array([ True, False, False])
"""
+ )
- @Appender(_interval_shared_docs["contains"] % _shared_docs_kwargs)
+ @Appender(
+ _interval_shared_docs["contains"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
+ >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
+ >>> intervals
+ <IntervalArray>
+ [(0, 1], (1, 3], (2, 4]]
+ Length: 3, closed: right, dtype: interval[int64]
+ """
+ ),
+ )
+ )
def contains(self, other):
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
@@ -1123,9 +1126,8 @@ def contains(self, other):
other < self.right if self.open_right else other <= self.right
)
- _interval_shared_docs[
- "overlaps"
- ] = """
+ _interval_shared_docs["overlaps"] = textwrap.dedent(
+ """
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
@@ -1136,7 +1138,7 @@ def contains(self, other):
Parameters
----------
- other : Interval
+ other : %(klass)s
Interval to check against for an overlap.
Returns
@@ -1150,11 +1152,7 @@ def contains(self, other):
Examples
--------
- >>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)])
- >>> intervals
- %(klass)s([(0, 1], (1, 3], (2, 4]],
- closed='right',
- dtype='interval[int64]')
+ %(examples)s
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
@@ -1167,9 +1165,25 @@ def contains(self, other):
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
- """
+ """
+ )
- @Appender(_interval_shared_docs["overlaps"] % _shared_docs_kwargs)
+ @Appender(
+ _interval_shared_docs["overlaps"]
+ % dict(
+ klass="IntervalArray",
+ examples=textwrap.dedent(
+ """\
+ >>> data = [(0, 1), (1, 3), (2, 4)]
+ >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
+ >>> intervals
+ <IntervalArray>
+ [(0, 1], (1, 3], (2, 4]]
+ Length: 3, closed: right, dtype: interval[int64]
+ """
+ ),
+ )
+ )
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
@@ -1205,7 +1219,7 @@ def maybe_convert_platform_interval(values):
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
- # empty lists/tuples get object dtype by default, but this is not
+ # empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 9f428a4ac10b2..32da0199e28f8 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -11,10 +11,12 @@
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like, is_list_like
+from pandas.core.dtypes.missing import isna
from pandas import compat
from pandas.core import nanops
-from pandas.core.algorithms import searchsorted
+from pandas.core.algorithms import searchsorted, take, unique
+from pandas.core.construction import extract_array
from pandas.core.missing import backfill_1d, pad_1d
from .base import ExtensionArray, ExtensionOpsMixin
@@ -88,7 +90,7 @@ class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
"""
A pandas ExtensionArray for NumPy data.
- .. versionadded :: 0.24.0
+ .. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
@@ -123,7 +125,11 @@ def __init__(self, values, copy=False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
- raise ValueError("'values' must be a NumPy array.")
+ raise ValueError(
+ "'values' must be a NumPy array, not {typ}".format(
+ typ=type(values).__name__
+ )
+ )
if values.ndim != 1:
raise ValueError("PandasArray must be 1-dimensional.")
@@ -221,8 +227,6 @@ def __getitem__(self, item):
return result
def __setitem__(self, key, value):
- from pandas.core.internals.arrays import extract_array
-
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
@@ -231,26 +235,17 @@ def __setitem__(self, key, value):
if not lib.is_scalar(value):
value = np.asarray(value)
- values = self._ndarray
- t = np.result_type(value, values)
- if t != self._ndarray.dtype:
- values = values.astype(t, casting="safe")
- values[key] = value
- self._dtype = PandasDtype(t)
- self._ndarray = values
- else:
- self._ndarray[key] = value
+ value = np.asarray(value, dtype=self._ndarray.dtype)
+ self._ndarray[key] = value
- def __len__(self):
+ def __len__(self) -> int:
return len(self._ndarray)
@property
- def nbytes(self):
+ def nbytes(self) -> int:
return self._ndarray.nbytes
def isna(self):
- from pandas import isna
-
return isna(self._ndarray)
def fillna(self, value=None, method=None, limit=None):
@@ -281,8 +276,6 @@ def fillna(self, value=None, method=None, limit=None):
return new_values
def take(self, indices, allow_fill=False, fill_value=None):
- from pandas.core.algorithms import take
-
result = take(
self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value
)
@@ -298,8 +291,6 @@ def _values_for_factorize(self):
return self._ndarray, -1
def unique(self):
- from pandas import unique
-
return type(self)(unique(self._ndarray))
# ------------------------------------------------------------------------
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index b0336c46d1953..f2d74794eadf5 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -161,7 +161,6 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
# array priority higher than numpy scalars
__array_priority__ = 1000
- _attributes = ["freq"]
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
@@ -286,13 +285,13 @@ def _generate_range(cls, start, end, periods, freq, fields):
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
- "Can either instantiate from fields " "or endpoints, but not both"
+ "Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
- raise ValueError("Not enough parameters to construct " "Period range")
+ raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
@@ -342,32 +341,92 @@ def __array__(self, dtype=None):
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
- year = _field_accessor("year", 0, "The year of the period")
- month = _field_accessor("month", 3, "The month as January=1, December=12")
- day = _field_accessor("day", 4, "The days of the period")
- hour = _field_accessor("hour", 5, "The hour of the period")
- minute = _field_accessor("minute", 6, "The minute of the period")
- second = _field_accessor("second", 7, "The second of the period")
- weekofyear = _field_accessor("week", 8, "The week ordinal of the year")
+ year = _field_accessor(
+ "year",
+ 0,
+ """
+ The year of the period.
+ """,
+ )
+ month = _field_accessor(
+ "month",
+ 3,
+ """
+ The month as January=1, December=12.
+ """,
+ )
+ day = _field_accessor(
+ "day",
+ 4,
+ """
+ The days of the period.
+ """,
+ )
+ hour = _field_accessor(
+ "hour",
+ 5,
+ """
+ The hour of the period.
+ """,
+ )
+ minute = _field_accessor(
+ "minute",
+ 6,
+ """
+ The minute of the period.
+ """,
+ )
+ second = _field_accessor(
+ "second",
+ 7,
+ """
+ The second of the period.
+ """,
+ )
+ weekofyear = _field_accessor(
+ "week",
+ 8,
+ """
+ The week ordinal of the year.
+ """,
+ )
week = weekofyear
dayofweek = _field_accessor(
- "dayofweek", 10, "The day of the week with Monday=0, Sunday=6"
+ "dayofweek",
+ 10,
+ """
+ The day of the week with Monday=0, Sunday=6.
+ """,
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
- "dayofyear", 9, "The ordinal day of the year"
+ "dayofyear",
+ 9,
+ """
+ The ordinal day of the year.
+ """,
+ )
+ quarter = _field_accessor(
+ "quarter",
+ 2,
+ """
+ The quarter of the date.
+ """,
)
- quarter = _field_accessor("quarter", 2, "The quarter of the date")
qyear = _field_accessor("qyear", 1)
days_in_month = _field_accessor(
- "days_in_month", 11, "The number of days in the month"
+ "days_in_month",
+ 11,
+ """
+ The number of days in the month.
+ """,
)
daysinmonth = days_in_month
@property
def is_leap_year(self):
"""
- Logical indicating if the date belongs to a leap year
+ Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@@ -654,7 +713,12 @@ def _add_delta_tdi(self, other):
"""
assert isinstance(self.freq, Tick) # checked by calling function
- delta = self._check_timedeltalike_freq_compat(other)
+ if not np.all(isna(other)):
+ delta = self._check_timedeltalike_freq_compat(other)
+ else:
+ # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
+ return self + np.timedelta64("NaT")
+
return self._addsub_int_array(delta, operator.add).asi8
def _add_delta(self, other):
@@ -839,7 +903,7 @@ def period_array(
dtype = None
if is_float_dtype(data) and len(data) > 0:
- raise TypeError("PeriodIndex does not allow " "floating point in construction")
+ raise TypeError("PeriodIndex does not allow floating point in construction")
data = ensure_object(data)
@@ -875,7 +939,7 @@ def validate_dtype_freq(dtype, freq):
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
- raise IncompatibleFrequency("specified freq and dtype " "are different")
+ raise IncompatibleFrequency("specified freq and dtype are different")
return freq
diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py
new file mode 100644
index 0000000000000..75f3819fb19fd
--- /dev/null
+++ b/pandas/core/arrays/sparse/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa: F401
+
+from .accessor import SparseAccessor, SparseFrameAccessor
+from .array import BlockIndex, IntIndex, SparseArray, _make_index
+from .dtype import SparseDtype
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
new file mode 100644
index 0000000000000..57fd6d284af31
--- /dev/null
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -0,0 +1,336 @@
+"""Sparse accessor"""
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.dtypes.cast import find_common_type
+
+from pandas.core.accessor import PandasDelegate, delegate_names
+
+from .array import SparseArray
+from .dtype import SparseDtype
+
+
+class BaseAccessor:
+ _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
+
+ def __init__(self, data=None):
+ self._parent = data
+ self._validate(data)
+
+ def _validate(self, data):
+ raise NotImplementedError
+
+
+@delegate_names(
+ SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
+)
+class SparseAccessor(BaseAccessor, PandasDelegate):
+ """
+ Accessor for SparseSparse from other sparse matrix data types.
+ """
+
+ def _validate(self, data):
+ if not isinstance(data.dtype, SparseDtype):
+ raise AttributeError(self._validation_msg)
+
+ def _delegate_property_get(self, name, *args, **kwargs):
+ return getattr(self._parent.array, name)
+
+ def _delegate_method(self, name, *args, **kwargs):
+ if name == "from_coo":
+ return self.from_coo(*args, **kwargs)
+ elif name == "to_coo":
+ return self.to_coo(*args, **kwargs)
+ else:
+ raise ValueError
+
+ @classmethod
+ def from_coo(cls, A, dense_index=False):
+ """
+ Create a Series with sparse values from a scipy.sparse.coo_matrix.
+
+ Parameters
+ ----------
+ A : scipy.sparse.coo_matrix
+ dense_index : bool, default False
+ If False (default), the SparseSeries index consists of only the
+ coords of the non-null entries of the original coo_matrix.
+ If True, the SparseSeries index consists of the full sorted
+ (row, col) coordinates of the coo_matrix.
+
+ Returns
+ -------
+ s : Series
+ A Series with sparse values.
+
+ Examples
+ --------
+ >>> from scipy import sparse
+ >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
+ shape=(3, 4))
+ >>> A
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
+ with 3 stored elements in COOrdinate format>
+ >>> A.todense()
+ matrix([[ 0., 0., 1., 2.],
+ [ 3., 0., 0., 0.],
+ [ 0., 0., 0., 0.]])
+ >>> ss = pd.Series.sparse.from_coo(A)
+ >>> ss
+ 0 2 1
+ 3 2
+ 1 0 3
+ dtype: float64
+ BlockIndex
+ Block locations: array([0], dtype=int32)
+ Block lengths: array([3], dtype=int32)
+ """
+ from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
+ from pandas import Series
+
+ result = _coo_to_sparse_series(A, dense_index=dense_index)
+ result = Series(result.array, index=result.index, copy=False)
+
+ return result
+
+ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
+ """
+ Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
+
+ Use row_levels and column_levels to determine the row and column
+ coordinates respectively. row_levels and column_levels are the names
+ (labels) or numbers of the levels. {row_levels, column_levels} must be
+ a partition of the MultiIndex level names (or numbers).
+
+ Parameters
+ ----------
+ row_levels : tuple/list
+ column_levels : tuple/list
+ sort_labels : bool, default False
+ Sort the row and column labels before forming the sparse matrix.
+
+ Returns
+ -------
+ y : scipy.sparse.coo_matrix
+ rows : list (row labels)
+ columns : list (column labels)
+
+ Examples
+ --------
+ >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
+ >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
+ (1, 2, 'a', 1),
+ (1, 1, 'b', 0),
+ (1, 1, 'b', 1),
+ (2, 1, 'b', 0),
+ (2, 1, 'b', 1)],
+ names=['A', 'B', 'C', 'D'])
+ >>> ss = s.astype("Sparse")
+ >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
+ ... column_levels=['C', 'D'],
+ ... sort_labels=True)
+ >>> A
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
+ with 3 stored elements in COOrdinate format>
+ >>> A.todense()
+ matrix([[ 0., 0., 1., 3.],
+ [ 3., 0., 0., 0.],
+ [ 0., 0., 0., 0.]])
+ >>> rows
+ [(1, 1), (1, 2), (2, 1)]
+ >>> columns
+ [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
+ """
+ from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo
+
+ A, rows, columns = _sparse_series_to_coo(
+ self._parent, row_levels, column_levels, sort_labels=sort_labels
+ )
+ return A, rows, columns
+
+ def to_dense(self):
+ """
+ Convert a Series from sparse values to dense.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ Series:
+ A Series with the same values, stored as a dense array.
+
+ Examples
+ --------
+ >>> series = pd.Series(pd.SparseArray([0, 1, 0]))
+ >>> series
+ 0 0
+ 1 1
+ 2 0
+ dtype: Sparse[int64, 0]
+
+ >>> series.sparse.to_dense()
+ 0 0
+ 1 1
+ 2 0
+ dtype: int64
+ """
+ from pandas import Series
+
+ return Series(
+ self._parent.array.to_dense(),
+ index=self._parent.index,
+ name=self._parent.name,
+ )
+
+
+class SparseFrameAccessor(BaseAccessor, PandasDelegate):
+ """
+ DataFrame accessor for sparse data.
+
+ .. versionadded:: 0.25.0
+ """
+
+ def _validate(self, data):
+ dtypes = data.dtypes
+ if not all(isinstance(t, SparseDtype) for t in dtypes):
+ raise AttributeError(self._validation_msg)
+
+ @classmethod
+ def from_spmatrix(cls, data, index=None, columns=None):
+ """
+ Create a new DataFrame from a scipy sparse matrix.
+
+ .. versionadded:: 0.25.0
+
+ Parameters
+ ----------
+ data : scipy.sparse.spmatrix
+ Must be convertible to csc format.
+ index, columns : Index, optional
+ Row and column labels to use for the resulting DataFrame.
+ Defaults to a RangeIndex.
+
+ Returns
+ -------
+ DataFrame
+ Each column of the DataFrame is stored as a
+ :class:`SparseArray`.
+
+ Examples
+ --------
+ >>> import scipy.sparse
+ >>> mat = scipy.sparse.eye(3)
+ >>> pd.DataFrame.sparse.from_spmatrix(mat)
+ 0 1 2
+ 0 1.0 0.0 0.0
+ 1 0.0 1.0 0.0
+ 2 0.0 0.0 1.0
+ """
+ from pandas import DataFrame
+
+ data = data.tocsc()
+ index, columns = cls._prep_index(data, index, columns)
+ sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])]
+ data = dict(enumerate(sparrays))
+ result = DataFrame(data, index=index)
+ result.columns = columns
+ return result
+
+ def to_dense(self):
+ """
+ Convert a DataFrame with sparse values to dense.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ DataFrame
+ A DataFrame with the same values stored as dense arrays.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])})
+ >>> df.sparse.to_dense()
+ A
+ 0 0
+ 1 1
+ 2 0
+ """
+ from pandas import DataFrame
+
+ data = {k: v.array.to_dense() for k, v in self._parent.items()}
+ return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
+
+ def to_coo(self):
+ """
+ Return the contents of the frame as a sparse SciPy COO matrix.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ coo_matrix : scipy.sparse.spmatrix
+ If the caller is heterogeneous and contains booleans or objects,
+ the result will be of dtype=object. See Notes.
+
+ Notes
+ -----
+ The dtype will be the lowest-common-denominator type (implicit
+ upcasting); that is to say if the dtypes (even of numeric types)
+ are mixed, the one that accommodates all will be chosen.
+
+ e.g. If the dtypes are float16 and float32, dtype will be upcast to
+ float32. By numpy.find_common_type convention, mixing int64 and
+ and uint64 will result in a float64 dtype.
+ """
+ import_optional_dependency("scipy")
+ from scipy.sparse import coo_matrix
+
+ dtype = find_common_type(self._parent.dtypes)
+ if isinstance(dtype, SparseDtype):
+ dtype = dtype.subtype
+
+ cols, rows, datas = [], [], []
+ for col, name in enumerate(self._parent):
+ s = self._parent[name]
+ row = s.array.sp_index.to_int_index().indices
+ cols.append(np.repeat(col, len(row)))
+ rows.append(row)
+ datas.append(s.array.sp_values.astype(dtype, copy=False))
+
+ cols = np.concatenate(cols)
+ rows = np.concatenate(rows)
+ datas = np.concatenate(datas)
+ return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
+
+ @property
+ def density(self) -> float:
+ """
+ Ratio of non-sparse points to total (dense) data points
+ represented in the DataFrame.
+ """
+ return np.mean([column.array.density for _, column in self._parent.items()])
+
+ @staticmethod
+ def _prep_index(data, index, columns):
+ import pandas.core.indexes.base as ibase
+
+ N, K = data.shape
+ if index is None:
+ index = ibase.default_index(N)
+ if columns is None:
+ columns = ibase.default_index(K)
+
+ if len(columns) != K:
+ raise ValueError(
+ "Column length mismatch: {columns} vs. {K}".format(
+ columns=len(columns), K=K
+ )
+ )
+ if len(index) != N:
+ raise ValueError(
+ "Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
+ )
+ return index, columns
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse/array.py
similarity index 69%
rename from pandas/core/arrays/sparse.py
rename to pandas/core/arrays/sparse/array.py
index 65976021f5053..5acc922734529 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -4,7 +4,6 @@
from collections import abc
import numbers
import operator
-import re
from typing import Any, Callable
import warnings
@@ -15,11 +14,9 @@
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
-from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
-from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe,
construct_1d_arraylike_from_scalar,
@@ -37,357 +34,25 @@
is_string_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
+ ABCDataFrame,
ABCIndexClass,
ABCSeries,
ABCSparseArray,
- ABCSparseSeries,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
-from pandas._typing import Dtype
-from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
+from pandas.core.construction import sanitize_array
from pandas.core.missing import interpolate_2d
import pandas.core.ops as ops
import pandas.io.formats.printing as printing
-
-# ----------------------------------------------------------------------------
-# Dtype
-@register_extension_dtype
-class SparseDtype(ExtensionDtype):
- """
- Dtype for data stored in :class:`SparseArray`.
-
- This dtype implements the pandas ExtensionDtype interface.
-
- .. versionadded:: 0.24.0
-
- Parameters
- ----------
- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
- The dtype of the underlying array storing the non-fill value values.
- fill_value : scalar, optional
- The scalar value not stored in the SparseArray. By default, this
- depends on `dtype`.
-
- =========== ==========
- dtype na_value
- =========== ==========
- float ``np.nan``
- int ``0``
- bool ``False``
- datetime64 ``pd.NaT``
- timedelta64 ``pd.NaT``
- =========== ==========
-
- The default value may be overridden by specifying a `fill_value`.
-
- Attributes
- ----------
- None
-
- Methods
- -------
- None
- """
-
- # We include `_is_na_fill_value` in the metadata to avoid hash collisions
- # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
- # Without is_na_fill_value in the comparison, those would be equal since
- # hash(nan) is (sometimes?) 0.
- _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
-
- def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
- from pandas.core.dtypes.missing import na_value_for_dtype
- from pandas.core.dtypes.common import pandas_dtype, is_string_dtype, is_scalar
-
- if isinstance(dtype, type(self)):
- if fill_value is None:
- fill_value = dtype.fill_value
- dtype = dtype.subtype
-
- dtype = pandas_dtype(dtype)
- if is_string_dtype(dtype):
- dtype = np.dtype("object")
-
- if fill_value is None:
- fill_value = na_value_for_dtype(dtype)
-
- if not is_scalar(fill_value):
- raise ValueError(
- "fill_value must be a scalar. Got {} " "instead".format(fill_value)
- )
- self._dtype = dtype
- self._fill_value = fill_value
-
- def __hash__(self):
- # Python3 doesn't inherit __hash__ when a base class overrides
- # __eq__, so we explicitly do it here.
- return super().__hash__()
-
- def __eq__(self, other):
- # We have to override __eq__ to handle NA values in _metadata.
- # The base class does simple == checks, which fail for NA.
- if isinstance(other, str):
- try:
- other = self.construct_from_string(other)
- except TypeError:
- return False
-
- if isinstance(other, type(self)):
- subtype = self.subtype == other.subtype
- if self._is_na_fill_value:
- # this case is complicated by two things:
- # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
- # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
- # i.e. we want to treat any floating-point NaN as equal, but
- # not a floating-point NaN and a datetime NaT.
- fill_value = (
- other._is_na_fill_value
- and isinstance(self.fill_value, type(other.fill_value))
- or isinstance(other.fill_value, type(self.fill_value))
- )
- else:
- fill_value = self.fill_value == other.fill_value
-
- return subtype and fill_value
- return False
-
- @property
- def fill_value(self):
- """
- The fill value of the array.
-
- Converting the SparseArray to a dense ndarray will fill the
- array with this value.
-
- .. warning::
-
- It's possible to end up with a SparseArray that has ``fill_value``
- values in ``sp_values``. This can occur, for example, when setting
- ``SparseArray.fill_value`` directly.
- """
- return self._fill_value
-
- @property
- def _is_na_fill_value(self):
- from pandas.core.dtypes.missing import isna
-
- return isna(self.fill_value)
-
- @property
- def _is_numeric(self):
- from pandas.core.dtypes.common import is_object_dtype
-
- return not is_object_dtype(self.subtype)
-
- @property
- def _is_boolean(self):
- from pandas.core.dtypes.common import is_bool_dtype
-
- return is_bool_dtype(self.subtype)
-
- @property
- def kind(self):
- """
- The sparse kind. Either 'integer', or 'block'.
- """
- return self.subtype.kind
-
- @property
- def type(self):
- return self.subtype.type
-
- @property
- def subtype(self):
- return self._dtype
-
- @property
- def name(self):
- return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
-
- def __repr__(self):
- return self.name
-
- @classmethod
- def construct_array_type(cls):
- return SparseArray
-
- @classmethod
- def construct_from_string(cls, string):
- """
- Construct a SparseDtype from a string form.
-
- Parameters
- ----------
- string : str
- Can take the following forms.
-
- string dtype
- ================ ============================
- 'int' SparseDtype[np.int64, 0]
- 'Sparse' SparseDtype[np.float64, nan]
- 'Sparse[int]' SparseDtype[np.int64, 0]
- 'Sparse[int, 0]' SparseDtype[np.int64, 0]
- ================ ============================
-
- It is not possible to specify non-default fill values
- with a string. An argument like ``'Sparse[int, 1]'``
- will raise a ``TypeError`` because the default fill value
- for integers is 0.
-
- Returns
- -------
- SparseDtype
- """
- msg = "Could not construct SparseDtype from '{}'".format(string)
- if string.startswith("Sparse"):
- try:
- sub_type, has_fill_value = cls._parse_subtype(string)
- result = SparseDtype(sub_type)
- except Exception:
- raise TypeError(msg)
- else:
- msg = (
- "Could not construct SparseDtype from '{}'.\n\nIt "
- "looks like the fill_value in the string is not "
- "the default for the dtype. Non-default fill_values "
- "are not supported. Use the 'SparseDtype()' "
- "constructor instead."
- )
- if has_fill_value and str(result) != string:
- raise TypeError(msg.format(string))
- return result
- else:
- raise TypeError(msg)
-
- @staticmethod
- def _parse_subtype(dtype):
- """
- Parse a string to get the subtype
-
- Parameters
- ----------
- dtype : str
- A string like
-
- * Sparse[subtype]
- * Sparse[subtype, fill_value]
-
- Returns
- -------
- subtype : str
-
- Raises
- ------
- ValueError
- When the subtype cannot be extracted.
- """
- xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
- m = xpr.match(dtype)
- has_fill_value = False
- if m:
- subtype = m.groupdict()["subtype"]
- has_fill_value = m.groupdict()["fill_value"] or has_fill_value
- elif dtype == "Sparse":
- subtype = "float64"
- else:
- raise ValueError("Cannot parse {}".format(dtype))
- return subtype, has_fill_value
-
- @classmethod
- def is_dtype(cls, dtype):
- dtype = getattr(dtype, "dtype", dtype)
- if isinstance(dtype, str) and dtype.startswith("Sparse"):
- sub_type, _ = cls._parse_subtype(dtype)
- dtype = np.dtype(sub_type)
- elif isinstance(dtype, cls):
- return True
- return isinstance(dtype, np.dtype) or dtype == "Sparse"
-
- def update_dtype(self, dtype):
- """
- Convert the SparseDtype to a new dtype.
-
- This takes care of converting the ``fill_value``.
-
- Parameters
- ----------
- dtype : Union[str, numpy.dtype, SparseDtype]
- The new dtype to use.
-
- * For a SparseDtype, it is simply returned
- * For a NumPy dtype (or str), the current fill value
- is converted to the new dtype, and a SparseDtype
- with `dtype` and the new fill value is returned.
-
- Returns
- -------
- SparseDtype
- A new SparseDtype with the corret `dtype` and fill value
- for that `dtype`.
-
- Raises
- ------
- ValueError
- When the current fill value cannot be converted to the
- new `dtype` (e.g. trying to convert ``np.nan`` to an
- integer dtype).
-
-
- Examples
- --------
- >>> SparseDtype(int, 0).update_dtype(float)
- Sparse[float64, 0.0]
-
- >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
- Sparse[float64, nan]
- """
- cls = type(self)
- dtype = pandas_dtype(dtype)
-
- if not isinstance(dtype, cls):
- fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
- dtype = cls(dtype, fill_value=fill_value)
-
- return dtype
-
- @property
- def _subtype_with_str(self):
- """
- Whether the SparseDtype's subtype should be considered ``str``.
-
- Typically, pandas will store string data in an object-dtype array.
- When converting values to a dtype, e.g. in ``.astype``, we need to
- be more specific, we need the actual underlying type.
-
- Returns
- -------
-
- >>> SparseDtype(int, 1)._subtype_with_str
- dtype('int64')
-
- >>> SparseDtype(object, 1)._subtype_with_str
- dtype('O')
-
- >>> dtype = SparseDtype(str, '')
- >>> dtype.subtype
- dtype('O')
-
- >>> dtype._subtype_with_str
- str
- """
- if isinstance(self.fill_value, str):
- return type(self.fill_value)
- return self.subtype
-
+from .dtype import SparseDtype
# ----------------------------------------------------------------------------
# Array
@@ -609,15 +274,11 @@ def __init__(
dtype=None,
copy=False,
):
- from pandas.core.internals import SingleBlockManager
-
- if isinstance(data, SingleBlockManager):
- data = data.internal_values()
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
- if isinstance(data, (type(self), ABCSparseSeries)):
+ if isinstance(data, type(self)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
@@ -672,7 +333,6 @@ def __init__(
if not is_array_like(data):
try:
# probably shared code in sanitize_series
- from pandas.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
@@ -851,7 +511,7 @@ def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
- def kind(self):
+ def kind(self) -> str:
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
@@ -866,7 +526,7 @@ def _valid_sp_values(self):
mask = notna(sp_vals)
return sp_vals[mask]
- def __len__(self):
+ def __len__(self) -> int:
return self.sp_index.length
@property
@@ -880,7 +540,7 @@ def _fill_value_matches(self, fill_value):
return self.fill_value == fill_value
@property
- def nbytes(self):
+ def nbytes(self) -> int:
return self.sp_values.nbytes + self.sp_index.nbytes
@property
@@ -898,7 +558,7 @@ def density(self):
return r
@property
- def npoints(self):
+ def npoints(self) -> int:
"""
The number of non- ``fill_value`` points.
@@ -928,8 +588,6 @@ def values(self):
return self.to_dense()
def isna(self):
- from pandas import isna
-
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
@@ -1153,7 +811,7 @@ def _get_val_at(self, loc):
def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError(
- "'indices' must be an array, not a " "scalar '{}'.".format(indices)
+ "'indices' must be an array, not a scalar '{}'.".format(indices)
)
indices = np.asarray(indices, dtype=np.int32)
@@ -1190,7 +848,7 @@ def _take_with_fill(self, indices, fill_value=None):
taken.fill(fill_value)
return taken
else:
- raise IndexError("cannot do a non-empty take from an empty " "axes.")
+ raise IndexError("cannot do a non-empty take from an empty axes.")
sp_indexer = self.sp_index.lookup_array(indices)
@@ -1240,7 +898,7 @@ def _take_without_fill(self, indices):
if (indices.max() >= n) or (indices.min() < -n):
if n == 0:
- raise IndexError("cannot do a non-empty take from an " "empty axes.")
+ raise IndexError("cannot do a non-empty take from an empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
@@ -1707,6 +1365,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
for sp_value, fv in zip(sp_values, fill_value)
)
return arrays
+ elif is_scalar(sp_values):
+ # e.g. reductions
+ return sp_values
return self._simple_new(
sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
@@ -1746,13 +1407,15 @@ def sparse_unary_method(self):
@classmethod
def _create_arithmetic_method(cls, op):
- def sparse_arithmetic_method(self, other):
- op_name = op.__name__
+ op_name = op.__name__
- if isinstance(other, (ABCSeries, ABCIndexClass)):
+ def sparse_arithmetic_method(self, other):
+ if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to dispatch to us.
return NotImplemented
+ other = lib.item_from_zerodim(other)
+
if isinstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
@@ -1795,11 +1458,11 @@ def sparse_arithmetic_method(self, other):
@classmethod
def _create_comparison_method(cls, op):
- def cmp_method(self, other):
- op_name = op.__name__
+ op_name = op.__name__
+ if op_name in {"and_", "or_"}:
+ op_name = op_name[:-1]
- if op_name in {"and_", "or_"}:
- op_name = op_name[:-1]
+ def cmp_method(self, other):
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
@@ -1869,15 +1532,6 @@ def _formatter(self, boxed=False):
SparseArray._add_unary_ops()
-def _maybe_to_dense(obj):
- """
- try to convert to dense
- """
- if hasattr(obj, "to_dense"):
- return obj.to_dense()
- return obj
-
-
def make_sparse(arr, kind="block", fill_value=None, dtype=None, copy=False):
"""
Convert ndarray to sparse format
@@ -1943,330 +1597,3 @@ def _make_index(length, indices, kind):
else: # pragma: no cover
raise ValueError("must be block or integer type")
return index
-
-
-# ----------------------------------------------------------------------------
-# Accessor
-
-
-class BaseAccessor:
- _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
-
- def __init__(self, data=None):
- self._parent = data
- self._validate(data)
-
- def _validate(self, data):
- raise NotImplementedError
-
-
-@delegate_names(
- SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
-)
-class SparseAccessor(BaseAccessor, PandasDelegate):
- """
- Accessor for SparseSparse from other sparse matrix data types.
- """
-
- def _validate(self, data):
- if not isinstance(data.dtype, SparseDtype):
- raise AttributeError(self._validation_msg)
-
- def _delegate_property_get(self, name, *args, **kwargs):
- return getattr(self._parent.array, name)
-
- def _delegate_method(self, name, *args, **kwargs):
- if name == "from_coo":
- return self.from_coo(*args, **kwargs)
- elif name == "to_coo":
- return self.to_coo(*args, **kwargs)
- else:
- raise ValueError
-
- @classmethod
- def from_coo(cls, A, dense_index=False):
- """
- Create a SparseSeries from a scipy.sparse.coo_matrix.
-
- Parameters
- ----------
- A : scipy.sparse.coo_matrix
- dense_index : bool, default False
- If False (default), the SparseSeries index consists of only the
- coords of the non-null entries of the original coo_matrix.
- If True, the SparseSeries index consists of the full sorted
- (row, col) coordinates of the coo_matrix.
-
- Returns
- -------
- s : SparseSeries
-
- Examples
- --------
- >>> from scipy import sparse
- >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
- shape=(3, 4))
- >>> A
- <3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
- >>> A.todense()
- matrix([[ 0., 0., 1., 2.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
- >>> ss = pd.SparseSeries.from_coo(A)
- >>> ss
- 0 2 1
- 3 2
- 1 0 3
- dtype: float64
- BlockIndex
- Block locations: array([0], dtype=int32)
- Block lengths: array([3], dtype=int32)
- """
- from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series
- from pandas import Series
-
- result = _coo_to_sparse_series(A, dense_index=dense_index, sparse_series=False)
- result = Series(result.array, index=result.index, copy=False)
-
- return result
-
- def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
- """
- Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
-
- Use row_levels and column_levels to determine the row and column
- coordinates respectively. row_levels and column_levels are the names
- (labels) or numbers of the levels. {row_levels, column_levels} must be
- a partition of the MultiIndex level names (or numbers).
-
- Parameters
- ----------
- row_levels : tuple/list
- column_levels : tuple/list
- sort_labels : bool, default False
- Sort the row and column labels before forming the sparse matrix.
-
- Returns
- -------
- y : scipy.sparse.coo_matrix
- rows : list (row labels)
- columns : list (column labels)
-
- Examples
- --------
- >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
- >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
- (1, 2, 'a', 1),
- (1, 1, 'b', 0),
- (1, 1, 'b', 1),
- (2, 1, 'b', 0),
- (2, 1, 'b', 1)],
- names=['A', 'B', 'C', 'D'])
- >>> ss = s.to_sparse()
- >>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
- column_levels=['C', 'D'],
- sort_labels=True)
- >>> A
- <3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
- >>> A.todense()
- matrix([[ 0., 0., 1., 3.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
- >>> rows
- [(1, 1), (1, 2), (2, 1)]
- >>> columns
- [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
- """
- from pandas.core.sparse.scipy_sparse import _sparse_series_to_coo
-
- A, rows, columns = _sparse_series_to_coo(
- self._parent, row_levels, column_levels, sort_labels=sort_labels
- )
- return A, rows, columns
-
- def to_dense(self):
- """
- Convert a Series from sparse values to dense.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- Series:
- A Series with the same values, stored as a dense array.
-
- Examples
- --------
- >>> series = pd.Series(pd.SparseArray([0, 1, 0]))
- >>> series
- 0 0
- 1 1
- 2 0
- dtype: Sparse[int64, 0]
-
- >>> series.sparse.to_dense()
- 0 0
- 1 1
- 2 0
- dtype: int64
- """
- from pandas import Series
-
- return Series(
- self._parent.array.to_dense(),
- index=self._parent.index,
- name=self._parent.name,
- )
-
-
-class SparseFrameAccessor(BaseAccessor, PandasDelegate):
- """
- DataFrame accessor for sparse data.
-
- .. versionadded :: 0.25.0
- """
-
- def _validate(self, data):
- dtypes = data.dtypes
- if not all(isinstance(t, SparseDtype) for t in dtypes):
- raise AttributeError(self._validation_msg)
-
- @classmethod
- def from_spmatrix(cls, data, index=None, columns=None):
- """
- Create a new DataFrame from a scipy sparse matrix.
-
- .. versionadded:: 0.25.0
-
- Parameters
- ----------
- data : scipy.sparse.spmatrix
- Must be convertible to csc format.
- index, columns : Index, optional
- Row and column labels to use for the resulting DataFrame.
- Defaults to a RangeIndex.
-
- Returns
- -------
- DataFrame
- Each column of the DataFrame is stored as a
- :class:`SparseArray`.
-
- Examples
- --------
- >>> import scipy.sparse
- >>> mat = scipy.sparse.eye(3)
- >>> pd.DataFrame.sparse.from_spmatrix(mat)
- 0 1 2
- 0 1.0 0.0 0.0
- 1 0.0 1.0 0.0
- 2 0.0 0.0 1.0
- """
- from pandas import DataFrame
-
- data = data.tocsc()
- index, columns = cls._prep_index(data, index, columns)
- sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])]
- data = dict(enumerate(sparrays))
- result = DataFrame(data, index=index)
- result.columns = columns
- return result
-
- def to_dense(self):
- """
- Convert a DataFrame with sparse values to dense.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- DataFrame
- A DataFrame with the same values stored as dense arrays.
-
- Examples
- --------
- >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])})
- >>> df.sparse.to_dense()
- A
- 0 0
- 1 1
- 2 0
- """
- from pandas import DataFrame
-
- data = {k: v.array.to_dense() for k, v in self._parent.items()}
- return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
-
- def to_coo(self):
- """
- Return the contents of the frame as a sparse SciPy COO matrix.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- coo_matrix : scipy.sparse.spmatrix
- If the caller is heterogeneous and contains booleans or objects,
- the result will be of dtype=object. See Notes.
-
- Notes
- -----
- The dtype will be the lowest-common-denominator type (implicit
- upcasting); that is to say if the dtypes (even of numeric types)
- are mixed, the one that accommodates all will be chosen.
-
- e.g. If the dtypes are float16 and float32, dtype will be upcast to
- float32. By numpy.find_common_type convention, mixing int64 and
- and uint64 will result in a float64 dtype.
- """
- import_optional_dependency("scipy")
- from scipy.sparse import coo_matrix
-
- dtype = find_common_type(self._parent.dtypes)
- if isinstance(dtype, SparseDtype):
- dtype = dtype.subtype
-
- cols, rows, datas = [], [], []
- for col, name in enumerate(self._parent):
- s = self._parent[name]
- row = s.array.sp_index.to_int_index().indices
- cols.append(np.repeat(col, len(row)))
- rows.append(row)
- datas.append(s.array.sp_values.astype(dtype, copy=False))
-
- cols = np.concatenate(cols)
- rows = np.concatenate(rows)
- datas = np.concatenate(datas)
- return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
-
- @property
- def density(self) -> float:
- """
- Ratio of non-sparse points to total (dense) data points
- represented in the DataFrame.
- """
- return np.mean([column.array.density for _, column in self._parent.items()])
-
- @staticmethod
- def _prep_index(data, index, columns):
- import pandas.core.indexes.base as ibase
-
- N, K = data.shape
- if index is None:
- index = ibase.default_index(N)
- if columns is None:
- columns = ibase.default_index(K)
-
- if len(columns) != K:
- raise ValueError(
- "Column length mismatch: {columns} vs. {K}".format(
- columns=len(columns), K=K
- )
- )
- if len(index) != N:
- raise ValueError(
- "Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
- )
- return index, columns
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
new file mode 100644
index 0000000000000..6fd73ae14fff1
--- /dev/null
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -0,0 +1,343 @@
+"""Sparse Dtype"""
+
+import re
+from typing import Any
+
+import numpy as np
+
+from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.cast import astype_nansafe
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_object_dtype,
+ is_scalar,
+ is_string_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import register_extension_dtype
+from pandas.core.dtypes.missing import isna, na_value_for_dtype
+
+from pandas._typing import Dtype
+
+
+@register_extension_dtype
+class SparseDtype(ExtensionDtype):
+ """
+ Dtype for data stored in :class:`SparseArray`.
+
+ This dtype implements the pandas ExtensionDtype interface.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
+ The dtype of the underlying array storing the non-fill value values.
+ fill_value : scalar, optional
+ The scalar value not stored in the SparseArray. By default, this
+ depends on `dtype`.
+
+ =========== ==========
+ dtype na_value
+ =========== ==========
+ float ``np.nan``
+ int ``0``
+ bool ``False``
+ datetime64 ``pd.NaT``
+ timedelta64 ``pd.NaT``
+ =========== ==========
+
+ The default value may be overridden by specifying a `fill_value`.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+ """
+
+ # We include `_is_na_fill_value` in the metadata to avoid hash collisions
+ # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
+ # Without is_na_fill_value in the comparison, those would be equal since
+ # hash(nan) is (sometimes?) 0.
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
+
+ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
+
+ if isinstance(dtype, type(self)):
+ if fill_value is None:
+ fill_value = dtype.fill_value
+ dtype = dtype.subtype
+
+ dtype = pandas_dtype(dtype)
+ if is_string_dtype(dtype):
+ dtype = np.dtype("object")
+
+ if fill_value is None:
+ fill_value = na_value_for_dtype(dtype)
+
+ if not is_scalar(fill_value):
+ raise ValueError(
+ "fill_value must be a scalar. Got {} instead".format(fill_value)
+ )
+ self._dtype = dtype
+ self._fill_value = fill_value
+
+ def __hash__(self):
+ # Python3 doesn't inherit __hash__ when a base class overrides
+ # __eq__, so we explicitly do it here.
+ return super().__hash__()
+
+ def __eq__(self, other):
+ # We have to override __eq__ to handle NA values in _metadata.
+ # The base class does simple == checks, which fail for NA.
+ if isinstance(other, str):
+ try:
+ other = self.construct_from_string(other)
+ except TypeError:
+ return False
+
+ if isinstance(other, type(self)):
+ subtype = self.subtype == other.subtype
+ if self._is_na_fill_value:
+ # this case is complicated by two things:
+ # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
+ # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
+ # i.e. we want to treat any floating-point NaN as equal, but
+ # not a floating-point NaN and a datetime NaT.
+ fill_value = (
+ other._is_na_fill_value
+ and isinstance(self.fill_value, type(other.fill_value))
+ or isinstance(other.fill_value, type(self.fill_value))
+ )
+ else:
+ fill_value = self.fill_value == other.fill_value
+
+ return subtype and fill_value
+ return False
+
+ @property
+ def fill_value(self):
+ """
+ The fill value of the array.
+
+ Converting the SparseArray to a dense ndarray will fill the
+ array with this value.
+
+ .. warning::
+
+ It's possible to end up with a SparseArray that has ``fill_value``
+ values in ``sp_values``. This can occur, for example, when setting
+ ``SparseArray.fill_value`` directly.
+ """
+ return self._fill_value
+
+ @property
+ def _is_na_fill_value(self):
+ return isna(self.fill_value)
+
+ @property
+ def _is_numeric(self):
+ return not is_object_dtype(self.subtype)
+
+ @property
+ def _is_boolean(self):
+ return is_bool_dtype(self.subtype)
+
+ @property
+ def kind(self):
+ """
+ The sparse kind. Either 'integer', or 'block'.
+ """
+ return self.subtype.kind
+
+ @property
+ def type(self):
+ return self.subtype.type
+
+ @property
+ def subtype(self):
+ return self._dtype
+
+ @property
+ def name(self):
+ return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
+
+ def __repr__(self):
+ return self.name
+
+ @classmethod
+ def construct_array_type(cls):
+ from .array import SparseArray
+
+ return SparseArray
+
+ @classmethod
+ def construct_from_string(cls, string):
+ """
+ Construct a SparseDtype from a string form.
+
+ Parameters
+ ----------
+ string : str
+ Can take the following forms.
+
+ string dtype
+ ================ ============================
+ 'int' SparseDtype[np.int64, 0]
+ 'Sparse' SparseDtype[np.float64, nan]
+ 'Sparse[int]' SparseDtype[np.int64, 0]
+ 'Sparse[int, 0]' SparseDtype[np.int64, 0]
+ ================ ============================
+
+ It is not possible to specify non-default fill values
+ with a string. An argument like ``'Sparse[int, 1]'``
+ will raise a ``TypeError`` because the default fill value
+ for integers is 0.
+
+ Returns
+ -------
+ SparseDtype
+ """
+ msg = "Could not construct SparseDtype from '{}'".format(string)
+ if string.startswith("Sparse"):
+ try:
+ sub_type, has_fill_value = cls._parse_subtype(string)
+ except ValueError:
+ raise TypeError(msg)
+ else:
+ result = SparseDtype(sub_type)
+ msg = (
+ "Could not construct SparseDtype from '{}'.\n\nIt "
+ "looks like the fill_value in the string is not "
+ "the default for the dtype. Non-default fill_values "
+ "are not supported. Use the 'SparseDtype()' "
+ "constructor instead."
+ )
+ if has_fill_value and str(result) != string:
+ raise TypeError(msg.format(string))
+ return result
+ else:
+ raise TypeError(msg)
+
+ @staticmethod
+ def _parse_subtype(dtype):
+ """
+ Parse a string to get the subtype
+
+ Parameters
+ ----------
+ dtype : str
+ A string like
+
+ * Sparse[subtype]
+ * Sparse[subtype, fill_value]
+
+ Returns
+ -------
+ subtype : str
+
+ Raises
+ ------
+ ValueError
+ When the subtype cannot be extracted.
+ """
+ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
+ m = xpr.match(dtype)
+ has_fill_value = False
+ if m:
+ subtype = m.groupdict()["subtype"]
+ has_fill_value = m.groupdict()["fill_value"] or has_fill_value
+ elif dtype == "Sparse":
+ subtype = "float64"
+ else:
+ raise ValueError("Cannot parse {}".format(dtype))
+ return subtype, has_fill_value
+
+ @classmethod
+ def is_dtype(cls, dtype):
+ dtype = getattr(dtype, "dtype", dtype)
+ if isinstance(dtype, str) and dtype.startswith("Sparse"):
+ sub_type, _ = cls._parse_subtype(dtype)
+ dtype = np.dtype(sub_type)
+ elif isinstance(dtype, cls):
+ return True
+ return isinstance(dtype, np.dtype) or dtype == "Sparse"
+
+ def update_dtype(self, dtype):
+ """
+ Convert the SparseDtype to a new dtype.
+
+ This takes care of converting the ``fill_value``.
+
+ Parameters
+ ----------
+ dtype : Union[str, numpy.dtype, SparseDtype]
+ The new dtype to use.
+
+ * For a SparseDtype, it is simply returned
+ * For a NumPy dtype (or str), the current fill value
+ is converted to the new dtype, and a SparseDtype
+ with `dtype` and the new fill value is returned.
+
+ Returns
+ -------
+ SparseDtype
+        A new SparseDtype with the correct `dtype` and fill value
+ for that `dtype`.
+
+ Raises
+ ------
+ ValueError
+ When the current fill value cannot be converted to the
+ new `dtype` (e.g. trying to convert ``np.nan`` to an
+ integer dtype).
+
+
+ Examples
+ --------
+ >>> SparseDtype(int, 0).update_dtype(float)
+ Sparse[float64, 0.0]
+
+ >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
+ Sparse[float64, nan]
+ """
+ cls = type(self)
+ dtype = pandas_dtype(dtype)
+
+ if not isinstance(dtype, cls):
+ fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
+ dtype = cls(dtype, fill_value=fill_value)
+
+ return dtype
+
+ @property
+ def _subtype_with_str(self):
+ """
+ Whether the SparseDtype's subtype should be considered ``str``.
+
+ Typically, pandas will store string data in an object-dtype array.
+ When converting values to a dtype, e.g. in ``.astype``, we need to
+ be more specific, we need the actual underlying type.
+
+ Returns
+ -------
+
+ >>> SparseDtype(int, 1)._subtype_with_str
+ dtype('int64')
+
+ >>> SparseDtype(object, 1)._subtype_with_str
+ dtype('O')
+
+ >>> dtype = SparseDtype(str, '')
+ >>> dtype.subtype
+ dtype('O')
+
+ >>> dtype._subtype_with_str
+ str
+ """
+ if isinstance(self.fill_value, str):
+ return type(self.fill_value)
+ return self.subtype
diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
similarity index 89%
rename from pandas/core/sparse/scipy_sparse.py
rename to pandas/core/arrays/sparse/scipy_sparse.py
index 73638f5965119..11c27451a5801 100644
--- a/pandas/core/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -1,7 +1,7 @@
"""
Interaction with scipy.sparse matrices.
-Currently only includes SparseSeries.to_coo helpers.
+Currently only includes to_coo helpers.
"""
from collections import OrderedDict
@@ -99,7 +99,7 @@ def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=F
raise ValueError("to_coo requires MultiIndex with nlevels > 2")
if not ss.index.is_unique:
raise ValueError(
- "Duplicate index entries are not allowed in to_coo " "transformation."
+ "Duplicate index entries are not allowed in to_coo transformation."
)
# to keep things simple, only rely on integer indexing (not labels)
@@ -115,7 +115,7 @@ def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=F
return sparse_matrix, rows, columns
-def _coo_to_sparse_series(A, dense_index: bool = False, sparse_series: bool = True):
+def _coo_to_sparse_series(A, dense_index: bool = False):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
@@ -123,16 +123,14 @@ def _coo_to_sparse_series(A, dense_index: bool = False, sparse_series: bool = Tr
----------
A : scipy.sparse.coo.coo_matrix
dense_index : bool, default False
- sparse_series : bool, default True
Returns
-------
- Series or SparseSeries
+ Series
Raises
------
TypeError if A is not a coo_matrix
-
"""
from pandas import SparseDtype
@@ -141,13 +139,7 @@ def _coo_to_sparse_series(A, dense_index: bool = False, sparse_series: bool = Tr
except AttributeError:
raise TypeError("Expected coo_matrix. Got {} instead.".format(type(A).__name__))
s = s.sort_index()
- if sparse_series:
- # TODO(SparseSeries): remove this and the sparse_series keyword.
- # This is just here to avoid a DeprecationWarning when
- # _coo_to_sparse_series is called via Series.sparse.from_coo
- s = s.to_sparse() # TODO: specify kind?
- else:
- s = s.astype(SparseDtype(s.dtype))
+ s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 9d622d92e0979..3609c68a26c0f 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -41,9 +41,9 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core import ops
from pandas.core.algorithms import checked_add_with_arr
import pandas.core.common as com
+from pandas.core.ops.invalid import invalid_comparison
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
@@ -90,14 +90,14 @@ def wrapper(self, other):
other = Timedelta(other)
except ValueError:
# failed to parse as timedelta
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
result = op(self.view("i8"), other.value)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
@@ -106,7 +106,7 @@ def wrapper(self, other):
try:
other = type(self)._from_sequence(other)._data
except (ValueError, TypeError):
- return ops.invalid_comparison(self, other, op)
+ return invalid_comparison(self, other, op)
result = op(self.view("i8"), other.view("i8"))
result = com.values_from_object(result)
@@ -173,8 +173,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
"ceil",
]
- # Needed so that NaT.__richcmp__(DateTimeArray) operates pointwise
- ndim = 1
+ # Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray)
+ # operates pointwise.
@property
def _box_func(self):
@@ -199,7 +199,6 @@ def dtype(self):
# ----------------------------------------------------------------
# Constructors
- _attributes = ["freq"]
def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
if isinstance(values, (ABCSeries, ABCIndexClass)):
@@ -290,7 +289,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
- raise ValueError("Must provide freq argument if no data is " "supplied")
+ raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
@@ -307,7 +306,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
if start is None and end is None:
if closed is not None:
raise ValueError(
- "Closed has to be None if not both of start" "and end are defined"
+ "Closed has to be None if not both of startand end are defined"
)
left_closed, right_closed = dtl.validate_endpoints(closed)
@@ -777,12 +776,14 @@ def __rdivmod__(self, other):
res2 = other - res1 * self
return res1, res2
- # Note: TimedeltaIndex overrides this in call to cls._add_numeric_methods
def __neg__(self):
if self.freq is not None:
return type(self)(-self._data, freq=-self.freq)
return type(self)(-self._data)
+ def __pos__(self):
+ return type(self)(self._data, freq=self.freq)
+
def __abs__(self):
# Note: freq is not preserved
return type(self)(np.abs(self._data))
@@ -862,17 +863,17 @@ def to_pytimedelta(self):
seconds = _field_accessor(
"seconds",
"seconds",
- "Number of seconds (>= 0 and less than 1 day) " "for each element.",
+ "Number of seconds (>= 0 and less than 1 day) for each element.",
)
microseconds = _field_accessor(
"microseconds",
"microseconds",
- "Number of microseconds (>= 0 and less " "than 1 second) for each element.",
+ "Number of microseconds (>= 0 and less than 1 second) for each element.",
)
nanoseconds = _field_accessor(
"nanoseconds",
"nanoseconds",
- "Number of nanoseconds (>= 0 and less " "than 1 microsecond) for each element.",
+ "Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
)
@property
@@ -1131,7 +1132,7 @@ def _generate_regular_range(start, end, periods, offset):
b = e - periods * stride
else:
raise ValueError(
- "at least 'start' or 'end' should be specified " "if a 'period' is given."
+ "at least 'start' or 'end' should be specified if a 'period' is given."
)
data = np.arange(b, e, stride, dtype=np.int64)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 9480e2e425f79..910b05c47071d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,6 +4,7 @@
import builtins
from collections import OrderedDict
import textwrap
+from typing import Dict, Optional
import warnings
import numpy as np
@@ -32,10 +33,11 @@
from pandas.core import algorithms, common as com
from pandas.core.accessor import DirNamesMixin
+from pandas.core.algorithms import duplicated, unique1d, value_counts
from pandas.core.arrays import ExtensionArray
import pandas.core.nanops as nanops
-_shared_docs = dict()
+_shared_docs = dict() # type: Dict[str, str]
_indexops_doc_kwargs = dict(
klass="IndexOpsMixin",
inplace="",
@@ -44,32 +46,7 @@
)
-class StringMixin:
- """
- Implements string methods so long as object defines a `__str__` method.
- """
-
- # side note - this could be made into a metaclass if more than one
- # object needs
-
- # ----------------------------------------------------------------------
- # Formatting
-
- def __str__(self):
- """
- Return a string representation for a particular Object
- """
- raise AbstractMethodError(self)
-
- def __repr__(self):
- """
- Return a string representation for a particular object.
- """
- return str(self)
-
-
class PandasObject(DirNamesMixin):
-
"""baseclass for various pandas objects"""
@property
@@ -435,7 +412,7 @@ def _agg_1dim(name, how, subset=None):
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError(
- "nested dictionary is ambiguous " "in aggregation"
+ "nested dictionary is ambiguous in aggregation"
)
return colg.aggregate(how, _level=(_level or 0) + 1)
@@ -565,7 +542,7 @@ def is_any_frame():
else:
result = None
- f = self._is_cython_func(arg)
+ f = self._get_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
@@ -632,9 +609,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
- raise ValueError(
- "cannot combine transform and " "aggregation operations"
- )
+ raise ValueError("cannot combine transform and aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
@@ -652,7 +627,7 @@ def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
- def _is_cython_func(self, arg):
+ def _get_cython_func(self, arg: str) -> Optional[str]:
"""
if we define an internal function for this argument, return it
"""
@@ -687,8 +662,9 @@ def transpose(self, *args, **kwargs):
T = property(
transpose,
- doc="""\nReturn the transpose, which is by
- definition self.\n""",
+ doc="""
+ Return the transpose, which is by definition self.
+ """,
)
@property
@@ -725,7 +701,7 @@ def item(self):
"""
Return the first element of the underlying data as a python scalar.
- .. deprecated 0.25.0
+ .. deprecated:: 0.25.0
Returns
-------
@@ -733,7 +709,7 @@ def item(self):
The first element of %(klass)s.
"""
warnings.warn(
- "`item` has been deprecated and will be removed in a " "future version",
+ "`item` has been deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
@@ -1313,17 +1289,17 @@ def value_counts(
Parameters
----------
- normalize : boolean, default False
+ normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
- sort : boolean, default True
+ sort : bool, default True
Sort by frequencies.
- ascending : boolean, default False
+ ascending : bool, default False
Sort in ascending order.
- bins : integer, optional
+ bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
- dropna : boolean, default True
+ dropna : bool, default True
Don't include counts of NaN.
Returns
@@ -1381,8 +1357,6 @@ def value_counts(
1.0 1
dtype: int64
"""
- from pandas.core.algorithms import value_counts
-
result = value_counts(
self,
sort=sort,
@@ -1400,8 +1374,6 @@ def unique(self):
result = values.unique()
else:
- from pandas.core.algorithms import unique1d
-
result = unique1d(values)
return result
@@ -1463,8 +1435,6 @@ def is_monotonic(self):
Return boolean if values in the object are
monotonic_increasing.
- .. versionadded:: 0.19.0
-
Returns
-------
bool
@@ -1481,8 +1451,6 @@ def is_monotonic_decreasing(self):
Return boolean if values in the object are
monotonic_decreasing.
- .. versionadded:: 0.19.0
-
Returns
-------
bool
@@ -1493,7 +1461,7 @@ def is_monotonic_decreasing(self):
def memory_usage(self, deep=False):
"""
- Memory usage of the values
+ Memory usage of the values.
Parameters
----------
@@ -1528,7 +1496,7 @@ def memory_usage(self, deep=False):
size_hint="",
sort=textwrap.dedent(
"""\
- sort : boolean, default False
+ sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""
@@ -1565,7 +1533,7 @@ def factorize(self, sort=False, na_sentinel=-1):
A scalar or array of insertion points with the
same shape as `value`.
- .. versionchanged :: 0.24.0
+ .. versionchanged:: 0.24.0
If `value` is a scalar, an int is now always returned.
Previously, scalar inputs returned an 1-item array for
:class:`Series` and :class:`Categorical`.
@@ -1631,8 +1599,6 @@ def drop_duplicates(self, keep="first", inplace=False):
return result
def duplicated(self, keep="first"):
- from pandas.core.algorithms import duplicated
-
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index d2dd0d03d9425..565f5076fdddb 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -165,51 +165,39 @@ def cast_scalar_indexer(val):
return val
-def _not_none(*args):
+def not_none(*args):
"""
Returns a generator consisting of the arguments that are not None.
"""
return (arg for arg in args if arg is not None)
-def _any_none(*args):
+def any_none(*args):
"""
Returns a boolean indicating if any argument is None.
"""
- for arg in args:
- if arg is None:
- return True
- return False
+ return any(arg is None for arg in args)
-def _all_none(*args):
+def all_none(*args):
"""
Returns a boolean indicating if all arguments are None.
"""
- for arg in args:
- if arg is not None:
- return False
- return True
+ return all(arg is None for arg in args)
-def _any_not_none(*args):
+def any_not_none(*args):
"""
Returns a boolean indicating if any argument is not None.
"""
- for arg in args:
- if arg is not None:
- return True
- return False
+ return any(arg is not None for arg in args)
-def _all_not_none(*args):
+def all_not_none(*args):
"""
Returns a boolean indicating if all arguments are not None.
"""
- for arg in args:
- if arg is None:
- return False
- return True
+ return all(arg is not None for arg in args)
def count_not_none(*args):
@@ -223,7 +211,7 @@ def try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
- except Exception:
+ except TypeError:
return listed
@@ -254,7 +242,6 @@ def asarray_tuplesafe(values, dtype=None):
if result.ndim == 2:
# Avoid building an array of arrays:
- # TODO: verify whether any path hits this except #18819 (invalid)
values = [tuple(x) for x in values]
result = construct_1d_object_array_from_listlike(values)
@@ -444,11 +431,11 @@ def random_state(state=None):
return np.random
else:
raise ValueError(
- "random_state must be an integer, a numpy " "RandomState, or None"
+ "random_state must be an integer, a numpy RandomState, or None"
)
-def _pipe(obj, func, *args, **kwargs):
+def pipe(obj, func, *args, **kwargs):
"""
Apply a function ``func`` to object ``obj`` either by passing obj as the
first argument to the function or, in the case that the func is a tuple,
@@ -458,15 +445,15 @@ def _pipe(obj, func, *args, **kwargs):
Parameters
----------
- func : callable or tuple of (callable, string)
+ func : callable or tuple of (callable, str)
Function to apply to this object or, alternatively, a
``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of `callable`` that expects the
object.
- args : iterable, optional
- positional arguments passed into ``func``.
- kwargs : dict, optional
- a dictionary of keyword arguments passed into ``func``.
+ *args : iterable, optional
+ Positional arguments passed into ``func``.
+ **kwargs : dict, optional
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
@@ -483,7 +470,7 @@ def _pipe(obj, func, *args, **kwargs):
return func(obj, *args, **kwargs)
-def _get_rename_function(mapper):
+def get_rename_function(mapper):
"""
Returns a function that will map names/labels, dependent if mapper
is a dict, Series or just a function.
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index 1046401850963..3e1e5ed89d877 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -9,6 +9,7 @@
from pandas.errors import PerformanceWarning
import pandas as pd
+from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
@@ -34,7 +35,7 @@ def _zip_axes_from_type(typ, new_axes):
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
- return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
+ return any(isinstance(term.value, PandasObject) for term in terms)
def _filter_special_cases(f):
@@ -132,7 +133,8 @@ def _align(terms):
def _reconstruct_object(typ, obj, axes, dtype):
- """Reconstruct an object given its type, raw value, and possibly empty
+ """
+ Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
@@ -157,7 +159,7 @@ def _reconstruct_object(typ, obj, axes, dtype):
res_t = np.result_type(obj.dtype, dtype)
- if not isinstance(typ, partial) and issubclass(typ, pd.core.generic.PandasObject):
+ if not isinstance(typ, partial) and issubclass(typ, PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py
index ddb1023479cba..bd32c8bee1cdf 100644
--- a/pandas/core/computation/common.py
+++ b/pandas/core/computation/common.py
@@ -2,7 +2,7 @@
import numpy as np
-import pandas as pd
+from pandas._config import get_option
# A token value Python's tokenizer probably will never use.
_BACKTICK_QUOTED_STRING = 100
@@ -11,7 +11,7 @@
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, (np.bytes_, bytes)):
- s = s.decode(pd.get_option("display.encoding"))
+ s = s.decode(get_option("display.encoding"))
return s
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index 2c94b142a45b3..3cc34ea1f4ed7 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -17,7 +17,8 @@ class NumExprClobberingError(NameError):
def _check_ne_builtin_clash(expr):
- """Attempt to prevent foot-shooting in a helpful way.
+ """
+ Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
@@ -53,7 +54,8 @@ def convert(self):
return printing.pprint_thing(self.expr)
def evaluate(self):
- """Run the engine on the expression
+ """
+ Run the engine on the expression.
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
@@ -78,7 +80,8 @@ def _is_aligned(self):
@abc.abstractmethod
def _evaluate(self):
- """Return an evaluated expression.
+ """
+ Return an evaluated expression.
Parameters
----------
@@ -94,7 +97,6 @@ def _evaluate(self):
class NumExprEngine(AbstractEngine):
-
"""NumExpr engine class"""
has_neg_frac = True
@@ -127,8 +129,8 @@ def _evaluate(self):
class PythonEngine(AbstractEngine):
-
- """Evaluate an expression in Python space.
+ """
+ Evaluate an expression in Python space.
Mostly for testing purposes.
"""
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 456ecf4b2594f..8614230c4811f 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -333,7 +333,7 @@ def eval(
" if all expressions contain an assignment"
)
elif inplace:
- raise ValueError("Cannot operate inplace " "if there is no assignment")
+ raise ValueError("Cannot operate inplace if there is no assignment")
# assign if needed
assigner = parsed_expr.assigner
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 772fb547567e3..45319a4d63d94 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -13,7 +13,6 @@
import pandas as pd
from pandas.core import common as com
-from pandas.core.base import StringMixin
from pandas.core.computation.common import (
_BACKTICK_QUOTED_STRING,
_remove_spaces_column_name,
@@ -42,7 +41,8 @@
def tokenize_string(source):
- """Tokenize a Python source code string.
+ """
+ Tokenize a Python source code string.
Parameters
----------
@@ -296,7 +296,7 @@ def _node_not_implemented(node_name, cls):
def f(self, *args, **kwargs):
raise NotImplementedError(
- "{name!r} nodes are not " "implemented".format(name=node_name)
+ "{name!r} nodes are not implemented".format(name=node_name)
)
return f
@@ -367,8 +367,8 @@ def f(cls):
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
-
- """Custom ast walker. Parsers of other engines should subclass this class
+ """
+ Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
@@ -433,7 +433,7 @@ def visit(self, node, **kwargs):
from keyword import iskeyword
if any(iskeyword(x) for x in clean.split()):
- e.msg = "Python keyword not valid identifier" " in numexpr query"
+ e.msg = "Python keyword not valid identifier in numexpr query"
raise e
method = "visit_" + node.__class__.__name__
@@ -582,6 +582,9 @@ def visit_NameConstant(self, node, **kwargs):
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
+ def visit_Constant(self, node, **kwargs):
+ return self.const_type(node.n, self.env)
+
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
@@ -642,9 +645,7 @@ def visit_Assign(self, node, **kwargs):
if len(node.targets) != 1:
raise SyntaxError("can only assign a single expression")
if not isinstance(node.targets[0], ast.Name):
- raise SyntaxError(
- "left hand side of an assignment must be a " "single name"
- )
+ raise SyntaxError("left hand side of an assignment must be a single name")
if self.env.target is None:
raise ValueError("cannot assign without a target object")
@@ -656,7 +657,7 @@ def visit_Assign(self, node, **kwargs):
self.assigner = getattr(assigner, "name", assigner)
if self.assigner is None:
raise SyntaxError(
- "left hand side of an assignment must be a " "single resolvable name"
+ "left hand side of an assignment must be a single resolvable name"
)
return self.visit(node.value, **kwargs)
@@ -801,9 +802,9 @@ def __init__(self, env, engine, parser, preparser=lambda x: x):
super().__init__(env, engine, parser, preparser=preparser)
-class Expr(StringMixin):
-
- """Object encapsulating an expression.
+class Expr:
+ """
+ Object encapsulating an expression.
Parameters
----------
@@ -833,7 +834,7 @@ def assigner(self):
def __call__(self):
return self.terms(self.env)
- def __str__(self):
+ def __repr__(self):
return printing.pprint_thing(self.terms)
def __len__(self):
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index ea61467080291..46bc762e1a0b3 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -62,8 +62,9 @@ def set_numexpr_threads(n=None):
ne.set_num_threads(n)
-def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
+def _evaluate_standard(op, op_str, a, b, reversed=False):
""" standard evaluation """
+ # `reversed` kwarg is included for compatibility with _evaluate_numexpr
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all="ignore"):
@@ -76,16 +77,17 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
-
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
- if hasattr(o, "dtypes"):
+ # Series implements dtypes, check for dimension count as well
+ if hasattr(o, "dtypes") and o.ndim > 1:
s = o.dtypes.value_counts()
if len(s) > 1:
return False
dtypes |= set(s.index.astype(str))
- elif isinstance(o, np.ndarray):
+ # ndarray and Series case
+ elif hasattr(o, "dtype"):
dtypes |= {o.dtype.name}
# allowed are a superset
@@ -95,29 +97,22 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
return False
-def _evaluate_numexpr(op, op_str, a, b, truediv=True, reversed=False, **eval_kwargs):
+def _evaluate_numexpr(op, op_str, a, b, reversed=False):
result = None
if _can_use_numexpr(op, op_str, a, b, "evaluate"):
- try:
-
- # we were originally called by a reversed op
- # method
- if reversed:
- a, b = b, a
-
- a_value = getattr(a, "values", a)
- b_value = getattr(b, "values", b)
- result = ne.evaluate(
- "a_value {op} b_value".format(op=op_str),
- local_dict={"a_value": a_value, "b_value": b_value},
- casting="safe",
- truediv=truediv,
- **eval_kwargs
- )
- except ValueError as detail:
- if "unknown type object" in str(detail):
- pass
+ if reversed:
+ # we were originally called by a reversed op method
+ a, b = b, a
+
+ a_value = getattr(a, "values", a)
+ b_value = getattr(b, "values", b)
+
+ result = ne.evaluate(
+ "a_value {op} b_value".format(op=op_str),
+ local_dict={"a_value": a_value, "b_value": b_value},
+ casting="safe",
+ )
if _TEST_MODE:
_store_test_result(result is not None)
@@ -138,25 +133,19 @@ def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, "where", a, b, "where"):
-
- try:
- cond_value = getattr(cond, "values", cond)
- a_value = getattr(a, "values", a)
- b_value = getattr(b, "values", b)
- result = ne.evaluate(
- "where(cond_value, a_value, b_value)",
- local_dict={
- "cond_value": cond_value,
- "a_value": a_value,
- "b_value": b_value,
- },
- casting="safe",
- )
- except ValueError as detail:
- if "unknown type object" in str(detail):
- pass
- except Exception as detail:
- raise TypeError(str(detail))
+ cond_value = getattr(cond, "values", cond)
+ a_value = getattr(a, "values", a)
+ b_value = getattr(b, "values", b)
+
+ result = ne.evaluate(
+ "where(cond_value, a_value, b_value)",
+ local_dict={
+ "cond_value": cond_value,
+ "a_value": a_value,
+ "b_value": b_value,
+ },
+ casting="safe",
+ )
if result is None:
result = _where_standard(cond, a, b)
@@ -169,11 +158,10 @@ def _where_numexpr(cond, a, b):
def _has_bool_dtype(x):
+ if isinstance(x, ABCDataFrame):
+ return "bool" in x.dtypes
try:
- if isinstance(x, ABCDataFrame):
- return "bool" in x.dtypes
- else:
- return x.dtype == bool
+ return x.dtype == bool
except AttributeError:
return isinstance(x, (bool, np.bool_))
@@ -197,41 +185,45 @@ def _bool_arith_check(
if op_str in not_allowed:
raise NotImplementedError(
- "operator {op!r} not implemented for " "bool dtypes".format(op=op_str)
+ "operator {op!r} not implemented for bool dtypes".format(op=op_str)
)
return True
-def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs):
- """ evaluate and return the expression of the op on a and b
-
- Parameters
- ----------
-
- op : the actual operand
- op_str: the string version of the op
- a : left operand
- b : right operand
- use_numexpr : whether to try to use numexpr (default True)
- """
+def evaluate(op, op_str, a, b, use_numexpr=True, reversed=False):
+ """
+ Evaluate and return the expression of the op on a and b.
+
+ Parameters
+ ----------
+ op : the actual operation to perform (callable)
+ op_str : str
+ The string version of the op.
+ a : left operand
+ b : right operand
+ use_numexpr : bool, default True
+ Whether to try to use numexpr.
+ reversed : bool, default False
+ """
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
- return _evaluate(op, op_str, a, b, **eval_kwargs)
+ return _evaluate(op, op_str, a, b, reversed=reversed)
return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
- """ evaluate the where condition cond on a and b
-
- Parameters
- ----------
-
- cond : a boolean array
- a : return if cond is True
- b : return if cond is False
- use_numexpr : whether to try to use numexpr (default True)
- """
+ """
+ Evaluate the where condition cond on a and b.
+
+ Parameters
+ ----------
+ cond : np.ndarray[bool]
+ a : return if cond is True
+ b : return if cond is False
+ use_numexpr : bool, default True
+ Whether to try to use numexpr.
+ """
if use_numexpr:
return _where(cond, a, b)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 9e6928372808e..28b6aef693bfe 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -12,7 +12,6 @@
from pandas.core.dtypes.common import is_list_like, is_scalar
-from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
@@ -52,8 +51,9 @@
class UndefinedVariableError(NameError):
-
- """NameError subclass for local variables."""
+ """
+ NameError subclass for local variables.
+ """
def __init__(self, name, is_local):
if is_local:
@@ -63,7 +63,7 @@ def __init__(self, name, is_local):
super().__init__(msg.format(name))
-class Term(StringMixin):
+class Term:
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
supr_new = super(Term, klass).__new__
@@ -82,7 +82,7 @@ def __init__(self, name, env, side=None, encoding=None):
def local_name(self):
return self.name.replace(_LOCAL_TAG, "")
- def __str__(self):
+ def __repr__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
@@ -97,7 +97,7 @@ def _resolve_name(self):
if hasattr(res, "ndim") and res.ndim > 2:
raise NotImplementedError(
- "N-dimensional objects, where N > 2," " are not supported with eval"
+ "N-dimensional objects, where N > 2, are not supported with eval"
)
return res
@@ -182,7 +182,7 @@ def _resolve_name(self):
def name(self):
return self.value
- def __str__(self):
+ def __repr__(self):
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
@@ -191,9 +191,9 @@ def __str__(self):
_bool_op_map = {"not": "~", "and": "&", "or": "|"}
-class Op(StringMixin):
-
- """Hold an operator of arbitrary arity
+class Op:
+ """
+ Hold an operator of arbitrary arity.
"""
def __init__(self, op, operands, *args, **kwargs):
@@ -204,9 +204,10 @@ def __init__(self, op, operands, *args, **kwargs):
def __iter__(self):
return iter(self.operands)
- def __str__(self):
- """Print a generic n-ary operator and its operands using infix
- notation"""
+ def __repr__(self):
+ """
+ Print a generic n-ary operator and its operands using infix notation.
+ """
# recurse over the operands
parened = ("({0})".format(pprint_thing(opr)) for opr in self.operands)
return pprint_thing(" {0} ".format(self.op).join(parened))
@@ -297,7 +298,8 @@ def _not_in(x, y):
def _cast_inplace(terms, acceptable_dtypes, dtype):
- """Cast an expression inplace.
+ """
+ Cast an expression inplace.
Parameters
----------
@@ -305,9 +307,6 @@ def _cast_inplace(terms, acceptable_dtypes, dtype):
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
-
- .. versionadded:: 0.19.0
-
dtype : str or numpy.dtype
The dtype to cast to.
"""
@@ -328,8 +327,8 @@ def is_term(obj):
class BinOp(Op):
-
- """Hold a binary operator and its operands
+ """
+ Hold a binary operator and its operands.
Parameters
----------
@@ -358,7 +357,8 @@ def __init__(self, op, lhs, rhs, **kwargs):
)
def __call__(self, env):
- """Recursively evaluate an expression in Python space.
+ """
+ Recursively evaluate an expression in Python space.
Parameters
----------
@@ -380,7 +380,8 @@ def __call__(self, env):
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
- """Evaluate a binary operation *before* being passed to the engine.
+ """
+ Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
@@ -475,8 +476,8 @@ def isnumeric(dtype):
class Div(BinOp):
-
- """Div operator to special case casting.
+ """
+ Div operator to special case casting.
Parameters
----------
@@ -507,8 +508,8 @@ def __init__(self, lhs, rhs, truediv, *args, **kwargs):
class UnaryOp(Op):
-
- """Hold a unary operator and its operands
+ """
+ Hold a unary operator and its operands.
Parameters
----------
@@ -539,7 +540,7 @@ def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
- def __str__(self):
+ def __repr__(self):
return pprint_thing("{0}({1})".format(self.op, self.operand))
@property
@@ -564,7 +565,7 @@ def __call__(self, env):
with np.errstate(all="ignore"):
return self.func.func(*operands)
- def __str__(self):
+ def __repr__(self):
operands = map(str, self.operands)
return pprint_thing("{0}({1})".format(self.op, ",".join(operands)))
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 8ba01670bd879..81658ab23ba46 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -11,7 +11,6 @@
from pandas.core.dtypes.common import is_list_like
import pandas as pd
-from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
@@ -32,8 +31,7 @@ def __init__(self, level, global_dict=None, local_dict=None, queryables=None):
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
- supr_new = StringMixin.__new__
- return supr_new(klass)
+ return object.__new__(klass)
def __init__(self, name, env, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
@@ -231,7 +229,7 @@ def convert_values(self):
class FilterBinOp(BinOp):
- def __str__(self):
+ def __repr__(self):
return pprint_thing(
"[Filter : [{lhs}] -> [{op}]".format(lhs=self.filter[0], op=self.filter[1])
)
@@ -297,7 +295,7 @@ def evaluate(self):
class ConditionBinOp(BinOp):
- def __str__(self):
+ def __repr__(self):
return pprint_thing("[Condition : [{cond}]]".format(cond=self.condition))
def invert(self):
@@ -306,7 +304,7 @@ def invert(self):
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
- "cannot use an invert condition when " "passing to numexpr"
+ "cannot use an invert condition when passing to numexpr"
)
def format(self):
@@ -474,15 +472,12 @@ def _validate_where(w):
"""
if not (isinstance(w, (Expr, str)) or is_list_like(w)):
- raise TypeError(
- "where must be passed as a string, Expr, " "or list-like of Exprs"
- )
+ raise TypeError("where must be passed as a string, Expr, or list-like of Exprs")
return w
class Expr(expr.Expr):
-
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
@@ -550,7 +545,7 @@ def __init__(self, where, queryables=None, encoding=None, scope_level=0):
)
self.terms = self.parse()
- def __str__(self):
+ def __repr__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
@@ -577,7 +572,6 @@ def evaluate(self):
class TermValue:
-
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index 4d5a523337f66..b11411eb2dc66 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -15,9 +15,6 @@
from pandas._libs.tslibs import Timestamp
from pandas.compat.chainmap import DeepChainMap
-from pandas.core.base import StringMixin
-import pandas.core.computation as compu
-
def _ensure_scope(
level, global_dict=None, local_dict=None, resolvers=(), target=None, **kwargs
@@ -67,7 +64,8 @@ def _raw_hex_id(obj):
def _get_pretty_string(obj):
- """Return a prettier version of obj
+ """
+ Return a prettier version of obj.
Parameters
----------
@@ -84,9 +82,9 @@ def _get_pretty_string(obj):
return sio.getvalue()
-class Scope(StringMixin):
-
- """Object to hold scope, with a few bells to deal with some custom syntax
+class Scope:
+ """
+ Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
@@ -105,7 +103,7 @@ class Scope(StringMixin):
temps : dict
"""
- __slots__ = "level", "scope", "target", "temps"
+ __slots__ = ["level", "scope", "target", "resolvers", "temps"]
def __init__(
self, level, global_dict=None, local_dict=None, resolvers=(), target=None
@@ -141,7 +139,7 @@ def __init__(
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
- def __str__(self):
+ def __repr__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
unicode_str = "{name}(scope={scope_keys}, resolvers={res_keys})"
@@ -163,7 +161,8 @@ def has_resolvers(self):
return bool(len(self.resolvers))
def resolve(self, key, is_local):
- """Resolve a variable name in a possibly local context
+ """
+ Resolve a variable name in a possibly local context.
Parameters
----------
@@ -198,10 +197,14 @@ def resolve(self, key, is_local):
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
- raise compu.ops.UndefinedVariableError(key, is_local)
+ # runtime import because ops imports from scope
+ from pandas.core.computation.ops import UndefinedVariableError
+
+ raise UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
- """Replace a variable name, with a potentially new value.
+ """
+ Replace a variable name, with a potentially new value.
Parameters
----------
@@ -225,7 +228,8 @@ def swapkey(self, old_key, new_key, new_value=None):
return
def _get_vars(self, stack, scopes):
- """Get specifically scoped variables from a list of stack frames.
+ """
+ Get specifically scoped variables from a list of stack frames.
Parameters
----------
@@ -247,7 +251,8 @@ def _get_vars(self, stack, scopes):
del frame
def update(self, level):
- """Update the current scope by going back `level` levels.
+ """
+ Update the current scope by going back `level` levels.
Parameters
----------
@@ -266,7 +271,8 @@ def update(self, level):
del stack[:], stack
def add_tmp(self, value):
- """Add a temporary variable to the scope.
+ """
+ Add a temporary variable to the scope.
Parameters
----------
@@ -297,7 +303,8 @@ def ntemps(self):
@property
def full_scope(self):
- """Return the full scope for use with passing to engines transparently
+ """
+ Return the full scope for use with passing to engines transparently
as a mapping.
Returns
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index be6086dd360f2..bc2eb3511629d 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,14 +9,13 @@
module is imported, register them here rather then in the module.
"""
-import importlib
-
import pandas._config.config as cf
from pandas._config.config import (
is_bool,
is_callable,
is_instance_factory,
is_int,
+ is_nonnegative_int,
is_one_of_factory,
is_text,
)
@@ -149,10 +148,10 @@ def use_numexpr_cb(key):
"""
max_colwidth_doc = """
-: int
+: int or None
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
- placeholder is embedded in the output.
+ placeholder is embedded in the output. A 'None' value means unlimited.
"""
colheader_justify_doc = """
@@ -319,7 +318,7 @@ def is_terminal():
with cf.config_prefix("display"):
- cf.register_option("precision", 6, pc_precision_doc, validator=is_int)
+ cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
cf.register_option(
"float_format",
None,
@@ -333,12 +332,7 @@ def is_terminal():
pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))),
)
- cf.register_option(
- "max_rows",
- 60,
- pc_max_rows_doc,
- validator=is_instance_factory([type(None), int]),
- )
+ cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
cf.register_option(
"min_rows",
10,
@@ -346,16 +340,15 @@ def is_terminal():
validator=is_instance_factory([type(None), int]),
)
cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)
- cf.register_option("max_colwidth", 50, max_colwidth_doc, validator=is_int)
+ cf.register_option(
+ "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int
+ )
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
else:
max_cols = 20 # cannot determine optimal number of columns
cf.register_option(
- "max_columns",
- max_cols,
- pc_max_cols_doc,
- validator=is_instance_factory([type(None), int]),
+ "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
)
cf.register_option(
"large_repr",
@@ -588,26 +581,12 @@ def use_inf_as_na_cb(key):
def register_plotting_backend_cb(key):
- backend_str = cf.get_option(key)
- if backend_str == "matplotlib":
- try:
- import pandas.plotting._matplotlib # noqa
- except ImportError:
- raise ImportError(
- "matplotlib is required for plotting when the "
- 'default backend "matplotlib" is selected.'
- )
- else:
- return
+ if key == "matplotlib":
+ # We defer matplotlib validation, since it's the default
+ return
+ from pandas.plotting._core import _get_plot_backend
- try:
- importlib.import_module(backend_str)
- except ImportError:
- raise ValueError(
- '"{}" does not seem to be an installed module. '
- "A pandas plotting backend must be a module that "
- "can be imported".format(backend_str)
- )
+ _get_plot_backend(key)
with cf.config_prefix("plotting"):
@@ -615,8 +594,7 @@ def register_plotting_backend_cb(key):
"backend",
defval="matplotlib",
doc=plotting_backend_doc,
- validator=str,
- cb=register_plotting_backend_cb,
+ validator=register_plotting_backend_cb,
)
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
new file mode 100644
index 0000000000000..5e8b28267f24f
--- /dev/null
+++ b/pandas/core/construction.py
@@ -0,0 +1,552 @@
+"""
+Constructor functions intended to be shared by pd.array, Series.__init__,
+and Index.__new__.
+
+These should not depend on core.internals.
+"""
+from typing import Optional, Sequence, Union, cast
+
+import numpy as np
+import numpy.ma as ma
+
+from pandas._libs import lib
+from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
+
+from pandas.core.dtypes.cast import (
+ construct_1d_arraylike_from_scalar,
+ construct_1d_ndarray_preserving_na,
+ construct_1d_object_array_from_listlike,
+ infer_dtype_from_scalar,
+ maybe_cast_to_datetime,
+ maybe_cast_to_integer_array,
+ maybe_castable,
+ maybe_convert_platform,
+ maybe_upcast,
+)
+from pandas.core.dtypes.common import (
+ is_categorical_dtype,
+ is_datetime64_ns_dtype,
+ is_extension_array_dtype,
+ is_extension_type,
+ is_float_dtype,
+ is_integer_dtype,
+ is_iterator,
+ is_list_like,
+ is_object_dtype,
+ is_timedelta64_ns_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype, registry
+from pandas.core.dtypes.generic import (
+ ABCExtensionArray,
+ ABCIndexClass,
+ ABCPandasArray,
+ ABCSeries,
+)
+from pandas.core.dtypes.missing import isna
+
+import pandas.core.common as com
+
+
+def array(
+ data: Sequence[object],
+ dtype: Optional[Union[str, np.dtype, ExtensionDtype]] = None,
+ copy: bool = True,
+) -> ABCExtensionArray:
+ """
+ Create an array.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ data : Sequence of objects
+ The scalars inside `data` should be instances of the
+ scalar type for `dtype`. It's expected that `data`
+ represents a 1-dimensional array of data.
+
+ When `data` is an Index or Series, the underlying array
+ will be extracted from `data`.
+
+ dtype : str, np.dtype, or ExtensionDtype, optional
+ The dtype to use for the array. This may be a NumPy
+ dtype or an extension type registered with pandas using
+ :meth:`pandas.api.extensions.register_extension_dtype`.
+
+ If not specified, there are two possibilities:
+
+ 1. When `data` is a :class:`Series`, :class:`Index`, or
+ :class:`ExtensionArray`, the `dtype` will be taken
+ from the data.
+ 2. Otherwise, pandas will attempt to infer the `dtype`
+ from the data.
+
+ Note that when `data` is a NumPy array, ``data.dtype`` is
+ *not* used for inferring the array type. This is because
+ NumPy cannot represent all the types of data that can be
+ held in extension arrays.
+
+ Currently, pandas will infer an extension dtype for sequences of
+
+ ============================== =====================================
+ Scalar Type Array Type
+ ============================== =====================================
+ :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
+ :class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
+ :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
+ :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
+ ============================== =====================================
+
+ For all other cases, NumPy's usual inference rules will be used.
+
+ copy : bool, default True
+ Whether to copy the data, even if not necessary. Depending
+ on the type of `data`, creating the new array may require
+ copying data, even if ``copy=False``.
+
+ Returns
+ -------
+ ExtensionArray
+ The newly created array.
+
+ Raises
+ ------
+ ValueError
+ When `data` is not 1-dimensional.
+
+ See Also
+ --------
+ numpy.array : Construct a NumPy array.
+ Series : Construct a pandas Series.
+ Index : Construct a pandas Index.
+ arrays.PandasArray : ExtensionArray wrapping a NumPy array.
+ Series.array : Extract the array stored within a Series.
+
+ Notes
+ -----
+ Omitting the `dtype` argument means pandas will attempt to infer the
+ best array type from the values in the data. As new array types are
+ added by pandas and 3rd party libraries, the "best" array type may
+ change. We recommend specifying `dtype` to ensure that
+
+ 1. the correct array type for the data is returned
+ 2. the returned array type doesn't change as new extension types
+ are added by pandas and third-party libraries
+
+ Additionally, if the underlying memory representation of the returned
+ array matters, we recommend specifying the `dtype` as a concrete object
+ rather than a string alias or allowing it to be inferred. For example,
+ a future version of pandas or a 3rd-party library may include a
+ dedicated ExtensionArray for string data. In this event, the following
+ would no longer return a :class:`arrays.PandasArray` backed by a NumPy
+ array.
+
+ >>> pd.array(['a', 'b'], dtype=str)
+ <PandasArray>
+ ['a', 'b']
+ Length: 2, dtype: str32
+
+ This would instead return the new ExtensionArray dedicated for string
+ data. If you really need the new array to be backed by a NumPy array,
+ specify that in the dtype.
+
+ >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
+ <PandasArray>
+ ['a', 'b']
+ Length: 2, dtype: str32
+
+ Or use the dedicated constructor for the array you're expecting, and
+ wrap that in a PandasArray
+
+ >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
+ <PandasArray>
+ ['a', 'b']
+ Length: 2, dtype: str32
+
+ Finally, Pandas has arrays that mostly overlap with NumPy
+
+ * :class:`arrays.DatetimeArray`
+ * :class:`arrays.TimedeltaArray`
+
+ When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
+ passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
+ rather than a ``PandasArray``. This is for symmetry with the case of
+ timezone-aware data, which NumPy does not natively support.
+
+ >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
+ <DatetimeArray>
+ ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
+ Length: 2, dtype: datetime64[ns]
+
+ >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
+ <TimedeltaArray>
+ ['01:00:00', '02:00:00']
+ Length: 2, dtype: timedelta64[ns]
+
+ Examples
+ --------
+ If a dtype is not specified, `data` is passed through to
+ :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.
+
+ >>> pd.array([1, 2])
+ <PandasArray>
+ [1, 2]
+ Length: 2, dtype: int64
+
+ Or the NumPy dtype can be specified
+
+ >>> pd.array([1, 2], dtype=np.dtype("int32"))
+ <PandasArray>
+ [1, 2]
+ Length: 2, dtype: int32
+
+ You can use the string alias for `dtype`
+
+ >>> pd.array(['a', 'b', 'a'], dtype='category')
+ [a, b, a]
+ Categories (2, object): [a, b]
+
+ Or specify the actual dtype
+
+ >>> pd.array(['a', 'b', 'a'],
+ ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
+ [a, b, a]
+ Categories (3, object): [a < b < c]
+
+ Because omitting the `dtype` passes the data through to NumPy,
+ a mixture of valid integers and NA will return a floating-point
+ NumPy array.
+
+ >>> pd.array([1, 2, np.nan])
+ <PandasArray>
+ [1.0, 2.0, nan]
+ Length: 3, dtype: float64
+
+ To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
+ the dtype:
+
+ >>> pd.array([1, 2, np.nan], dtype='Int64')
+ <IntegerArray>
+ [1, 2, NaN]
+ Length: 3, dtype: Int64
+
+ Pandas will infer an ExtensionArray for some types of data:
+
+ >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
+ <PeriodArray>
+ ['2000-01-01', '2000-01-01']
+ Length: 2, dtype: period[D]
+
+ `data` must be 1-dimensional. A ValueError is raised when the input
+ has the wrong dimensionality.
+
+ >>> pd.array(1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Cannot pass scalar '1' to 'pandas.array'.
+ """
+ from pandas.core.arrays import (
+ period_array,
+ IntervalArray,
+ PandasArray,
+ DatetimeArray,
+ TimedeltaArray,
+ )
+
+ if lib.is_scalar(data):
+ msg = "Cannot pass scalar '{}' to 'pandas.array'."
+ raise ValueError(msg.format(data))
+
+ data = extract_array(data, extract_numpy=True)
+
+ if dtype is None and isinstance(data, ABCExtensionArray):
+ dtype = data.dtype
+
+ # this returns None for not-found dtypes.
+ if isinstance(dtype, str):
+ dtype = registry.find(dtype) or dtype
+
+ if is_extension_array_dtype(dtype):
+ cls = cast(ExtensionDtype, dtype).construct_array_type()
+ return cls._from_sequence(data, dtype=dtype, copy=copy)
+
+ if dtype is None:
+ inferred_dtype = lib.infer_dtype(data, skipna=False)
+ if inferred_dtype == "period":
+ try:
+ return period_array(data, copy=copy)
+ except IncompatibleFrequency:
+ # We may have a mixture of frequencies.
+ # We choose to return an ndarray, rather than raising.
+ pass
+ elif inferred_dtype == "interval":
+ try:
+ return IntervalArray(data, copy=copy)
+ except ValueError:
+ # We may have a mixture of `closed` here.
+ # We choose to return an ndarray, rather than raising.
+ pass
+
+ elif inferred_dtype.startswith("datetime"):
+ # datetime, datetime64
+ try:
+ return DatetimeArray._from_sequence(data, copy=copy)
+ except ValueError:
+ # Mixture of timezones, fall back to PandasArray
+ pass
+
+ elif inferred_dtype.startswith("timedelta"):
+ # timedelta, timedelta64
+ return TimedeltaArray._from_sequence(data, copy=copy)
+
+ # TODO(BooleanArray): handle this type
+
+ # Pandas overrides NumPy for
+ # 1. datetime64[ns]
+ # 2. timedelta64[ns]
+ # so that a DatetimeArray is returned.
+ if is_datetime64_ns_dtype(dtype):
+ return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
+ elif is_timedelta64_ns_dtype(dtype):
+ return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
+
+ result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
+ return result
+
+
+def extract_array(obj, extract_numpy=False):
+ """
+ Extract the ndarray or ExtensionArray from a Series or Index.
+
+ For all other types, `obj` is just returned as is.
+
+ Parameters
+ ----------
+ obj : object
+ For Series / Index, the underlying ExtensionArray is unboxed.
+ For Numpy-backed ExtensionArrays, the ndarray is extracted.
+
+ extract_numpy : bool, default False
+ Whether to extract the ndarray from a PandasArray
+
+ Returns
+ -------
+ arr : object
+
+ Examples
+ --------
+ >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
+ [a, b, c]
+ Categories (3, object): [a, b, c]
+
+ Other objects like lists, arrays, and DataFrames are just passed through.
+
+ >>> extract_array([1, 2, 3])
+ [1, 2, 3]
+
+ For an ndarray-backed Series / Index a PandasArray is returned.
+
+ >>> extract_array(pd.Series([1, 2, 3]))
+ <PandasArray>
+ [1, 2, 3]
+ Length: 3, dtype: int64
+
+ To extract all the way down to the ndarray, pass ``extract_numpy=True``.
+
+ >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
+ array([1, 2, 3])
+ """
+ if isinstance(obj, (ABCIndexClass, ABCSeries)):
+ obj = obj.array
+
+ if extract_numpy and isinstance(obj, ABCPandasArray):
+ obj = obj.to_numpy()
+
+ return obj
+
+
+def sanitize_array(
+ data, index, dtype=None, copy: bool = False, raise_cast_failure: bool = False
+):
+ """
+ Sanitize input data to an ndarray, copy if specified, coerce to the
+ dtype if specified.
+ """
+ if dtype is not None:
+ dtype = pandas_dtype(dtype)
+
+ if isinstance(data, ma.MaskedArray):
+ mask = ma.getmaskarray(data)
+ if mask.any():
+ data, fill_value = maybe_upcast(data, copy=True)
+ data.soften_mask() # set hardmask False if it was True
+ data[mask] = fill_value
+ else:
+ data = data.copy()
+
+ # extract ndarray or ExtensionArray, ensure we have no PandasArray
+ data = extract_array(data, extract_numpy=True)
+
+ # GH#846
+ if isinstance(data, np.ndarray):
+
+ if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
+ # possibility of nan -> garbage
+ try:
+ subarr = _try_cast(data, dtype, copy, True)
+ except ValueError:
+ if copy:
+ subarr = data.copy()
+ else:
+ subarr = np.array(data, copy=False)
+ else:
+ # we will try to copy by definition here
+ subarr = _try_cast(data, dtype, copy, raise_cast_failure)
+
+ elif isinstance(data, ABCExtensionArray):
+ # it is already ensured above this is not a PandasArray
+ subarr = data
+
+ if dtype is not None:
+ subarr = subarr.astype(dtype, copy=copy)
+ elif copy:
+ subarr = subarr.copy()
+ return subarr
+
+ elif isinstance(data, (list, tuple)) and len(data) > 0:
+ if dtype is not None:
+ subarr = _try_cast(data, dtype, copy, raise_cast_failure)
+ else:
+ subarr = maybe_convert_platform(data)
+
+ subarr = maybe_cast_to_datetime(subarr, dtype)
+
+ elif isinstance(data, range):
+ # GH#16804
+ arr = np.arange(data.start, data.stop, data.step, dtype="int64")
+ subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
+ else:
+ subarr = _try_cast(data, dtype, copy, raise_cast_failure)
+
+ # scalar like, GH
+ if getattr(subarr, "ndim", 0) == 0:
+ if isinstance(data, list): # pragma: no cover
+ subarr = np.array(data, dtype=object)
+ elif index is not None:
+ value = data
+
+ # figure out the dtype from the value (upcast if necessary)
+ if dtype is None:
+ dtype, value = infer_dtype_from_scalar(value)
+ else:
+ # need to possibly convert the value here
+ value = maybe_cast_to_datetime(value, dtype)
+
+ subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
+
+ else:
+ return subarr.item()
+
+ # the result that we want
+ elif subarr.ndim == 1:
+ if index is not None:
+
+ # a 1-element ndarray
+ if len(subarr) != len(index) and len(subarr) == 1:
+ subarr = construct_1d_arraylike_from_scalar(
+ subarr[0], len(index), subarr.dtype
+ )
+
+ elif subarr.ndim > 1:
+ if isinstance(data, np.ndarray):
+ raise Exception("Data must be 1-dimensional")
+ else:
+ subarr = com.asarray_tuplesafe(data, dtype=dtype)
+
+ if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
+ # This is to prevent mixed-type Series getting all casted to
+ # NumPy string type, e.g. NaN --> '-1#IND'.
+ if issubclass(subarr.dtype.type, str):
+ # GH#16605
+ # If not empty convert the data to dtype
+ # GH#19853: If data is a scalar, subarr has already the result
+ if not lib.is_scalar(data):
+ if not np.all(isna(data)):
+ data = np.array(data, dtype=dtype, copy=False)
+ subarr = np.array(data, dtype=object, copy=copy)
+
+ if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype):
+ inferred = lib.infer_dtype(subarr, skipna=False)
+ if inferred == "period":
+ from pandas.core.arrays import period_array
+
+ try:
+ subarr = period_array(subarr)
+ except IncompatibleFrequency:
+ pass
+
+ return subarr
+
+
+def _try_cast(
+ arr,
+ dtype: Optional[Union[np.dtype, "ExtensionDtype"]],
+ copy: bool,
+ raise_cast_failure: bool,
+):
+ """
+ Convert input to numpy ndarray and optionally cast to a given dtype.
+
+ Parameters
+ ----------
+ arr : ndarray, list, tuple, iterator (catchall)
+ Excludes: ExtensionArray, Series, Index.
+ dtype : np.dtype, ExtensionDtype or None
+ copy : bool
+ If False, don't copy the data if not needed.
+ raise_cast_failure : bool
+ If True, and if a dtype is specified, raise errors during casting.
+ Otherwise an object array is returned.
+ """
+ # perf shortcut as this is the most common case
+ if isinstance(arr, np.ndarray):
+ if maybe_castable(arr) and not copy and dtype is None:
+ return arr
+
+ try:
+ # GH#15832: Check if we are requesting a numeric dtype and
+ # that we can convert the data to the requested dtype.
+ if is_integer_dtype(dtype):
+ subarr = maybe_cast_to_integer_array(arr, dtype)
+
+ subarr = maybe_cast_to_datetime(arr, dtype)
+ # Take care in creating object arrays (but iterators are not
+ # supported):
+ if is_object_dtype(dtype) and (
+ is_list_like(subarr)
+ and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
+ ):
+ subarr = construct_1d_object_array_from_listlike(subarr)
+ elif not is_extension_type(subarr):
+ subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
+ except OutOfBoundsDatetime:
+ # in case of out of bound datetime64 -> always raise
+ raise
+ except (ValueError, TypeError):
+ if is_categorical_dtype(dtype):
+ # We *do* allow casting to categorical, since we know
+ # that Categorical is the only array type for 'category'.
+ dtype = cast(CategoricalDtype, dtype)
+ subarr = dtype.construct_array_type()(
+ arr, dtype.categories, ordered=dtype._ordered
+ )
+ elif is_extension_array_dtype(dtype):
+ # create an extension array from its dtype
+ dtype = cast(ExtensionDtype, dtype)
+ array_type = dtype.construct_array_type()._from_sequence
+ subarr = array_type(arr, dtype=dtype, copy=copy)
+ elif dtype is not None and raise_cast_failure:
+ raise
+ else:
+ subarr = np.array(arr, dtype=object, copy=copy)
+ return subarr
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 44a3fefb1689a..b59660056aadb 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -6,6 +6,7 @@
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT
+from pandas.util._validators import validate_bool_kwarg
from .common import (
_INT64_DTYPE,
@@ -45,6 +46,7 @@
)
from .dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype
from .generic import (
+ ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCPeriodArray,
@@ -94,12 +96,13 @@ def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
+ do_round = False
if is_scalar(result):
return result
-
- def trans(x):
- return x
+ elif isinstance(result, ABCDataFrame):
+ # occurs in pivot_table doctest
+ return result
if isinstance(dtype, str):
if dtype == "infer":
@@ -117,83 +120,115 @@ def trans(x):
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
-
- def trans(x): # noqa
- return x.round()
+ do_round = True
else:
dtype = "object"
- if isinstance(dtype, str):
dtype = np.dtype(dtype)
- try:
+ converted = maybe_downcast_numeric(result, dtype, do_round)
+ if converted is not result:
+ return converted
+
+ # a datetimelike
+ # GH12821, iNaT is casted to float
+ if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
+ if hasattr(dtype, "tz"):
+ # not a numpy dtype
+ if dtype.tz:
+ # convert to datetime and change timezone
+ from pandas import to_datetime
+
+ result = to_datetime(result).tz_localize("utc")
+ result = result.tz_convert(dtype.tz)
+ else:
+ result = result.astype(dtype)
+
+ elif dtype.type is Period:
+ # TODO(DatetimeArray): merge with previous elif
+ from pandas.core.arrays import PeriodArray
+ try:
+ return PeriodArray(result, freq=dtype.freq)
+ except TypeError:
+ # e.g. TypeError: int() argument must be a string, a
+ # bytes-like object or a number, not 'Period'
+ pass
+
+ return result
+
+
+def maybe_downcast_numeric(result, dtype, do_round: bool = False):
+ """
+ Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
+
+ Parameters
+ ----------
+ result : ndarray or ExtensionArray
+ dtype : np.dtype or ExtensionDtype
+ do_round : bool
+
+ Returns
+ -------
+ ndarray or ExtensionArray
+ """
+ if not isinstance(dtype, np.dtype):
+ # e.g. SparseDtype has no itemsize attr
+ return result
+
+ if isinstance(result, list):
+ # reached via groupby.agg _ohlc; really this should be handled
+ # earlier
+ result = np.array(result)
+
+ def trans(x):
+ if do_round:
+ return x.round()
+ return x
+
+ if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
- if dtype.kind == result.dtype.kind:
- if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):
- return result
+ if result.dtype.itemsize <= dtype.itemsize and result.size:
+ return result
- if is_bool_dtype(dtype) or is_integer_dtype(dtype):
+ if is_bool_dtype(dtype) or is_integer_dtype(dtype):
+ if not result.size:
# if we don't have any elements, just astype it
- if not np.prod(result.shape):
- return trans(result).astype(dtype)
+ return trans(result).astype(dtype)
- # do a test on the first element, if it fails then we are done
- r = result.ravel()
- arr = np.array([r[0]])
+ # do a test on the first element, if it fails then we are done
+ r = result.ravel()
+ arr = np.array([r[0]])
+ if isna(arr).any() or not np.allclose(arr, trans(arr).astype(dtype), rtol=0):
# if we have any nulls, then we are done
- if isna(arr).any() or not np.allclose(
- arr, trans(arr).astype(dtype), rtol=0
- ):
- return result
+ return result
+ elif not isinstance(r[0], (np.integer, np.floating, np.bool, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
- elif not isinstance(
- r[0], (np.integer, np.floating, np.bool, int, float, bool)
- ):
- return result
+ return result
- if (
- issubclass(result.dtype.type, (np.object_, np.number))
- and notna(result).all()
- ):
- new_result = trans(result).astype(dtype)
- try:
- if np.allclose(new_result, result, rtol=0):
- return new_result
- except Exception:
-
- # comparison of an object dtype with a number type could
- # hit here
- if (new_result == result).all():
- return new_result
- elif issubclass(dtype.type, np.floating) and not is_bool_dtype(result.dtype):
- return result.astype(dtype)
-
- # a datetimelike
- # GH12821, iNaT is casted to float
- elif dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
- try:
- result = result.astype(dtype)
- except Exception:
- if dtype.tz:
- # convert to datetime and change timezone
- from pandas import to_datetime
-
- result = to_datetime(result).tz_localize("utc")
- result = result.tz_convert(dtype.tz)
-
- elif dtype.type == Period:
- # TODO(DatetimeArray): merge with previous elif
- from pandas.core.arrays import PeriodArray
-
- return PeriodArray(result, freq=dtype.freq)
+ if (
+ issubclass(result.dtype.type, (np.object_, np.number))
+ and notna(result).all()
+ ):
+ new_result = trans(result).astype(dtype)
+ if new_result.dtype.kind == "O" or result.dtype.kind == "O":
+ # np.allclose may raise TypeError on object-dtype
+ if (new_result == result).all():
+ return new_result
+ else:
+ if np.allclose(new_result, result, rtol=0):
+ return new_result
- except Exception:
- pass
+ elif (
+ issubclass(dtype.type, np.floating)
+ and not is_bool_dtype(result.dtype)
+ and not is_string_dtype(result.dtype)
+ ):
+ return result.astype(dtype)
return result
@@ -251,14 +286,18 @@ def changeit():
# length of the boolean
try:
om = other[mask]
+ except (IndexError, TypeError):
+ # IndexError occurs in test_upcast when we have a boolean
+ # mask of the wrong shape
+ # TypeError occurs in test_upcast when `other` is a bool
+ pass
+ else:
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
- except Exception:
- pass
# we are forced to change the dtype of the result as the input
# isn't compatible
@@ -289,7 +328,8 @@ def changeit():
try:
np.place(result, mask, other)
- except Exception:
+ except TypeError:
+ # e.g. int-dtype result and float-dtype other
return changeit()
return result, False
@@ -601,24 +641,21 @@ def coerce_to_dtypes(result, dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
def conv(r, dtype):
- try:
- if isna(r):
- pass
- elif dtype == _NS_DTYPE:
- r = tslibs.Timestamp(r)
- elif dtype == _TD_DTYPE:
- r = tslibs.Timedelta(r)
- elif dtype == np.bool_:
- # messy. non 0/1 integers do not get converted.
- if is_integer(r) and r not in [0, 1]:
- return int(r)
- r = bool(r)
- elif dtype.kind == "f":
- r = float(r)
- elif dtype.kind == "i":
- r = int(r)
- except Exception:
+ if np.any(isna(r)):
pass
+ elif dtype == _NS_DTYPE:
+ r = tslibs.Timestamp(r)
+ elif dtype == _TD_DTYPE:
+ r = tslibs.Timedelta(r)
+ elif dtype == np.bool_:
+ # messy. non 0/1 integers do not get converted.
+ if is_integer(r) and r not in [0, 1]:
+ return int(r)
+ r = bool(r)
+ elif dtype.kind == "f":
+ r = float(r)
+ elif dtype.kind == "i":
+ r = int(r)
return r
@@ -696,9 +733,7 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
- raise ValueError(
- "Cannot convert non-finite values (NA or inf) to " "integer"
- )
+ raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
@@ -719,9 +754,7 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
- msg = (
- "The '{dtype}' dtype has no unit. " "Please pass in '{dtype}[ns]' instead."
- )
+ msg = "The '{dtype}' dtype has no unit. Please pass in '{dtype}[ns]' instead."
raise ValueError(msg.format(dtype=dtype.name))
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
@@ -731,93 +764,81 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
return arr.view(dtype)
-def maybe_convert_objects(
- values, convert_dates=True, convert_numeric=True, convert_timedeltas=True, copy=True
-):
- """ if we have an object dtype, try to coerce dates and/or numbers """
-
- # if we have passed in a list or scalar
- if isinstance(values, (list, tuple)):
- values = np.array(values, dtype=np.object_)
- if not hasattr(values, "dtype"):
- values = np.array([values], dtype=np.object_)
+def maybe_convert_objects(values: np.ndarray, convert_numeric: bool = True):
+ """
+ If we have an object dtype array, try to coerce dates and/or numbers.
- # convert dates
- if convert_dates and values.dtype == np.object_:
+ Parameters
+ ----------
+ values : ndarray
+ convert_numeric : bool, default True
- # we take an aggressive stance and convert to datetime64[ns]
- if convert_dates == "coerce":
- new_values = maybe_cast_to_datetime(values, "M8[ns]", errors="coerce")
+ Returns
+ -------
+ ndarray or DatetimeIndex
+ """
+ validate_bool_kwarg(convert_numeric, "convert_numeric")
- # if we are all nans then leave me alone
- if not isna(new_values).all():
- values = new_values
+ orig_values = values
- else:
- values = lib.maybe_convert_objects(values, convert_datetime=convert_dates)
+ # convert dates
+ if is_object_dtype(values.dtype):
+ values = lib.maybe_convert_objects(values, convert_datetime=True)
# convert timedeltas
- if convert_timedeltas and values.dtype == np.object_:
-
- if convert_timedeltas == "coerce":
- from pandas.core.tools.timedeltas import to_timedelta
-
- new_values = to_timedelta(values, errors="coerce")
-
- # if we are all nans then leave me alone
- if not isna(new_values).all():
- values = new_values
-
- else:
- values = lib.maybe_convert_objects(
- values, convert_timedelta=convert_timedeltas
- )
+ if is_object_dtype(values.dtype):
+ values = lib.maybe_convert_objects(values, convert_timedelta=True)
# convert to numeric
- if values.dtype == np.object_:
+ if is_object_dtype(values.dtype):
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=True
)
-
+ except (ValueError, TypeError):
+ pass
+ else:
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
- except Exception:
- pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
- values = values.copy() if copy else values
+ if values is orig_values:
+ values = values.copy()
return values
def soft_convert_objects(
- values, datetime=True, numeric=True, timedelta=True, coerce=False, copy=True
+ values: np.ndarray,
+ datetime: bool = True,
+ numeric: bool = True,
+ timedelta: bool = True,
+ coerce: bool = False,
+ copy: bool = True,
):
""" if we have an object dtype, try to coerce dates and/or numbers """
+ validate_bool_kwarg(datetime, "datetime")
+ validate_bool_kwarg(numeric, "numeric")
+ validate_bool_kwarg(timedelta, "timedelta")
+ validate_bool_kwarg(coerce, "coerce")
+ validate_bool_kwarg(copy, "copy")
+
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
- raise ValueError(
- "At least one of datetime, numeric or timedelta must " "be True."
- )
+ raise ValueError("At least one of datetime, numeric or timedelta must be True.")
elif conversion_count > 1 and coerce:
raise ValueError(
"Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True."
)
- if isinstance(values, (list, tuple)):
- # List or scalar
- values = np.array(values, dtype=np.object_)
- elif not hasattr(values, "dtype"):
- values = np.array([values], dtype=np.object_)
- elif not is_object_dtype(values.dtype):
+ if not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
@@ -843,22 +864,23 @@ def soft_convert_objects(
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
- values = lib.maybe_convert_objects(values, convert_datetime=datetime)
+ values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
- values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
+ values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
+ except (ValueError, TypeError):
+ pass
+ else:
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
- except Exception:
- pass
return values
@@ -931,9 +953,10 @@ def try_datetime(v):
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
+ from pandas._libs.tslibs import conversion
+ from pandas import DatetimeIndex
+
try:
- from pandas._libs.tslibs import conversion
- from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz)
@@ -953,7 +976,7 @@ def try_timedelta(v):
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
- except Exception:
+ except ValueError:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
@@ -1007,7 +1030,10 @@ def maybe_cast_to_datetime(value, dtype, errors="raise"):
)
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
- if dtype.name in ("datetime64", "datetime64[ns]"):
+
+ # pandas supports dtype whose granularity is less than [ns]
+ # e.g., [ps], [fs], [as]
+ if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg.format(dtype=dtype.name))
dtype = _NS_DTYPE
@@ -1025,7 +1051,10 @@ def maybe_cast_to_datetime(value, dtype, errors="raise"):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
- if dtype.name in ("timedelta64", "timedelta64[ns]"):
+
+ # pandas supports dtype whose granularity is less than [ns]
+ # e.g., [ps], [fs], [as]
+ if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg.format(dtype=dtype.name))
dtype = _TD_DTYPE
@@ -1282,9 +1311,8 @@ def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
- >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
-
-
+ >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str'))
+ array(['1.0', '2.0', None], dtype=object)
"""
subarr = np.array(values, dtype=dtype, copy=copy)
@@ -1368,7 +1396,7 @@ def maybe_cast_to_integer_array(arr, dtype, copy=False):
arr = np.asarray(arr)
if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
- raise OverflowError("Trying to coerce negative values " "to unsigned integers")
+ raise OverflowError("Trying to coerce negative values to unsigned integers")
if is_integer_dtype(dtype) and (is_float_dtype(arr) or is_object_dtype(arr)):
raise ValueError("Trying to coerce float values to integers")
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index f2571573bd1bc..41677af7b1721 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1,5 +1,5 @@
""" common type operations """
-from typing import Any, Union
+from typing import Any, Callable, Union
import warnings
import numpy as np
@@ -141,7 +141,7 @@ def ensure_categorical(arr):
return arr
-def ensure_int_or_float(arr: ArrayLike, copy=False) -> np.array:
+def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array:
"""
Ensure that an dtype array of some integer dtype
has an int64 dtype if possible.
@@ -167,12 +167,13 @@ def ensure_int_or_float(arr: ArrayLike, copy=False) -> np.array:
If the array is explicitly of type uint64 the type
will remain unchanged.
"""
+ # TODO: GH27506 potential bug with ExtensionArrays
try:
- return arr.astype("int64", copy=copy, casting="safe")
+ return arr.astype("int64", copy=copy, casting="safe") # type: ignore
except TypeError:
pass
try:
- return arr.astype("uint64", copy=copy, casting="safe")
+ return arr.astype("uint64", copy=copy, casting="safe") # type: ignore
except TypeError:
return arr.astype("float64", copy=copy)
@@ -206,12 +207,12 @@ def ensure_python_int(value: Union[int, np.integer]) -> int:
return new_value
-def classes(*klasses):
+def classes(*klasses) -> Callable:
""" evaluate if the tipo is a subclass of the klasses """
return lambda tipo: issubclass(tipo, klasses)
-def classes_and_not_datetimelike(*klasses):
+def classes_and_not_datetimelike(*klasses) -> Callable:
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
@@ -272,8 +273,6 @@ def is_sparse(arr):
See Also
--------
- DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.
- Series.to_sparse : Convert Series to SparseSeries.
Series.to_dense : Return dense representation of a Series.
Examples
@@ -282,7 +281,7 @@ def is_sparse(arr):
>>> is_sparse(pd.SparseArray([0, 0, 1, 0]))
True
- >>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))
+ >>> is_sparse(pd.Series(pd.SparseArray([0, 0, 1, 0])))
True
Returns `False` if the parameter is not sparse.
@@ -299,14 +298,6 @@ def is_sparse(arr):
False
Returns `False` if the parameter has more than one dimension.
-
- >>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],
- columns=['max_speed'],
- index=['falcon', 'parrot', 'lion', 'monkey'])
- >>> is_sparse(df)
- False
- >>> is_sparse(df.max_speed)
- True
"""
from pandas.core.arrays.sparse import SparseDtype
@@ -339,8 +330,6 @@ def is_scipy_sparse(arr):
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
- >>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
- False
"""
global _is_scipy_sparse
@@ -354,7 +343,7 @@ def is_scipy_sparse(arr):
return _is_scipy_sparse(arr)
-def is_categorical(arr):
+def is_categorical(arr) -> bool:
"""
Check whether an array-like is a Categorical instance.
@@ -675,7 +664,7 @@ def is_interval_dtype(arr_or_dtype):
return IntervalDtype.is_dtype(arr_or_dtype)
-def is_categorical_dtype(arr_or_dtype):
+def is_categorical_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Categorical dtype.
@@ -898,8 +887,9 @@ def is_dtype_equal(source, target):
return False
-def is_any_int_dtype(arr_or_dtype):
- """Check whether the provided array or dtype is of an integer dtype.
+def is_any_int_dtype(arr_or_dtype) -> bool:
+ """
+ Check whether the provided array or dtype is of an integer dtype.
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
@@ -1160,7 +1150,7 @@ def is_int64_dtype(arr_or_dtype):
return _is_dtype_type(arr_or_dtype, classes(np.int64))
-def is_datetime64_any_dtype(arr_or_dtype):
+def is_datetime64_any_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the datetime64 dtype.
@@ -1320,7 +1310,7 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64))
-def _is_unorderable_exception(e):
+def _is_unorderable_exception(e: TypeError) -> bool:
"""
Check if the exception raised is an unorderable exception.
@@ -1616,7 +1606,7 @@ def is_float_dtype(arr_or_dtype):
return _is_dtype_type(arr_or_dtype, classes(np.floating))
-def is_bool_dtype(arr_or_dtype):
+def is_bool_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a boolean dtype.
@@ -1714,9 +1704,6 @@ def is_extension_type(arr):
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
- >>> is_extension_type(pd.SparseSeries([1, 2, 3]))
- True
- >>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
@@ -1789,7 +1776,7 @@ def is_extension_array_dtype(arr_or_dtype):
return isinstance(dtype, ExtensionDtype) or registry.find(dtype) is not None
-def is_complex_dtype(arr_or_dtype):
+def is_complex_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a complex dtype.
@@ -1822,7 +1809,7 @@ def is_complex_dtype(arr_or_dtype):
return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))
-def _is_dtype(arr_or_dtype, condition):
+def _is_dtype(arr_or_dtype, condition) -> bool:
"""
Return a boolean if the condition is satisfied for the arr_or_dtype.
@@ -1883,7 +1870,7 @@ def _get_dtype(arr_or_dtype):
return pandas_dtype(arr_or_dtype)
-def _is_dtype_type(arr_or_dtype, condition):
+def _is_dtype_type(arr_or_dtype, condition) -> bool:
"""
Return a boolean if the condition is satisfied for the arr_or_dtype.
@@ -1992,7 +1979,7 @@ def infer_dtype_from_object(dtype):
return infer_dtype_from_object(np.dtype(dtype))
-def _validate_date_like_dtype(dtype):
+def _validate_date_like_dtype(dtype) -> None:
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
@@ -2048,10 +2035,8 @@ def pandas_dtype(dtype):
# raise a consistent TypeError if failed
try:
npdtype = np.dtype(dtype)
- except Exception:
- # we don't want to force a repr of the non-string
- if not isinstance(dtype, str):
- raise TypeError("data type not understood")
+ except SyntaxError:
+ # np.dtype uses `eval` which can raise SyntaxError
raise TypeError("data type '{}' not understood".format(dtype))
# Any invalid dtype (such as pd.Timestamp) should raise an error.
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index ac74ad5726a99..1094ab22238e9 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -20,13 +20,11 @@
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import (
+ ABCCategoricalIndex,
ABCDatetimeArray,
- ABCDatetimeIndex,
ABCIndexClass,
- ABCPeriodIndex,
ABCRangeIndex,
- ABCSparseDataFrame,
- ABCTimedeltaIndex,
+ ABCSeries,
)
@@ -71,41 +69,7 @@ def get_dtype_kinds(l):
return typs
-def _get_series_result_type(result, objs=None):
- """
- return appropriate class of Series concat
- input is either dict or array-like
- """
- from pandas import SparseSeries, SparseDataFrame, DataFrame
-
- # concat Series with axis 1
- if isinstance(result, dict):
- # concat Series with axis 1
- if all(isinstance(c, (SparseSeries, SparseDataFrame)) for c in result.values()):
- return SparseDataFrame
- else:
- return DataFrame
-
- # otherwise it is a SingleBlockManager (axis = 0)
- return objs[0]._constructor
-
-
-def _get_frame_result_type(result, objs):
- """
- return appropriate class of DataFrame-like concat
- if all blocks are sparse, return SparseDataFrame
- otherwise, return 1st obj
- """
-
- if result.blocks and (any(isinstance(obj, ABCSparseDataFrame) for obj in objs)):
- from pandas.core.sparse.api import SparseDataFrame
-
- return SparseDataFrame
- else:
- return next(obj for obj in objs if not isinstance(obj, ABCSparseDataFrame))
-
-
-def _concat_compat(to_concat, axis=0):
+def concat_compat(to_concat, axis=0):
"""
provide concatenation of an array of arrays each of which is a single
'normalized' dtypes (in that for example, if it's object, then it is a
@@ -125,10 +89,9 @@ def _concat_compat(to_concat, axis=0):
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
- try:
- return x.shape[axis] > 0
- except Exception:
+ if x.ndim <= axis:
return True
+ return x.shape[axis] > 0
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
@@ -142,12 +105,12 @@ def is_nonempty(x):
_contains_period = any(typ.startswith("period") for typ in typs)
if "category" in typs:
- # this must be prior to _concat_datetime,
+ # this must be prior to concat_datetime,
# to support Categorical + datetime-like
- return _concat_categorical(to_concat, axis=axis)
+ return concat_categorical(to_concat, axis=axis)
elif _contains_datetime or "timedelta" in typs or _contains_period:
- return _concat_datetime(to_concat, axis=axis, typs=typs)
+ return concat_datetime(to_concat, axis=axis, typs=typs)
# these are mandated to handle empties as well
elif "sparse" in typs:
@@ -174,7 +137,7 @@ def is_nonempty(x):
return np.concatenate(to_concat, axis=axis)
-def _concat_categorical(to_concat, axis=0):
+def concat_categorical(to_concat, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
@@ -214,7 +177,7 @@ def _concat_categorical(to_concat, axis=0):
else np.asarray(x.astype(object))
for x in to_concat
]
- result = _concat_compat(to_concat)
+ result = concat_compat(to_concat)
if axis == 1:
result = result.reshape(1, len(result))
return result
@@ -225,8 +188,6 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False):
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
- .. versionadded:: 0.19.0
-
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
@@ -322,14 +283,14 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False):
[b, c, a, b]
Categories (3, object): [b, c, a]
"""
- from pandas import Index, Categorical, CategoricalIndex, Series
+ from pandas import Index, Categorical
from pandas.core.arrays.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError("No Categoricals to union")
def _maybe_unwrap(x):
- if isinstance(x, (CategoricalIndex, Series)):
+ if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
return x.values
elif isinstance(x, Categorical):
return x
@@ -361,9 +322,7 @@ def _maybe_unwrap(x):
new_codes = np.concatenate(codes)
if sort_categories and not ignore_order and ordered:
- raise TypeError(
- "Cannot use sort_categories=True with " "ordered Categoricals"
- )
+ raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
if sort_categories and not categories.is_monotonic_increasing:
categories = categories.sort_values()
@@ -386,7 +345,7 @@ def _maybe_unwrap(x):
else:
# ordered - to show a proper error message
if all(c.ordered for c in to_union):
- msg = "to union ordered Categoricals, " "all categories must be the same"
+ msg = "to union ordered Categoricals, all categories must be the same"
raise TypeError(msg)
else:
raise TypeError("Categorical.ordered must be the same")
@@ -404,7 +363,7 @@ def _concatenate_2d(to_concat, axis):
return np.concatenate(to_concat, axis=axis)
-def _concat_datetime(to_concat, axis=0, typs=None):
+def concat_datetime(to_concat, axis=0, typs=None):
"""
provide concatenation of an datetimelike array of arrays each of which is a
single M8[ns], datetimet64[ns, tz] or m8[ns] dtype
@@ -489,31 +448,6 @@ def _concat_datetimetz(to_concat, name=None):
return sample._concat_same_type(to_concat)
-def _concat_index_same_dtype(indexes, klass=None):
- klass = klass if klass is not None else indexes[0].__class__
- return klass(np.concatenate([x._values for x in indexes]))
-
-
-def _concat_index_asobject(to_concat, name=None):
- """
- concat all inputs as object. DatetimeIndex, TimedeltaIndex and
- PeriodIndex are converted to object dtype before concatenation
- """
- from pandas import Index
- from pandas.core.arrays import ExtensionArray
-
- klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray)
- to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat]
-
- self = to_concat[0]
- attribs = self._get_attributes_dict()
- attribs["name"] = name
-
- to_concat = [x._values if isinstance(x, Index) else x for x in to_concat]
-
- return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
-
-
def _concat_sparse(to_concat, axis=0, typs=None):
"""
provide concatenation of an sparse/dense array of arrays each of which is a
@@ -544,52 +478,3 @@ def _concat_sparse(to_concat, axis=0, typs=None):
]
return SparseArray._concat_same_type(to_concat)
-
-
-def _concat_rangeindex_same_dtype(indexes):
- """
- Concatenates multiple RangeIndex instances. All members of "indexes" must
- be of type RangeIndex; result will be RangeIndex if possible, Int64Index
- otherwise. E.g.:
- indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
- indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
- """
- from pandas import Int64Index, RangeIndex
-
- start = step = next_ = None
-
- # Filter the empty indexes
- non_empty_indexes = [obj for obj in indexes if len(obj)]
-
- for obj in non_empty_indexes:
- rng = obj._range # type: range
-
- if start is None:
- # This is set by the first non-empty index
- start = rng.start
- if step is None and len(rng) > 1:
- step = rng.step
- elif step is None:
- # First non-empty index had only one element
- if rng.start == start:
- return _concat_index_same_dtype(indexes, klass=Int64Index)
- step = rng.start - start
-
- non_consecutive = (step != rng.step and len(rng) > 1) or (
- next_ is not None and rng.start != next_
- )
- if non_consecutive:
- return _concat_index_same_dtype(indexes, klass=Int64Index)
-
- if step is not None:
- next_ = rng[-1] + step
-
- if non_empty_indexes:
- # Get the stop value from "next" or alternatively
- # from the last non-empty index
- stop = non_empty_indexes[-1].stop if next_ is None else next_
- return RangeIndex(start, stop, step)
-
- # Here all "indexes" had 0 length, i.e. were empty.
- # In this case return an empty range index.
- return RangeIndex(0, 0)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 6728d048efb79..fcdb89dd8a334 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1,6 +1,6 @@
""" define extension dtypes """
import re
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
+from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
import warnings
import numpy as np
@@ -11,8 +11,10 @@
from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCDateOffset, ABCIndexClass
+from pandas._typing import Ordered
+
from .base import ExtensionDtype
-from .inference import is_list_like
+from .inference import is_bool, is_list_like
str_type = str
@@ -20,11 +22,8 @@
# CategoricalDtype constructor to detect when ordered=None is explicitly passed
ordered_sentinel = object() # type: object
-# TODO(GH26403): Replace with Optional[bool] or bool
-OrderedType = Union[None, bool, object]
-
-def register_extension_dtype(cls: Type[ExtensionDtype],) -> Type[ExtensionDtype]:
+def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:
"""
Register an ExtensionType with pandas as class decorator.
@@ -149,7 +148,7 @@ def __repr__(self) -> str_type:
return str(self)
def __hash__(self) -> int:
- raise NotImplementedError("sub-classes should implement an __hash__ " "method")
+ raise NotImplementedError("sub-classes should implement an __hash__ method")
def __getstate__(self) -> Dict[str_type, Any]:
# pickle support; we don't want to pickle the cache
@@ -222,7 +221,11 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
_metadata = ("categories", "ordered", "_ordered_from_sentinel")
_cache = {} # type: Dict[str_type, PandasExtensionDtype]
- def __init__(self, categories=None, ordered: OrderedType = ordered_sentinel):
+ def __init__(
+ self, categories=None, ordered: Union[Ordered, object] = ordered_sentinel
+ ):
+ # TODO(GH26403): Set type of ordered to Ordered
+ ordered = cast(Ordered, ordered)
self._finalize(categories, ordered, fastpath=False)
@classmethod
@@ -235,7 +238,7 @@ def _from_fastpath(
@classmethod
def _from_categorical_dtype(
- cls, dtype: "CategoricalDtype", categories=None, ordered: OrderedType = None
+ cls, dtype: "CategoricalDtype", categories=None, ordered: Ordered = None
) -> "CategoricalDtype":
if categories is ordered is None:
return dtype
@@ -320,7 +323,7 @@ def _from_values_or_dtype(
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError(
- "Cannot specify `categories` or `ordered` " "together with `dtype`."
+ "Cannot specify `categories` or `ordered` together with `dtype`."
)
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
@@ -336,9 +339,7 @@ def _from_values_or_dtype(
return dtype
- def _finalize(
- self, categories, ordered: OrderedType, fastpath: bool = False
- ) -> None:
+ def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
if ordered is not None and ordered is not ordered_sentinel:
self.validate_ordered(ordered)
@@ -423,7 +424,7 @@ def __repr__(self):
return tpl.format(data, self._ordered)
@staticmethod
- def _hash_categories(categories, ordered: OrderedType = True) -> int:
+ def _hash_categories(categories, ordered: Ordered = True) -> int:
from pandas.core.util.hashing import (
hash_array,
_combine_hash_arrays,
@@ -475,7 +476,7 @@ def construct_array_type(cls):
return Categorical
@staticmethod
- def validate_ordered(ordered: OrderedType) -> None:
+ def validate_ordered(ordered: Ordered) -> None:
"""
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
@@ -490,8 +491,6 @@ def validate_ordered(ordered: OrderedType) -> None:
TypeError
If 'ordered' is not a boolean.
"""
- from pandas.core.dtypes.common import is_bool
-
if not is_bool(ordered):
raise TypeError("'ordered' must either be 'True' or 'False'")
@@ -531,7 +530,9 @@ def validate_categories(categories, fastpath: bool = False):
return categories
- def update_dtype(self, dtype: "CategoricalDtype") -> "CategoricalDtype":
+ def update_dtype(
+ self, dtype: Union[str_type, "CategoricalDtype"]
+ ) -> "CategoricalDtype":
"""
Returns a CategoricalDtype with categories and ordered taken from dtype
if specified, otherwise falling back to self if unspecified
@@ -553,6 +554,9 @@ def update_dtype(self, dtype: "CategoricalDtype") -> "CategoricalDtype":
"got {dtype!r}"
).format(dtype=dtype)
raise ValueError(msg)
+ else:
+ # from here on, dtype is a CategoricalDtype
+ dtype = cast(CategoricalDtype, dtype)
# dtype is CDT: keep current categories/ordered if None
new_categories = dtype.categories
@@ -585,7 +589,7 @@ def categories(self):
return self._categories
@property
- def ordered(self) -> OrderedType:
+ def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
@@ -681,7 +685,7 @@ def __init__(self, unit="ns", tz=None):
tz = timezones.tz_standardize(tz)
elif tz is not None:
raise pytz.UnknownTimeZoneError(tz)
- elif tz is None:
+ if tz is None:
raise TypeError("A 'tz' is required.")
self._unit = unit
@@ -733,14 +737,17 @@ def construct_from_string(cls, string):
"""
if isinstance(string, str):
msg = "Could not construct DatetimeTZDtype from '{}'"
- try:
- match = cls._match.match(string)
- if match:
- d = match.groupdict()
+ match = cls._match.match(string)
+ if match:
+ d = match.groupdict()
+ try:
return cls(unit=d["unit"], tz=d["tz"])
- except Exception:
- # TODO(py3): Change this pass to `raise TypeError(msg) from e`
- pass
+ except (KeyError, TypeError, ValueError) as err:
+ # KeyError if maybe_get_tz tries and fails to get a
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
+ # TypeError if we pass a nonsense tz;
+ # ValueError if we pass a unit other than "ns"
+ raise TypeError(msg.format(string)) from err
raise TypeError(msg.format(string))
raise TypeError("Could not construct DatetimeTZDtype")
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index de41644f09b66..2518f330b26a3 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -52,12 +52,7 @@ def _check(cls, inst):
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
-ABCSparseDataFrame = create_pandas_abc_type(
- "ABCSparseDataFrame", "_subtyp", ("sparse_frame",)
-)
-ABCSparseSeries = create_pandas_abc_type(
- "ABCSparseSeries", "_subtyp", ("sparse_series", "sparse_time_series")
-)
+
ABCSparseArray = create_pandas_abc_type(
"ABCSparseArray", "_subtyp", ("sparse_array", "sparse_series")
)
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index bea73d72b91c9..cd87fbef02e4f 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -3,6 +3,8 @@
"""
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
@@ -131,6 +133,8 @@ def _isna_new(obj):
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
+ elif isinstance(obj, type):
+ return False
elif isinstance(
obj,
(
@@ -169,6 +173,8 @@ def _isna_old(obj):
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
+ elif isinstance(obj, type):
+ return False
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
@@ -203,8 +209,6 @@ def _use_inf_as_na(key):
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
- from pandas._config import get_option
-
flag = get_option(key)
if flag:
globals()["_isna"] = _isna_old
@@ -441,8 +445,14 @@ def array_equivalent(left, right, strict_nan=False):
if not isinstance(right_value, float) or not np.isnan(right_value):
return False
else:
- if left_value != right_value:
- return False
+ try:
+ if np.any(left_value != right_value):
+ return False
+ except TypeError as err:
+ if "Cannot compare tz-naive" in str(err):
+ # tzawareness compat failure, see GH#28507
+ return False
+ raise
return True
# NaNs can occur in float and complex arrays.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c15f4ad8e1900..e4a44a89998e3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10,12 +10,11 @@
"""
import collections
from collections import OrderedDict, abc
-import functools
from io import StringIO
import itertools
import sys
from textwrap import dedent
-from typing import FrozenSet, List, Optional, Set, Tuple, Type, Union
+from typing import FrozenSet, List, Optional, Sequence, Set, Tuple, Type, Union
import warnings
import numpy as np
@@ -80,26 +79,19 @@
)
from pandas.core.dtypes.missing import isna, notna
+from pandas._typing import Axes, Dtype, FilePathOrBuffer
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
-from pandas.core.index import (
- Index,
- MultiIndex,
- ensure_index,
- ensure_index_from_sequences,
-)
+from pandas.core.index import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.multi import maybe_droplevels
from pandas.core.indexes.period import PeriodIndex
-from pandas.core.indexing import (
- check_bool_indexer,
- convert_to_index_sliceable,
- maybe_droplevels,
-)
+from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
@@ -111,6 +103,7 @@
sanitize_index,
to_arrays,
)
+from pandas.core.ops.missing import dispatch_fill_zeros
from pandas.core.series import Series
from pandas.io.formats import console, format as fmt
@@ -313,13 +306,13 @@ class DataFrame(NDFrame):
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
- .. versionchanged :: 0.23.0
+ .. versionchanged:: 0.23.0
If data is a dict, column order follows insertion-order for
Python 3.6 and later.
- .. versionchanged :: 0.25.0
+ .. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order
- Python 3.6 and later.
+ for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
@@ -329,7 +322,7 @@ class DataFrame(NDFrame):
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
- copy : boolean, default False
+ copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
@@ -377,12 +370,12 @@ class DataFrame(NDFrame):
"""
@property
- def _constructor(self):
+ def _constructor(self) -> Type["DataFrame"]:
return DataFrame
_constructor_sliced = Series # type: Type[Series]
_deprecations = NDFrame._deprecations | frozenset(
- ["get_value", "set_value", "from_items"]
+ ["from_items"]
) # type: FrozenSet[str]
_accessors = set() # type: Set[str]
@@ -393,7 +386,14 @@ def _constructor_expanddim(self):
# ----------------------------------------------------------------------
# Constructors
- def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
+ def __init__(
+ self,
+ data=None,
+ index: Optional[Axes] = None,
+ columns: Optional[Axes] = None,
+ dtype: Optional[Dtype] = None,
+ copy: bool = False,
+ ):
if data is None:
data = {}
if dtype is not None:
@@ -488,7 +488,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
# ----------------------------------------------------------------------
@property
- def axes(self):
+ def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
@@ -505,7 +505,7 @@ def axes(self):
return [self.index, self.columns]
@property
- def shape(self):
+ def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
@@ -527,7 +527,7 @@ def shape(self):
return len(self.index), len(self.columns)
@property
- def _is_homogeneous_type(self):
+ def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
@@ -558,14 +558,14 @@ def _is_homogeneous_type(self):
# ----------------------------------------------------------------------
# Rendering Methods
- def _repr_fits_vertical_(self):
+ def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
- def _repr_fits_horizontal_(self, ignore_width=False):
+ def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
@@ -619,7 +619,7 @@ def _repr_fits_horizontal_(self, ignore_width=False):
return repr_width < width
- def _info_repr(self):
+ def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
@@ -628,7 +628,7 @@ def _info_repr(self):
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
- def __repr__(self):
+ def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
@@ -640,6 +640,7 @@ def __repr__(self):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
+ max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
@@ -651,12 +652,13 @@ def __repr__(self):
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
+ max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
- def _repr_html_(self):
+ def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
@@ -672,19 +674,38 @@ def _repr_html_(self):
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
+ min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
- return self.to_html(
+ formatter = fmt.DataFrameFormatter(
+ self,
+ columns=None,
+ col_space=None,
+ na_rep="NaN",
+ formatters=None,
+ float_format=None,
+ sparsify=None,
+ justify=None,
+ index_names=True,
+ header=True,
+ index=True,
+ bold_rows=True,
+ escape=True,
max_rows=max_rows,
+ min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
- notebook=True,
+ decimal=".",
+ table_id=None,
+ render_links=False,
)
+ return formatter.to_html(notebook=True)
else:
return None
@Substitution(
+ header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
@@ -694,29 +715,34 @@ def _repr_html_(self):
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
- buf=None,
- columns=None,
- col_space=None,
- header=True,
- index=True,
- na_rep="NaN",
- formatters=None,
- float_format=None,
- sparsify=None,
- index_names=True,
- justify=None,
- max_rows=None,
- min_rows=None,
- max_cols=None,
- show_dimensions=False,
- decimal=".",
- line_width=None,
- ):
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ columns: Optional[Sequence[str]] = None,
+ col_space: Optional[int] = None,
+ header: Union[bool, Sequence[str]] = True,
+ index: bool = True,
+ na_rep: str = "NaN",
+ formatters: Optional[fmt.formatters_type] = None,
+ float_format: Optional[fmt.float_format_type] = None,
+ sparsify: Optional[bool] = None,
+ index_names: bool = True,
+ justify: Optional[str] = None,
+ max_rows: Optional[int] = None,
+ min_rows: Optional[int] = None,
+ max_cols: Optional[int] = None,
+ show_dimensions: bool = False,
+ decimal: str = ".",
+ line_width: Optional[int] = None,
+ max_colwidth: Optional[int] = None,
+ ) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
+ max_colwidth : int, optional
+ Max width to truncate each column in characters. By default, no limit.
+
+ .. versionadded:: 1.0.0
%(returns)s
See Also
--------
@@ -733,31 +759,29 @@ def to_string(
2 3 6
"""
- formatter = fmt.DataFrameFormatter(
- self,
- buf=buf,
- columns=columns,
- col_space=col_space,
- na_rep=na_rep,
- formatters=formatters,
- float_format=float_format,
- sparsify=sparsify,
- justify=justify,
- index_names=index_names,
- header=header,
- index=index,
- min_rows=min_rows,
- max_rows=max_rows,
- max_cols=max_cols,
- show_dimensions=show_dimensions,
- decimal=decimal,
- line_width=line_width,
- )
- formatter.to_string()
+ from pandas import option_context
- if buf is None:
- result = formatter.buf.getvalue()
- return result
+ with option_context("display.max_colwidth", max_colwidth):
+ formatter = fmt.DataFrameFormatter(
+ self,
+ columns=columns,
+ col_space=col_space,
+ na_rep=na_rep,
+ formatters=formatters,
+ float_format=float_format,
+ sparsify=sparsify,
+ justify=justify,
+ index_names=index_names,
+ header=header,
+ index=index,
+ min_rows=min_rows,
+ max_rows=max_rows,
+ max_cols=max_cols,
+ show_dimensions=show_dimensions,
+ decimal=decimal,
+ line_width=line_width,
+ )
+ return formatter.to_string(buf=buf)
# ----------------------------------------------------------------------
@@ -778,12 +802,13 @@ def style(self):
_shared_docs[
"items"
] = r"""
- Iterator over (column name, Series) pairs.
+ Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
- %s
+ Yields
+ ------
label : object
The column names for the DataFrame being iterated over.
content : Series
@@ -824,7 +849,7 @@ def style(self):
Name: population, dtype: int64
"""
- @Appender(_shared_docs["items"] % "Yields\n ------")
+ @Appender(_shared_docs["items"])
def items(self):
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
@@ -833,9 +858,9 @@ def items(self):
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
- @Appender(_shared_docs["items"] % "Returns\n -------")
+ @Appender(_shared_docs["items"])
def iteritems(self):
- return self.items()
+ yield from self.items()
def iterrows(self):
"""
@@ -853,8 +878,8 @@ def iterrows(self):
See Also
--------
- itertuples : Iterate over DataFrame rows as namedtuples of the values.
- items : Iterate over (column name, Series) pairs.
+ DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
+ DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
@@ -1176,9 +1201,7 @@ def from_dict(cls, data, orient="columns", dtype=None, columns=None):
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
- raise ValueError(
- "cannot use columns parameter with " "orient='columns'"
- )
+ raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
@@ -1199,7 +1222,7 @@ def to_numpy(self, dtype=None, copy=False):
Parameters
----------
dtype : str or numpy.dtype, optional
- The dtype to pass to :meth:`numpy.asarray`
+ The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
@@ -1330,7 +1353,7 @@ def to_dict(self, orient="dict", into=dict):
"""
if not self.columns.is_unique:
warnings.warn(
- "DataFrame columns are not unique, some " "columns will be omitted.",
+ "DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
@@ -1519,7 +1542,7 @@ def from_records(
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
- index : string, list of fields, array-like
+ index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
@@ -1530,7 +1553,7 @@ def from_records(
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
- coerce_float : boolean, default False
+ coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
@@ -1739,7 +1762,7 @@ def to_records(
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
- if isinstance(self.index, MultiIndex):
+ if isinstance(self.index, ABCMultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index.values)))
else:
@@ -1750,7 +1773,7 @@ def to_records(
count = 0
index_names = list(self.index.names)
- if isinstance(self.index, MultiIndex):
+ if isinstance(self.index, ABCMultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = "level_%d" % count
@@ -1811,9 +1834,9 @@ def to_records(
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
- msg = (
- "Invalid dtype {dtype} specified for " "{element} {name}"
- ).format(dtype=dtype_mapping, element=element, name=name)
+ msg = ("Invalid dtype {dtype} specified for {element} {name}").format(
+ dtype=dtype_mapping, element=element, name=name
+ )
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@@ -1920,81 +1943,6 @@ def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
- def to_sparse(self, fill_value=None, kind="block"):
- """
- Convert to SparseDataFrame.
-
- .. deprecated:: 0.25.0
-
- Implement the sparse version of the DataFrame meaning that any data
- matching a specific value it's omitted in the representation.
- The sparse DataFrame allows for a more efficient storage.
-
- Parameters
- ----------
- fill_value : float, default None
- The specific value that should be omitted in the representation.
- kind : {'block', 'integer'}, default 'block'
- The kind of the SparseIndex tracking where data is not equal to
- the fill value:
-
- - 'block' tracks only the locations and sizes of blocks of data.
- - 'integer' keeps an array with all the locations of the data.
-
- In most cases 'block' is recommended, since it's more memory
- efficient.
-
- Returns
- -------
- SparseDataFrame
- The sparse representation of the DataFrame.
-
- See Also
- --------
- DataFrame.to_dense :
- Converts the DataFrame back to the its dense form.
-
- Examples
- --------
- >>> df = pd.DataFrame([(np.nan, np.nan),
- ... (1., np.nan),
- ... (np.nan, 1.)])
- >>> df
- 0 1
- 0 NaN NaN
- 1 1.0 NaN
- 2 NaN 1.0
- >>> type(df)
- <class 'pandas.core.frame.DataFrame'>
-
- >>> sdf = df.to_sparse() # doctest: +SKIP
- >>> sdf # doctest: +SKIP
- 0 1
- 0 NaN NaN
- 1 1.0 NaN
- 2 NaN 1.0
- >>> type(sdf) # doctest: +SKIP
- <class 'pandas.core.sparse.frame.SparseDataFrame'>
- """
- warnings.warn(
- "DataFrame.to_sparse is deprecated and will be removed "
- "in a future version",
- FutureWarning,
- stacklevel=2,
- )
-
- from pandas.core.sparse.api import SparseDataFrame
-
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="SparseDataFrame")
- return SparseDataFrame(
- self._series,
- index=self.index,
- columns=self.columns,
- default_kind=kind,
- default_fill_value=fill_value,
- )
-
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
def to_stata(
self,
@@ -2043,9 +1991,6 @@ def to_stata(
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
-
- .. versionadded:: 0.19.0
-
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be used
read by Stata 10 and later. Version 117 can be read by Stata 13
@@ -2074,8 +2019,6 @@ def to_stata(
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
- .. versionadded:: 0.19.0
-
See Also
--------
read_stata : Import Stata data files.
@@ -2094,9 +2037,7 @@ def to_stata(
raise ValueError("Only formats 114 and 117 supported.")
if version == 114:
if convert_strl is not None:
- raise ValueError(
- "strl support is only available when using " "format 117"
- )
+ raise ValueError("strl support is only available when using format 117")
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
@@ -2167,8 +2108,12 @@ def to_parquet(
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
- If ``False``, they will not be written to the file. If ``None``,
- the behavior depends on the chosen engine.
+ If ``False``, they will not be written to the file.
+ If ``None``, similar to ``True`` the dataframe's index(es)
+ will be saved. However, instead of being saved as values,
+ the RangeIndex will be stored as a range in the metadata so it
+ doesn't require much space and is faster. Other indexes will
+ be included as columns in the file output.
.. versionadded:: 0.24.0
@@ -2218,6 +2163,7 @@ def to_parquet(
)
@Substitution(
+ header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int",
col_space="The minimum width of each column in CSS length "
@@ -2265,9 +2211,6 @@ def to_html(
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
-
- .. versionadded:: 0.19.0
-
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
@@ -2288,7 +2231,6 @@ def to_html(
formatter = fmt.DataFrameFormatter(
self,
- buf=buf,
columns=columns,
col_space=col_space,
na_rep=na_rep,
@@ -2309,10 +2251,9 @@ def to_html(
render_links=render_links,
)
# TODO: a generic formatter wld b in DataFrameFormatter
- formatter.to_html(classes=classes, notebook=notebook, border=border)
-
- if buf is None:
- return formatter.buf.getvalue()
+ return formatter.to_html(
+ buf=buf, classes=classes, notebook=notebook, border=border
+ )
# ----------------------------------------------------------------------
@@ -2513,7 +2454,7 @@ def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
- return "{num:3.1f}{size_q} " "{x}".format(
+ return "{num:3.1f}{size_q} {x}".format(
num=num, size_q=size_qualifier, x=x
)
num /= 1024.0
@@ -2601,12 +2542,12 @@ def memory_usage(self, index=True, deep=False):
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
- int64 float64 complex128 object bool
- 0 1 1.0 1.0+0.0j 1 True
- 1 1 1.0 1.0+0.0j 1 True
- 2 1 1.0 1.0+0.0j 1 True
- 3 1 1.0 1.0+0.0j 1 True
- 4 1 1.0 1.0+0.0j 1 True
+ int64 float64 complex128 object bool
+ 0 1 1.0 1.000000+0.000000j 1 True
+ 1 1 1.0 1.000000+0.000000j 1 True
+ 2 1 1.0 1.000000+0.000000j 1 True
+ 3 1 1.0 1.000000+0.000000j 1 True
+ 4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
@@ -2785,113 +2726,7 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover
self._data = dm._data
# ----------------------------------------------------------------------
- # Getting and setting elements
-
- def get_value(self, index, col, takeable=False):
- """
- Quickly retrieve single value at passed column and index.
-
- .. deprecated:: 0.21.0
- Use .at[] or .iat[] accessors instead.
-
- Parameters
- ----------
- index : row label
- col : column label
- takeable : interpret the index/col as indexers, default False
-
- Returns
- -------
- scalar
- """
-
- warnings.warn(
- "get_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._get_value(index, col, takeable=takeable)
-
- def _get_value(self, index, col, takeable=False):
-
- if takeable:
- series = self._iget_item_cache(col)
- return com.maybe_box_datetimelike(series._values[index])
-
- series = self._get_item_cache(col)
- engine = self.index._engine
-
- try:
- return engine.get_value(series._values, index)
- except KeyError:
- # GH 20629
- if self.index.nlevels > 1:
- # partial indexing forbidden
- raise
- except (TypeError, ValueError):
- pass
-
- # we cannot handle direct indexing
- # use positional
- col = self.columns.get_loc(col)
- index = self.index.get_loc(index)
- return self._get_value(index, col, takeable=True)
-
- _get_value.__doc__ = get_value.__doc__
-
- def set_value(self, index, col, value, takeable=False):
- """
- Put single value at passed column and index.
-
- .. deprecated:: 0.21.0
- Use .at[] or .iat[] accessors instead.
-
- Parameters
- ----------
- index : row label
- col : column label
- value : scalar
- takeable : interpret the index/col as indexers, default False
-
- Returns
- -------
- DataFrame
- If label pair is contained, will be reference to calling DataFrame,
- otherwise a new object.
- """
- warnings.warn(
- "set_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._set_value(index, col, value, takeable=takeable)
-
- def _set_value(self, index, col, value, takeable=False):
- try:
- if takeable is True:
- series = self._iget_item_cache(col)
- return series._set_value(index, value, takeable=True)
-
- series = self._get_item_cache(col)
- engine = self.index._engine
- engine.set_value(series._values, index, value)
- return self
- except (KeyError, TypeError):
-
- # set using a non-recursive method & reset the cache
- if takeable:
- self.iloc[index, col] = value
- else:
- self.loc[index, col] = value
- self._item_cache.pop(col, None)
-
- return self
-
- _set_value.__doc__ = set_value.__doc__
+ # Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
@@ -2908,8 +2743,6 @@ def _ixs(self, i: int, axis: int = 0):
if axis == 0:
label = self.index[i]
new_values = self._data.fast_xs(i)
- if is_scalar(new_values):
- return new_values
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
@@ -2955,11 +2788,13 @@ def __getitem__(self, key):
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
+ # either we have a slice or we have a string that can be converted
+ # to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
- return self._getitem_frame(key)
+ return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
@@ -2978,7 +2813,7 @@ def __getitem__(self, key):
else:
if is_iterator(key):
key = list(key)
- indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)
+ indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
@@ -2991,75 +2826,266 @@ def __getitem__(self, key):
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
- if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
+ if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex):
data = data[key]
return data
- def _getitem_bool_array(self, key):
- # also raises Exception if object array with NA values
- # warning here just in case -- previously __setitem__ was
- # reindexing but __getitem__ was not; it seems more reasonable to
- # go with the __setitem__ behavior since that is more consistent
- # with all other indexing behavior
- if isinstance(key, Series) and not key.index.equals(self.index):
- warnings.warn(
- "Boolean Series key will be reindexed to match " "DataFrame index.",
- UserWarning,
- stacklevel=3,
- )
- elif len(key) != len(self.index):
- raise ValueError(
- "Item wrong length %d instead of %d." % (len(key), len(self.index))
- )
+ def _getitem_bool_array(self, key):
+ # also raises Exception if object array with NA values
+ # warning here just in case -- previously __setitem__ was
+ # reindexing but __getitem__ was not; it seems more reasonable to
+ # go with the __setitem__ behavior since that is more consistent
+ # with all other indexing behavior
+ if isinstance(key, Series) and not key.index.equals(self.index):
+ warnings.warn(
+ "Boolean Series key will be reindexed to match DataFrame index.",
+ UserWarning,
+ stacklevel=3,
+ )
+ elif len(key) != len(self.index):
+ raise ValueError(
+ "Item wrong length %d instead of %d." % (len(key), len(self.index))
+ )
+
+ # check_bool_indexer will throw exception if Series key cannot
+ # be reindexed to match DataFrame rows
+ key = check_bool_indexer(self.index, key)
+ indexer = key.nonzero()[0]
+ return self.take(indexer, axis=0)
+
+ def _getitem_multilevel(self, key):
+ # self.columns is a MultiIndex
+ loc = self.columns.get_loc(key)
+ if isinstance(loc, (slice, Series, np.ndarray, Index)):
+ new_columns = self.columns[loc]
+ result_columns = maybe_droplevels(new_columns, key)
+ if self._is_mixed_type:
+ result = self.reindex(columns=new_columns)
+ result.columns = result_columns
+ else:
+ new_values = self.values[:, loc]
+ result = self._constructor(
+ new_values, index=self.index, columns=result_columns
+ )
+ result = result.__finalize__(self)
+
+ # If there is only one column being returned, and its name is
+ # either an empty string, or a tuple with an empty string as its
+ # first element, then treat the empty string as a placeholder
+ # and return the column as if the user had provided that empty
+ # string in the key. If the result is a Series, exclude the
+ # implied empty string from its name.
+ if len(result.columns) == 1:
+ top = result.columns[0]
+ if isinstance(top, tuple):
+ top = top[0]
+ if top == "":
+ result = result[""]
+ if isinstance(result, Series):
+ result = self._constructor_sliced(
+ result, index=self.index, name=key
+ )
+
+ result._set_is_copy(self)
+ return result
+ else:
+ return self._get_item_cache(key)
+
+ def _get_value(self, index, col, takeable: bool = False):
+ """
+ Quickly retrieve single value at passed column and index.
+
+ Parameters
+ ----------
+ index : row label
+ col : column label
+ takeable : interpret the index/col as indexers, default False
+
+ Returns
+ -------
+ scalar
+ """
+ if takeable:
+ series = self._iget_item_cache(col)
+ return com.maybe_box_datetimelike(series._values[index])
+
+ series = self._get_item_cache(col)
+ engine = self.index._engine
+
+ try:
+ return engine.get_value(series._values, index)
+ except KeyError:
+ # GH 20629
+ if self.index.nlevels > 1:
+ # partial indexing forbidden
+ raise
+ except (TypeError, ValueError):
+ pass
+
+ # we cannot handle direct indexing
+ # use positional
+ col = self.columns.get_loc(col)
+ index = self.index.get_loc(index)
+ return self._get_value(index, col, takeable=True)
+
+ def __setitem__(self, key, value):
+ key = com.apply_if_callable(key, self)
+
+ # see if we can slice the rows
+ indexer = convert_to_index_sliceable(self, key)
+ if indexer is not None:
+ # either we have a slice or we have a string that can be converted
+ # to a slice for partial-string date indexing
+ return self._setitem_slice(indexer, value)
+
+ if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
+ self._setitem_frame(key, value)
+ elif isinstance(key, (Series, np.ndarray, list, Index)):
+ self._setitem_array(key, value)
+ else:
+ # set column
+ self._set_item(key, value)
+
+ def _setitem_slice(self, key, value):
+ self._check_setitem_copy()
+ self.loc[key] = value
+
+ def _setitem_array(self, key, value):
+ # also raises Exception if object array with NA values
+ if com.is_bool_indexer(key):
+ if len(key) != len(self.index):
+ raise ValueError(
+ "Item wrong length %d instead of %d!" % (len(key), len(self.index))
+ )
+ key = check_bool_indexer(self.index, key)
+ indexer = key.nonzero()[0]
+ self._check_setitem_copy()
+ self.loc._setitem_with_indexer(indexer, value)
+ else:
+ if isinstance(value, DataFrame):
+ if len(value.columns) != len(key):
+ raise ValueError("Columns must be same length as key")
+ for k1, k2 in zip(key, value.columns):
+ self[k1] = value[k2]
+ else:
+ indexer = self.loc._get_listlike_indexer(
+ key, axis=1, raise_missing=False
+ )[1]
+ self._check_setitem_copy()
+ self.loc._setitem_with_indexer((slice(None), indexer), value)
+
+ def _setitem_frame(self, key, value):
+ # support boolean setting with DataFrame input, e.g.
+ # df[df > df2] = 0
+ if isinstance(key, np.ndarray):
+ if key.shape != self.shape:
+ raise ValueError("Array conditional must be same shape as self")
+ key = self._constructor(key, **self._construct_axes_dict())
+
+ if key.values.size and not is_bool_dtype(key.values):
+ raise TypeError(
+ "Must pass DataFrame or 2-d ndarray with boolean values only"
+ )
+
+ self._check_inplace_setting(value)
+ self._check_setitem_copy()
+ self._where(-key, value, inplace=True)
+
+ def _set_item(self, key, value):
+ """
+ Add series to DataFrame in specified column.
+
+ If series is a numpy-array (not a Series/TimeSeries), it must be the
+ same length as the DataFrames index or an error will be thrown.
+
+ Series/TimeSeries will be conformed to the DataFrames index to
+ ensure homogeneity.
+ """
+
+ self._ensure_valid_index(value)
+ value = self._sanitize_column(key, value)
+ NDFrame._set_item(self, key, value)
+
+ # check if we are modifying a copy
+ # try to set first as we want an invalid
+ # value exception to occur first
+ if len(self):
+ self._check_setitem_copy()
+
+ def _set_value(self, index, col, value, takeable: bool = False):
+ """
+ Put single value at passed column and index.
+
+ Parameters
+ ----------
+ index : row label
+ col : column label
+ value : scalar
+ takeable : interpret the index/col as indexers, default False
+
+ Returns
+ -------
+ DataFrame
+ If label pair is contained, will be reference to calling DataFrame,
+ otherwise a new object.
+ """
+ try:
+ if takeable is True:
+ series = self._iget_item_cache(col)
+ return series._set_value(index, value, takeable=True)
- # check_bool_indexer will throw exception if Series key cannot
- # be reindexed to match DataFrame rows
- key = check_bool_indexer(self.index, key)
- indexer = key.nonzero()[0]
- return self.take(indexer, axis=0)
+ series = self._get_item_cache(col)
+ engine = self.index._engine
+ engine.set_value(series._values, index, value)
+ return self
+ except (KeyError, TypeError):
- def _getitem_multilevel(self, key):
- loc = self.columns.get_loc(key)
- if isinstance(loc, (slice, Series, np.ndarray, Index)):
- new_columns = self.columns[loc]
- result_columns = maybe_droplevels(new_columns, key)
- if self._is_mixed_type:
- result = self.reindex(columns=new_columns)
- result.columns = result_columns
+ # set using a non-recursive method & reset the cache
+ if takeable:
+ self.iloc[index, col] = value
else:
- new_values = self.values[:, loc]
- result = self._constructor(
- new_values, index=self.index, columns=result_columns
+ self.loc[index, col] = value
+ self._item_cache.pop(col, None)
+
+ return self
+
+ def _ensure_valid_index(self, value):
+ """
+ Ensure that if we don't have an index, that we can create one from the
+ passed value.
+ """
+ # GH5632, make sure that we are a Series convertible
+ if not len(self.index) and is_list_like(value) and len(value):
+ try:
+ value = Series(value)
+ except (ValueError, NotImplementedError, TypeError):
+ raise ValueError(
+ "Cannot set a frame with no defined index "
+ "and a value that cannot be converted to a "
+ "Series"
)
- result = result.__finalize__(self)
- # If there is only one column being returned, and its name is
- # either an empty string, or a tuple with an empty string as its
- # first element, then treat the empty string as a placeholder
- # and return the column as if the user had provided that empty
- # string in the key. If the result is a Series, exclude the
- # implied empty string from its name.
- if len(result.columns) == 1:
- top = result.columns[0]
- if isinstance(top, tuple):
- top = top[0]
- if top == "":
- result = result[""]
- if isinstance(result, Series):
- result = self._constructor_sliced(
- result, index=self.index, name=key
- )
+ self._data = self._data.reindex_axis(
+ value.index.copy(), axis=1, fill_value=np.nan
+ )
- result._set_is_copy(self)
- return result
+ def _box_item_values(self, key, values):
+ items = self.columns[self.columns.get_loc(key)]
+ if values.ndim == 2:
+ return self._constructor(values.T, columns=items, index=self.index)
else:
- return self._get_item_cache(key)
+ return self._box_col_values(values, items)
- def _getitem_frame(self, key):
- if key.values.size and not is_bool_dtype(key.values):
- raise ValueError("Must pass DataFrame with boolean values only")
- return self.where(key)
+ def _box_col_values(self, values, items):
+ """
+ Provide boxed values for a column.
+ """
+ klass = self._constructor_sliced
+ return klass(values, index=self.index, name=items, fastpath=True)
+
+ # ----------------------------------------------------------------------
+ # Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
@@ -3087,8 +3113,6 @@ def query(self, expr, inplace=False, **kwargs):
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
- .. versionadded:: 0.18.0
-
Returns
-------
DataFrame
@@ -3206,8 +3230,6 @@ def eval(self, expr, inplace=False, **kwargs):
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
-
- .. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
@@ -3374,29 +3396,19 @@ def select_dtypes(self, include=None, exclude=None):
5 False 2.0
"""
- def _get_info_slice(obj, indexer):
- """Slice the info axis of `obj` with `indexer`."""
- if not hasattr(obj, "_info_axis_number"):
- msg = "object of type {typ!r} has no info axis"
- raise TypeError(msg.format(typ=type(obj).__name__))
- slices = [slice(None)] * obj.ndim
- slices[obj._info_axis_number] = indexer
- return tuple(slices)
-
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
- selection = tuple(map(frozenset, (include, exclude)))
+ selection = (frozenset(include), frozenset(exclude))
if not any(selection):
- raise ValueError("at least one of include or exclude must be " "nonempty")
+ raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
- include, exclude = map(
- lambda x: frozenset(map(infer_dtype_from_object, x)), selection
- )
+ include = frozenset(infer_dtype_from_object(x) for x in include)
+ exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
@@ -3408,147 +3420,35 @@ def _get_info_slice(obj, indexer):
)
)
- # empty include/exclude -> defaults to True
- # three cases (we've already raised if both are empty)
- # case 1: empty include, nonempty exclude
- # we have True, True, ... True for include, same for exclude
- # in the loop below we get the excluded
- # and when we call '&' below we get only the excluded
- # case 2: nonempty include, empty exclude
- # same as case 1, but with include
- # case 3: both nonempty
- # the "union" of the logic of case 1 and case 2:
- # we get the included and excluded, and return their logical and
- include_these = Series(not bool(include), index=self.columns)
- exclude_these = Series(not bool(exclude), index=self.columns)
-
- def is_dtype_instance_mapper(idx, dtype):
- return idx, functools.partial(issubclass, dtype.type)
-
- for idx, f in itertools.starmap(
- is_dtype_instance_mapper, enumerate(self.dtypes)
- ):
- if include: # checks for the case of empty include or exclude
- include_these.iloc[idx] = any(map(f, include))
- if exclude:
- exclude_these.iloc[idx] = not any(map(f, exclude))
-
- dtype_indexer = include_these & exclude_these
- return self.loc[_get_info_slice(self, dtype_indexer)]
-
- def _box_item_values(self, key, values):
- items = self.columns[self.columns.get_loc(key)]
- if values.ndim == 2:
- return self._constructor(values.T, columns=items, index=self.index)
- else:
- return self._box_col_values(values, items)
-
- def _box_col_values(self, values, items):
- """
- Provide boxed values for a column.
- """
- klass = self._constructor_sliced
- return klass(values, index=self.index, name=items, fastpath=True)
-
- def __setitem__(self, key, value):
- key = com.apply_if_callable(key, self)
-
- # see if we can slice the rows
- indexer = convert_to_index_sliceable(self, key)
- if indexer is not None:
- return self._setitem_slice(indexer, value)
-
- if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
- self._setitem_frame(key, value)
- elif isinstance(key, (Series, np.ndarray, list, Index)):
- self._setitem_array(key, value)
- else:
- # set column
- self._set_item(key, value)
-
- def _setitem_slice(self, key, value):
- self._check_setitem_copy()
- self.loc._setitem_with_indexer(key, value)
-
- def _setitem_array(self, key, value):
- # also raises Exception if object array with NA values
- if com.is_bool_indexer(key):
- if len(key) != len(self.index):
- raise ValueError(
- "Item wrong length %d instead of %d!" % (len(key), len(self.index))
- )
- key = check_bool_indexer(self.index, key)
- indexer = key.nonzero()[0]
- self._check_setitem_copy()
- self.loc._setitem_with_indexer(indexer, value)
- else:
- if isinstance(value, DataFrame):
- if len(value.columns) != len(key):
- raise ValueError("Columns must be same length as key")
- for k1, k2 in zip(key, value.columns):
- self[k1] = value[k2]
- else:
- indexer = self.loc._convert_to_indexer(key, axis=1)
- self._check_setitem_copy()
- self.loc._setitem_with_indexer((slice(None), indexer), value)
+ # We raise when both include and exclude are empty
+ # Hence, we can just shrink the columns we want to keep
+ keep_these = np.full(self.shape[1], True)
+
+ def extract_unique_dtypes_from_dtypes_set(
+ dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
+ ) -> List[Dtype]:
+ extracted_dtypes = [
+ unique_dtype
+ for unique_dtype in unique_dtypes
+ if issubclass(unique_dtype.type, tuple(dtypes_set)) # type: ignore
+ ]
+ return extracted_dtypes
- def _setitem_frame(self, key, value):
- # support boolean setting with DataFrame input, e.g.
- # df[df > df2] = 0
- if isinstance(key, np.ndarray):
- if key.shape != self.shape:
- raise ValueError("Array conditional must be same shape as self")
- key = self._constructor(key, **self._construct_axes_dict())
+ unique_dtypes = self.dtypes.unique()
- if key.values.size and not is_bool_dtype(key.values):
- raise TypeError(
- "Must pass DataFrame or 2-d ndarray with boolean values only"
+ if include:
+ included_dtypes = extract_unique_dtypes_from_dtypes_set(
+ include, unique_dtypes
)
+ keep_these &= self.dtypes.isin(included_dtypes)
- self._check_inplace_setting(value)
- self._check_setitem_copy()
- self._where(-key, value, inplace=True)
-
- def _ensure_valid_index(self, value):
- """
- Ensure that if we don't have an index, that we can create one from the
- passed value.
- """
- # GH5632, make sure that we are a Series convertible
- if not len(self.index) and is_list_like(value):
- try:
- value = Series(value)
- except (ValueError, NotImplementedError, TypeError):
- raise ValueError(
- "Cannot set a frame with no defined index "
- "and a value that cannot be converted to a "
- "Series"
- )
-
- self._data = self._data.reindex_axis(
- value.index.copy(), axis=1, fill_value=np.nan
+ if exclude:
+ excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
+ exclude, unique_dtypes
)
+ keep_these &= ~self.dtypes.isin(excluded_dtypes)
- def _set_item(self, key, value):
- """
- Add series to DataFrame in specified column.
-
- If series is a numpy-array (not a Series/TimeSeries), it must be the
- same length as the DataFrames index or an error will be thrown.
-
- Series/TimeSeries will be conformed to the DataFrames index to
- ensure homogeneity.
- """
-
- self._ensure_valid_index(value)
- value = self._sanitize_column(key, value)
- NDFrame._set_item(self, key, value)
-
- # check if we are modifying a copy
- # try to set first as we want an invalid
- # value exception to occur first
- if len(self):
- self._check_setitem_copy()
+ return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False):
"""
@@ -3561,7 +3461,7 @@ def insert(self, loc, column, value, allow_duplicates=False):
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
- column : string, number, or hashable object
+ column : str, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
@@ -3603,7 +3503,7 @@ def assign(self, **kwargs):
or modified columns. All items are computed first, and then assigned
in alphabetical order.
- .. versionchanged :: 0.23.0
+ .. versionchanged:: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
@@ -3698,7 +3598,7 @@ def reindexer(value):
# other
raise TypeError(
- "incompatible index of inserted column " "with frame index"
+ "incompatible index of inserted column with frame index"
)
return value
@@ -3708,7 +3608,7 @@ def reindexer(value):
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
- if isinstance(self.columns, MultiIndex) and key in self.columns:
+ if isinstance(self.columns, ABCMultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
@@ -3757,7 +3657,7 @@ def reindexer(value):
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
- if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
+ if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
@@ -4381,7 +4281,7 @@ def set_index(
found = col in self.columns
except TypeError:
raise TypeError(
- err_msg + " Received column of " "type {}".format(type(col))
+ err_msg + " Received column of type {}".format(type(col))
)
else:
if not found:
@@ -4652,7 +4552,7 @@ def _maybe_casted_values(index, labels=None):
new_index = self.index.droplevel(level)
if not drop:
- if isinstance(self.index, MultiIndex):
+ if isinstance(self.index, ABCMultiIndex):
names = [
n if n is not None else ("level_%d" % i)
for (i, n) in enumerate(self.index.names)
@@ -4663,7 +4563,7 @@ def _maybe_casted_values(index, labels=None):
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
- multi_col = isinstance(self.columns, MultiIndex)
+ multi_col = isinstance(self.columns, ABCMultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
@@ -4875,7 +4775,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
- inplace : boolean, default False
+ inplace : bool, default False
Whether to drop duplicates in place or to return a copy
Returns
@@ -5045,7 +4945,7 @@ def sort_index(
level, ascending=ascending, sort_remaining=sort_remaining
)
- elif isinstance(labels, MultiIndex):
+ elif isinstance(labels, ABCMultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(
@@ -5297,17 +5197,12 @@ def swaplevel(self, i=-2, j=-1, axis=0):
Parameters
----------
- i, j : int, string (can be mixed)
+ i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
DataFrame
-
- .. versionchanged:: 0.18.1
-
- The indexes ``i`` and ``j`` are now optional, and default to
- the two innermost levels of the index.
"""
result = self.copy()
@@ -5336,7 +5231,7 @@ def reorder_levels(self, order, axis=0):
type of caller (new object)
"""
axis = self._get_axis_number(axis)
- if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
+ if not isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
@@ -5352,48 +5247,66 @@ def reorder_levels(self, order, axis=0):
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join="outer", level=level, copy=False)
- new_index, new_columns = this.index, this.columns
- def _arith_op(left, right):
- # for the mixed_type case where we iterate over columns,
- # _arith_op(left, right) is equivalent to
- # left._binop(right, func, fill_value=fill_value)
- left, right = ops.fill_binop(left, right, fill_value)
- return func(left, right)
+ if fill_value is None:
+ # since _arith_op may be called in a loop, avoid function call
+ # overhead if possible by doing this check once
+ _arith_op = func
+
+ else:
+
+ def _arith_op(left, right):
+ # for the mixed_type case where we iterate over columns,
+ # _arith_op(left, right) is equivalent to
+ # left._binop(right, func, fill_value=fill_value)
+ left, right = ops.fill_binop(left, right, fill_value)
+ return func(left, right)
if ops.should_series_dispatch(this, other, func):
# iterate over columns
- return ops.dispatch_to_series(this, other, _arith_op)
+ new_data = ops.dispatch_to_series(this, other, _arith_op)
else:
- result = _arith_op(this.values, other.values)
- return self._constructor(
- result, index=new_index, columns=new_columns, copy=False
- )
+ with np.errstate(all="ignore"):
+ res_values = _arith_op(this.values, other.values)
+ new_data = dispatch_fill_zeros(func, this.values, other.values, res_values)
+ return this._construct_result(new_data)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join="outer", axis=0, level=level, copy=False)
- assert left.index.equals(right.index)
+ # at this point we have `left.index.equals(right.index)`
if left._is_mixed_type or right._is_mixed_type:
# operate column-wise; avoid costly object-casting in `.values`
- return ops.dispatch_to_series(left, right, func)
+ new_data = ops.dispatch_to_series(left, right, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(left.values.T, right.values).T
- return self._constructor(
- new_data, index=left.index, columns=self.columns, copy=False
- )
+ return left._construct_result(new_data)
- def _combine_match_columns(self, other, func, level=None):
- assert isinstance(other, Series)
+ def _combine_match_columns(self, other: Series, func, level=None):
left, right = self.align(other, join="outer", axis=1, level=level, copy=False)
- assert left.columns.equals(right.index)
- return ops.dispatch_to_series(left, right, func, axis="columns")
+ # at this point we have `left.columns.equals(right.index)`
+ new_data = ops.dispatch_to_series(left, right, func, axis="columns")
+ return left._construct_result(new_data)
+
+ def _construct_result(self, result) -> "DataFrame":
+ """
+ Wrap the result of an arithmetic, comparison, or logical operation.
+
+ Parameters
+ ----------
+ result : DataFrame
- def _combine_const(self, other, func):
- assert lib.is_scalar(other) or np.ndim(other) == 0
- return ops.dispatch_to_series(self, other, func)
+ Returns
+ -------
+ DataFrame
+ """
+ out = self._constructor(result, index=self.index, copy=False)
+ # Pin columns instead of passing to constructor for compat with
+ # non-unique columns case
+ out.columns = self.columns
+ return out
def combine(self, other, func, fill_value=None, overwrite=True):
"""
@@ -5676,7 +5589,7 @@ def update(
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
- .. versionchanged :: 0.24.0
+ .. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
@@ -5763,9 +5676,7 @@ def update(
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
- raise ValueError(
- "The parameter errors must be either " "'ignore' or 'raise'"
- )
+ raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
@@ -5812,17 +5723,17 @@ def update(
Parameters
----------%s
- index : string or object, optional
+ index : str or object, optional
Column to use to make new frame's index. If None, uses
existing index.
- columns : string or object
+ columns : str or object
Column to use to make new frame's columns.
- values : string, object or a list of the previous, optional
+ values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
- .. versionchanged :: 0.23.0
+ .. versionchanged:: 0.23.0
Also accept list of column names.
Returns
@@ -5939,19 +5850,19 @@ def pivot(self, index=None, columns=None, values=None):
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
- margins : boolean, default False
+ margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals)
- dropna : boolean, default True
+ dropna : bool, default True
Do not include columns whose entries are all NaN
- margins_name : string, default 'All'
+ margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
- observed : boolean, default False
+ observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
- .. versionchanged :: 0.25.0
+ .. versionchanged:: 0.25.0
Returns
-------
@@ -6239,14 +6150,14 @@ def stack(self, level=-1, dropna=True):
def explode(self, column: Union[str, Tuple]) -> "DataFrame":
"""
- Transform each element of a list-like to a row, replicating the
- index values.
+ Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
+ Column to explode.
Returns
-------
@@ -6262,8 +6173,8 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
- index labels
- DataFrame.melt : Unpivot a DataFrame from wide format to long format
+ index labels.
+ DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a DataFrame from list-like columns to long format.
Notes
@@ -6299,12 +6210,13 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
if not self.columns.is_unique:
raise ValueError("columns must be unique")
- result = self[column].explode()
- return (
- self.drop([column], axis=1)
- .join(result)
- .reindex(columns=self.columns, copy=False)
- )
+ df = self.reset_index(drop=True)
+ result = df[column].explode()
+ result = df.drop([column], axis=1).join(result)
+ result.index = self.index.take(result.index)
+ result = result.reindex(columns=self.columns, copy=False)
+
+ return result
def unstack(self, level=-1, fill_value=None):
"""
@@ -6319,13 +6231,11 @@ def unstack(self, level=-1, fill_value=None):
Parameters
----------
- level : int, string, or list of these, default -1 (last level)
+ level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
- .. versionadded:: 0.18.0
-
Returns
-------
Series or DataFrame
@@ -6395,7 +6305,7 @@ def unstack(self, level=-1, fill_value=None):
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
- col_level : int or string, optional
+ col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
Returns
@@ -6984,11 +6894,11 @@ def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
- ignore_index : boolean, default False
+ ignore_index : bool, default False
If True, do not use the index labels.
- verify_integrity : boolean, default False
+ verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
- sort : boolean, default None
+ sort : bool, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
@@ -7234,7 +7144,6 @@ def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
4 K4 A4 NaN
5 K5 A5 NaN
"""
- # For SparseDataFrame's benefit
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
@@ -7264,7 +7173,7 @@ def _join_compat(
else:
if on is not None:
raise ValueError(
- "Joining multiple DataFrames only supported" " for joining on index"
+ "Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
@@ -7274,10 +7183,14 @@ def _join_compat(
# join indexes only using concat
if can_concat:
if how == "left":
- res = concat(frames, axis=1, join="outer", verify_integrity=True)
+ res = concat(
+ frames, axis=1, join="outer", verify_integrity=True, sort=sort
+ )
return res.reindex(self.index, copy=False)
else:
- return concat(frames, axis=1, join=how, verify_integrity=True)
+ return concat(
+ frames, axis=1, join=how, verify_integrity=True, sort=sort
+ )
joined = frames[0]
@@ -7425,7 +7338,7 @@ def _series_round(s, decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
- raise TypeError("decimals must be an integer, a dict-like or a " "Series")
+ raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
@@ -7832,7 +7745,7 @@ def _count_level(self, level, axis=0, numeric_only=False):
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
- if not isinstance(count_axis, MultiIndex):
+ if not isinstance(count_axis, ABCMultiIndex):
raise TypeError(
"Can only count levels on hierarchical "
"{ax}.".format(ax=self._get_axis_name(axis))
@@ -8027,7 +7940,7 @@ def idxmin(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -8064,7 +7977,7 @@ def idxmax(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -8213,8 +8126,6 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
- .. versionadded:: 0.18.0
-
Returns
-------
Series or DataFrame
@@ -8266,6 +8177,13 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
if is_transposed:
data = data.T
+ if len(data.columns) == 0:
+ # GH#23925 _get_numeric_data may have dropped all columns
+ cols = Index([], name=self.columns.name)
+ if is_list_like(q):
+ return self._constructor([], index=q, columns=cols)
+ return self._constructor_sliced([], index=cols, name=q)
+
result = data._data.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
@@ -8422,11 +8340,11 @@ def isin(self, values):
)
elif isinstance(values, Series):
if not values.index.is_unique:
- raise ValueError("cannot compute isin with " "a duplicate axis.")
+ raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
- raise ValueError("cannot compute isin with " "a duplicate axis.")
+ raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f28f58b070368..87ea5dc52a6b6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5,8 +5,20 @@
import json
import operator
import pickle
+import re
from textwrap import dedent
-from typing import Callable, FrozenSet, List, Optional, Set
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ FrozenSet,
+ Hashable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Union,
+)
import warnings
import weakref
@@ -22,7 +34,6 @@
from pandas.util._decorators import Appender, Substitution, rewrite_axis_style_signature
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
-from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
@@ -30,10 +41,10 @@
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
- is_datetime64_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
+ is_float,
is_integer,
is_list_like,
is_number,
@@ -50,7 +61,7 @@
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
-from pandas._typing import Dtype
+from pandas._typing import Dtype, FilePathOrBuffer, Scalar
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
@@ -68,13 +79,14 @@
from pandas.core.internals import BlockManager
from pandas.core.ops import _align_method_FRAME
+from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
# goal is to be able to define the docs close to function, while still being
# able to share
-_shared_docs = dict()
+_shared_docs = dict() # type: Dict[str, str]
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
@@ -121,6 +133,9 @@ def _single_replace(self, to_replace, method, inplace, limit):
return result
+bool_t = bool # Need alias because NDFrame has def bool:
+
+
class NDFrame(PandasObject, SelectionMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
@@ -130,7 +145,7 @@ class NDFrame(PandasObject, SelectionMixin):
----------
data : BlockManager
axes : list
- copy : boolean, default False
+ copy : bool, default False
"""
_internal_names = [
@@ -151,7 +166,7 @@ class NDFrame(PandasObject, SelectionMixin):
_internal_names_set = set(_internal_names) # type: Set[str]
_accessors = set() # type: Set[str]
_deprecations = frozenset(
- ["as_blocks", "blocks", "is_copy"]
+ ["as_blocks", "blocks", "is_copy", "ftypes", "ix"]
) # type: FrozenSet[str]
_metadata = [] # type: List[str]
_is_copy = None
@@ -279,7 +294,8 @@ def _setup_axes(
ns=None,
docs=None,
):
- """Provide axes setup for the major PandasObjects.
+ """
+ Provide axes setup for the major PandasObjects.
Parameters
----------
@@ -287,8 +303,8 @@ def _setup_axes(
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
- axes_are_reversed : boolean whether to treat passed axes as
- reversed (DataFrame)
+ axes_are_reversed : bool
+ Whether to treat passed axes as reversed (DataFrame).
build_axes : setup the axis properties (default True)
"""
@@ -381,7 +397,7 @@ def _construct_axes_from_arguments(
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
- raise TypeError("not enough/duplicate arguments " "specified!")
+ raise TypeError("not enough/duplicate arguments specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
@@ -565,7 +581,7 @@ def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
- def set_axis(self, labels, axis=0, inplace=None):
+ def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
@@ -588,15 +604,9 @@ def set_axis(self, labels, axis=0, inplace=None):
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
- inplace : bool, default None
+ inplace : bool, default False
Whether to return a new %(klass)s instance.
- .. warning::
-
- ``inplace=None`` currently falls back to to True, but in a
- future version, will default to False. Use inplace=True
- explicitly rather than relying on the default.
-
Returns
-------
renamed : %(klass)s or None
@@ -617,27 +627,19 @@ def set_axis(self, labels, axis=0, inplace=None):
2 3
dtype: int64
- >>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
+ >>> s.set_axis(['a', 'b', 'c'], axis=0)
a 1
b 2
c 3
dtype: int64
- The original object is not modified.
-
- >>> s
- 0 1
- 1 2
- 2 3
- dtype: int64
-
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
- >>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)
+ >>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
@@ -645,7 +647,7 @@ def set_axis(self, labels, axis=0, inplace=None):
Change the column labels.
- >>> df.set_axis(['I', 'II'], axis='columns', inplace=False)
+ >>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
@@ -671,15 +673,6 @@ def set_axis(self, labels, axis=0, inplace=None):
)
labels, axis = axis, labels
- if inplace is None:
- warnings.warn(
- "set_axis currently defaults to operating inplace.\nThis "
- "will change in a future version of pandas, use "
- "inplace=True to avoid this warning.",
- FutureWarning,
- stacklevel=2,
- )
- inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
@@ -698,7 +691,7 @@ def transpose(self, *args, **kwargs):
Parameters
----------
args : %(args_transpose)s
- copy : boolean, default False
+ copy : bool, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
**kwargs
@@ -975,15 +968,12 @@ def squeeze(self, axis=None):
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
- try:
- return self.iloc[
- tuple(
- 0 if i in axis and len(a) == 1 else slice(None)
- for i, a in enumerate(self.axes)
- )
- ]
- except Exception:
- return self
+ return self.iloc[
+ tuple(
+ 0 if i in axis and len(a) == 1 else slice(None)
+ for i, a in enumerate(self.axes)
+ )
+ ]
def swaplevel(self, i=-2, j=-1, axis=0):
"""
@@ -997,11 +987,6 @@ def swaplevel(self, i=-2, j=-1, axis=0):
Returns
-------
swapped : same type as caller (new object)
-
- .. versionchanged:: 0.18.1
-
- The indexes ``i`` and ``j`` are now optional, and default to
- the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
@@ -1152,7 +1137,7 @@ def rename(self, *args, **kwargs):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
- f = com._get_rename_function(v)
+ f = com.get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
@@ -1326,7 +1311,7 @@ class name
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
- raise ValueError("Use `.rename` to alter labels " "with a mapper.")
+ raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
@@ -1340,7 +1325,7 @@ class name
if non_mapper:
newnames = v
else:
- f = com._get_rename_function(v)
+ f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
@@ -1538,16 +1523,12 @@ def __pos__(self):
return self.__array_wrap__(arr)
def __invert__(self):
- try:
- arr = operator.inv(com.values_from_object(self))
- return self.__array_wrap__(arr)
- except Exception:
-
+ if not self.size:
# inv fails with 0 len
- if not np.prod(self.shape):
- return self
+ return self
- raise
+ arr = operator.inv(com.values_from_object(self))
+ return self.__array_wrap__(arr)
def __nonzero__(self):
raise ValueError(
@@ -1901,7 +1882,7 @@ def __iter__(self):
# can we get a better explanation of this?
def keys(self):
"""
- Get the 'info axis' (see Indexing for more)
+ Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
@@ -2206,6 +2187,12 @@ def _repr_data_resource_(self):
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
+ ExcelWriter can also be used to append to an existing Excel file:
+
+ >>> with pd.ExcelWriter('output.xlsx',
+ ... mode='a') as writer: # doctest: +SKIP
+ ... df.to_excel(writer, sheet_name='Sheet_name_3')
+
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
@@ -2259,17 +2246,18 @@ def to_excel(
def to_json(
self,
- path_or_buf=None,
- orient=None,
- date_format=None,
- double_precision=10,
- force_ascii=True,
- date_unit="ms",
- default_handler=None,
- lines=False,
- compression="infer",
- index=True,
- ):
+ path_or_buf: Optional[FilePathOrBuffer] = None,
+ orient: Optional[str] = None,
+ date_format: Optional[str] = None,
+ double_precision: int = 10,
+ force_ascii: bool_t = True,
+ date_unit: str = "ms",
+ default_handler: Optional[Callable[[Any], Union[Scalar, List, Dict]]] = None,
+ lines: bool_t = False,
+ compression: Optional[str] = "infer",
+ index: bool_t = True,
+ indent: Optional[int] = None,
+ ) -> Optional[str]:
"""
Convert the object to a JSON string.
@@ -2278,10 +2266,10 @@ def to_json(
Parameters
----------
- path_or_buf : string or file handle, optional
+ path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
- orient : string
+ orient : str
Indication of expected JSON string format.
* Series
@@ -2320,7 +2308,7 @@ def to_json(
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
- date_unit : string, default 'ms' (milliseconds)
+ date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
@@ -2333,8 +2321,6 @@ def to_json(
throw ValueError if incorrect 'orient' since others are not list
like.
- .. versionadded:: 0.19.0
-
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
@@ -2351,6 +2337,11 @@ def to_json(
.. versionadded:: 0.23.0
+ indent : integer, optional
+ Length of whitespace used to indent each record.
+
+ .. versionadded:: 1.0.0
+
Returns
-------
None or str
@@ -2361,6 +2352,13 @@ def to_json(
--------
read_json
+ Notes
+ -----
+ The behavior of ``indent=0`` varies from the stdlib, which does not
+ indent the output but does insert newlines. Currently, ``indent=0``
+ and the default ``indent=None`` are equivalent in pandas, though this
+ may change in a future release.
+
Examples
--------
@@ -2411,6 +2409,10 @@ def to_json(
date_format = "iso"
elif date_format is None:
date_format = "epoch"
+
+ config.is_nonnegative_int(indent)
+ indent = indent or 0
+
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
@@ -2423,6 +2425,7 @@ def to_json(
lines=lines,
compression=compression,
index=index,
+ indent=indent,
)
def to_hdf(self, path_or_buf, key, **kwargs):
@@ -2540,10 +2543,22 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
+ Example pyarrow usage:
+
+ >>> import pandas as pd
+ >>> import pyarrow as pa
+ >>> df = pd.DataFrame({'A': [1, 2, 3]})
+ >>> context = pa.default_serialization_context()
+ >>> df_bytestring = context.serialize(df).to_buffer().to_pybytes()
+
+ For documentation on pyarrow, see `here
+ <https://arrow.apache.org/docs/python/index.html>`__.
+
Parameters
----------
- path : string File path, buffer-like, or None
- if None, return generated bytes
+ path : str, buffer-like, or None
+ Destination for the serialized object.
+ If None, return generated bytes
append : bool whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
@@ -2562,7 +2577,7 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
def to_sql(
self,
- name,
+ name: str,
con,
schema=None,
if_exists="fail",
@@ -2580,12 +2595,12 @@ def to_sql(
Parameters
----------
- name : string
+ name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects.
- schema : string, optional
+ schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
@@ -2598,18 +2613,19 @@ def to_sql(
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
- index_label : string or sequence, default None
+ index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
- Rows will be written in batches of this size at a time. By default,
- all rows will be written at once.
- dtype : dict, optional
- Specifying the datatype for columns. The keys should be the column
- names and the values should be the SQLAlchemy types or strings for
- the sqlite3 legacy mode.
- method : {None, 'multi', callable}, default None
+ Specify the number of rows in each batch to be written at a time.
+ By default, all rows will be written at once.
+ dtype : dict or scalar, optional
+ Specifying the datatype for columns. If a dictionary is used, the
+ keys should be the column names and the values should be the
+ SQLAlchemy types or strings for the sqlite3 legacy mode. If a
+ scalar is provided, it will be applied to all columns.
+ method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
@@ -2911,6 +2927,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
else:
return xarray.Dataset.from_dataframe(self)
+ @Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
@@ -2932,19 +2949,25 @@ def to_latex(
multicolumn=None,
multicolumn_format=None,
multirow=None,
+ caption=None,
+ label=None,
):
r"""
- Render an object to a LaTeX tabular environment table.
+ Render object to a LaTeX tabular, longtable, or nested table/tabular.
- Render an object to a tabular environment table. You can splice
- this into a LaTeX document. Requires \usepackage{booktabs}.
+ Requires ``\usepackage{booktabs}``. The output can be copy/pasted
+ into a main LaTeX document or read from an external file
+ with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
- Added to Series
+ Added to Series.
+
+ .. versionchanged:: 1.0.0
+ Added caption and label arguments.
Parameters
----------
- buf : file descriptor or None
+ buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
@@ -2991,8 +3014,6 @@ def to_latex(
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
-
- .. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
@@ -3012,12 +3033,17 @@ def to_latex(
.. versionadded:: 0.20.0
- Returns
- -------
- str or None
- If buf is None, returns the resulting LateX format as a
- string. Otherwise returns None.
+ caption : str, optional
+ The LaTeX caption to be placed inside ``\caption{}`` in the output.
+
+ .. versionadded:: 1.0.0
+ label : str, optional
+ The LaTeX label to be placed inside ``\label{}`` in the output.
+ This is used with ``\ref{}`` in the main ``.tex`` file.
+
+ .. versionadded:: 1.0.0
+ %(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
@@ -3029,10 +3055,15 @@ def to_latex(
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
- >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
- '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
- \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
- purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
+ >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
+ \begin{tabular}{lll}
+ \toprule
+ name & mask & weapon \\
+ \midrule
+ Raphael & red & sai \\
+ Donatello & purple & bo staff \\
+ \bottomrule
+ \end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
@@ -3050,7 +3081,6 @@ def to_latex(
formatter = DataFrameFormatter(
self,
- buf=buf,
columns=columns,
col_space=col_space,
na_rep=na_rep,
@@ -3064,40 +3094,40 @@ def to_latex(
escape=escape,
decimal=decimal,
)
- formatter.to_latex(
+ return formatter.to_latex(
+ buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
+ caption=caption,
+ label=label,
)
- if buf is None:
- return formatter.buf.getvalue()
-
def to_csv(
self,
- path_or_buf=None,
- sep=",",
- na_rep="",
- float_format=None,
- columns=None,
- header=True,
- index=True,
- index_label=None,
- mode="w",
- encoding=None,
- compression="infer",
- quoting=None,
- quotechar='"',
- line_terminator=None,
- chunksize=None,
- date_format=None,
- doublequote=True,
- escapechar=None,
- decimal=".",
- ):
+ path_or_buf: Optional[FilePathOrBuffer] = None,
+ sep: str = ",",
+ na_rep: str = "",
+ float_format: Optional[str] = None,
+ columns: Optional[Sequence[Hashable]] = None,
+ header: Union[bool_t, List[str]] = True,
+ index: bool_t = True,
+ index_label: Optional[Union[bool_t, str, Sequence[Hashable]]] = None,
+ mode: str = "w",
+ encoding: Optional[str] = None,
+ compression: Optional[Union[str, Dict[str, str]]] = "infer",
+ quoting: Optional[int] = None,
+ quotechar: str = '"',
+ line_terminator: Optional[str] = None,
+ chunksize: Optional[int] = None,
+ date_format: Optional[str] = None,
+ doublequote: bool_t = True,
+ escapechar: Optional[str] = None,
+ decimal: Optional[str] = ".",
+ ) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
@@ -3144,16 +3174,21 @@ def to_csv(
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
- compression : str, default 'infer'
- Compression mode among the following possible values: {'infer',
- 'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`
- is path-like, then detect compression from the following
- extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no
- compression).
-
- .. versionchanged:: 0.24.0
-
- 'infer' option added and set to default.
+ compression : str or dict, default 'infer'
+ If str, represents compression mode. If dict, value at 'method' is
+ the compression mode. Compression mode may be any of the following
+ possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
+ compression mode is 'infer' and `path_or_buf` is path-like, then
+ detect compression mode from the following extensions: '.gz',
+ '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
+ and mode is 'zip' or inferred as 'zip', other entries passed as
+ additional compression options.
+
+ .. versionchanged:: 0.25.0
+
+ May now be a dict with key 'method' as compression mode
+ and other entries as additional compression options if
+ compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
@@ -3198,6 +3233,13 @@ def to_csv(
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
+
+ # create 'out.zip' containing 'out.csv'
+ >>> compression_opts = dict(method='zip',
+ ... archive_name='out.csv') # doctest: +SKIP
+
+ >>> df.to_csv('out.zip', index=False,
+ ... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
@@ -3231,6 +3273,8 @@ def to_csv(
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
+ return None
+
# ----------------------------------------------------------------------
# Fancy Indexing
@@ -3241,41 +3285,8 @@ def _create_indexer(cls, name, indexer):
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
- def get(self, key, default=None):
- """
- Get item from object for given key (ex: DataFrame column).
-
- Returns default value if not found.
-
- Parameters
- ----------
- key : object
-
- Returns
- -------
- value : same type as items contained in object
- """
- try:
- return self[key]
- except (KeyError, ValueError, IndexError):
- return default
-
- def __getitem__(self, item):
- return self._get_item_cache(item)
-
- def _get_item_cache(self, item):
- """Return the cached item, item represents a label indexer."""
- cache = self._item_cache
- res = cache.get(item)
- if res is None:
- values = self._data.get(item)
- res = self._box_item_values(item, values)
- cache[item] = res
- res._set_as_cached(item, self)
-
- # for a chain
- res._is_copy = self._is_copy
- return res
+ # ----------------------------------------------------------------------
+ # Lookup Caching
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
@@ -3288,18 +3299,6 @@ def _reset_cacher(self):
if hasattr(self, "_cacher"):
del self._cacher
- def _iget_item_cache(self, item):
- """Return the cached item, item represents a positional indexer."""
- ax = self._info_axis
- if ax.is_unique:
- lower = self._get_item_cache(ax[item])
- else:
- lower = self.take(item, axis=self._info_axis_number)
- return lower
-
- def _box_item_values(self, key, values):
- raise AbstractMethodError(self)
-
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
@@ -3317,11 +3316,6 @@ def _get_cacher(self):
cacher = cacher[1]()
return cacher
- @property
- def _is_view(self):
- """Return boolean indicating if self is view of another array """
- return self._data.is_view
-
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
@@ -3329,11 +3323,10 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
Parameters
----------
- clear : boolean, default False
- clear the item cache
- verify_is_copy : boolean, default True
- provide is_copy checks
-
+ clear : bool, default False
+ Clear the item cache.
+ verify_is_copy : bool, default True
+ Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
@@ -3356,173 +3349,15 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
if clear:
self._clear_item_cache()
- def _clear_item_cache(self, i=None):
- if i is not None:
- self._item_cache.pop(i, None)
- else:
- self._item_cache.clear()
-
- def _slice(self, slobj, axis=0, kind=None):
- """
- Construct a slice of this container.
-
- kind parameter is maintained for compatibility with Series slicing.
- """
- axis = self._get_block_manager_axis(axis)
- result = self._constructor(self._data.get_slice(slobj, axis=axis))
- result = result.__finalize__(self)
-
- # this could be a view
- # but only in a single-dtyped view sliceable case
- is_copy = axis != 0 or result._is_view
- result._set_is_copy(self, copy=is_copy)
- return result
-
- def _set_item(self, key, value):
- self._data.set(key, value)
- self._clear_item_cache()
-
- def _set_is_copy(self, ref=None, copy=True):
- if not copy:
- self._is_copy = None
- else:
- if ref is not None:
- self._is_copy = weakref.ref(ref)
- else:
- self._is_copy = None
-
- def _check_is_chained_assignment_possible(self):
- """
- Check if we are a view, have a cacher, and are of mixed type.
- If so, then force a setitem_copy check.
-
- Should be called just near setting a value
-
- Will return a boolean if it we are a view and are cached, but a
- single-dtype meaning that the cacher should be updated following
- setting.
- """
- if self._is_view and self._is_cached:
- ref = self._get_cacher()
- if ref is not None and ref._is_mixed_type:
- self._check_setitem_copy(stacklevel=4, t="referant", force=True)
- return True
- elif self._is_copy:
- self._check_setitem_copy(stacklevel=4, t="referant")
- return False
-
- def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
- """
-
- Parameters
- ----------
- stacklevel : integer, default 4
- the level to show of the stack when the error is output
- t : string, the type of setting error
- force : boolean, default False
- if True, then force showing an error
-
- validate if we are doing a setitem on a chained copy.
-
- If you call this function, be sure to set the stacklevel such that the
- user will see the error *at the level of setting*
-
- It is technically possible to figure out that we are setting on
- a copy even WITH a multi-dtyped pandas object. In other words, some
- blocks may be views while other are not. Currently _is_view will ALWAYS
- return False for multi-blocks to avoid having to handle this case.
-
- df = DataFrame(np.arange(0,9), columns=['count'])
- df['group'] = 'b'
-
- # This technically need not raise SettingWithCopy if both are view
- # (which is not # generally guaranteed but is usually True. However,
- # this is in general not a good practice and we recommend using .loc.
- df.iloc[0:5]['group'] = 'a'
-
- """
-
- # return early if the check is not needed
- if not (force or self._is_copy):
- return
-
- value = config.get_option("mode.chained_assignment")
- if value is None:
- return
-
- # see if the copy is not actually referred; if so, then dissolve
- # the copy weakref
- if self._is_copy is not None and not isinstance(self._is_copy, str):
- r = self._is_copy()
- if not gc.get_referents(r) or r.shape == self.shape:
- self._is_copy = None
- return
-
- # a custom message
- if isinstance(self._is_copy, str):
- t = self._is_copy
-
- elif t == "referant":
- t = (
- "\n"
- "A value is trying to be set on a copy of a slice from a "
- "DataFrame\n\n"
- "See the caveats in the documentation: "
- "http://pandas.pydata.org/pandas-docs/stable/user_guide/"
- "indexing.html#returning-a-view-versus-a-copy"
- )
-
- else:
- t = (
- "\n"
- "A value is trying to be set on a copy of a slice from a "
- "DataFrame.\n"
- "Try using .loc[row_indexer,col_indexer] = value "
- "instead\n\nSee the caveats in the documentation: "
- "http://pandas.pydata.org/pandas-docs/stable/user_guide/"
- "indexing.html#returning-a-view-versus-a-copy"
- )
-
- if value == "raise":
- raise com.SettingWithCopyError(t)
- elif value == "warn":
- warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
-
- def __delitem__(self, key):
- """
- Delete item
- """
- deleted = False
-
- maybe_shortcut = False
- if self.ndim == 2 and isinstance(self.columns, MultiIndex):
- try:
- maybe_shortcut = key not in self.columns._engine
- except TypeError:
- pass
-
- if maybe_shortcut:
- # Allow shorthand to delete all columns whose first len(key)
- # elements match key:
- if not isinstance(key, tuple):
- key = (key,)
- for col in self.columns:
- if isinstance(col, tuple) and col[: len(key)] == key:
- del self[col]
- deleted = True
- if not deleted:
- # If the above loop ran and didn't delete anything because
- # there was no match, this call should raise the appropriate
- # exception:
- self._data.delete(key)
+ def _clear_item_cache(self):
+ self._item_cache.clear()
- # delete from the caches
- try:
- del self._item_cache[key]
- except KeyError:
- pass
+ # ----------------------------------------------------------------------
+ # Indexing Methods
- def take(self, indices, axis=0, is_copy=True, **kwargs):
+ # def take(self, indices, axis=0, is_copy=True, **kwargs):
+
+ def take(self, indices, axis=0, is_copy=None, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
@@ -3539,6 +3374,8 @@ def take(self, indices, axis=0, is_copy=True, **kwargs):
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool, default True
Whether to return a copy of the original object or not.
+
+ .. deprecated:: 0.25.2
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
@@ -3597,6 +3434,14 @@ class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
+ if is_copy is not None:
+ warnings.warn(
+ "is_copy is deprecated and will be removed in a future version",
+ FutureWarning,
+ stacklevel=2,
+ )
+ is_copy = True
+
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
@@ -3776,6 +3621,222 @@ class animal locomotion
_xs = xs # type: Callable
+ def __getitem__(self, item):
+ raise AbstractMethodError(self)
+
+ def _get_item_cache(self, item):
+ """Return the cached item, item represents a label indexer."""
+ cache = self._item_cache
+ res = cache.get(item)
+ if res is None:
+ values = self._data.get(item)
+ res = self._box_item_values(item, values)
+ cache[item] = res
+ res._set_as_cached(item, self)
+
+ # for a chain
+ res._is_copy = self._is_copy
+ return res
+
+ def _iget_item_cache(self, item):
+ """Return the cached item, item represents a positional indexer."""
+ ax = self._info_axis
+ if ax.is_unique:
+ lower = self._get_item_cache(ax[item])
+ else:
+ lower = self.take(item, axis=self._info_axis_number)
+ return lower
+
+ def _box_item_values(self, key, values):
+ raise AbstractMethodError(self)
+
+ def _slice(self, slobj: slice, axis=0, kind=None):
+ """
+ Construct a slice of this container.
+
+ kind parameter is maintained for compatibility with Series slicing.
+ """
+ axis = self._get_block_manager_axis(axis)
+ result = self._constructor(self._data.get_slice(slobj, axis=axis))
+ result = result.__finalize__(self)
+
+ # this could be a view
+ # but only in a single-dtyped view sliceable case
+ is_copy = axis != 0 or result._is_view
+ result._set_is_copy(self, copy=is_copy)
+ return result
+
+ def _set_item(self, key, value):
+ self._data.set(key, value)
+ self._clear_item_cache()
+
+ def _set_is_copy(self, ref=None, copy=True):
+ if not copy:
+ self._is_copy = None
+ else:
+ if ref is not None:
+ self._is_copy = weakref.ref(ref)
+ else:
+ self._is_copy = None
+
+ def _check_is_chained_assignment_possible(self):
+ """
+ Check if we are a view, have a cacher, and are of mixed type.
+ If so, then force a setitem_copy check.
+
+ Should be called just near setting a value
+
+ Will return a boolean if it we are a view and are cached, but a
+ single-dtype meaning that the cacher should be updated following
+ setting.
+ """
+ if self._is_view and self._is_cached:
+ ref = self._get_cacher()
+ if ref is not None and ref._is_mixed_type:
+ self._check_setitem_copy(stacklevel=4, t="referant", force=True)
+ return True
+ elif self._is_copy:
+ self._check_setitem_copy(stacklevel=4, t="referant")
+ return False
+
+ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
+ """
+
+ Parameters
+ ----------
+ stacklevel : int, default 4
+ the level to show of the stack when the error is output
+ t : str, the type of setting error
+ force : bool, default False
+ If True, then force showing an error.
+
+ validate if we are doing a setitem on a chained copy.
+
+ If you call this function, be sure to set the stacklevel such that the
+ user will see the error *at the level of setting*
+
+ It is technically possible to figure out that we are setting on
+ a copy even WITH a multi-dtyped pandas object. In other words, some
+ blocks may be views while other are not. Currently _is_view will ALWAYS
+ return False for multi-blocks to avoid having to handle this case.
+
+ df = DataFrame(np.arange(0,9), columns=['count'])
+ df['group'] = 'b'
+
+ # This technically need not raise SettingWithCopy if both are view
+ # (which is not # generally guaranteed but is usually True. However,
+ # this is in general not a good practice and we recommend using .loc.
+ df.iloc[0:5]['group'] = 'a'
+
+ """
+
+ # return early if the check is not needed
+ if not (force or self._is_copy):
+ return
+
+ value = config.get_option("mode.chained_assignment")
+ if value is None:
+ return
+
+ # see if the copy is not actually referred; if so, then dissolve
+ # the copy weakref
+ if self._is_copy is not None and not isinstance(self._is_copy, str):
+ r = self._is_copy()
+ if not gc.get_referents(r) or r.shape == self.shape:
+ self._is_copy = None
+ return
+
+ # a custom message
+ if isinstance(self._is_copy, str):
+ t = self._is_copy
+
+ elif t == "referant":
+ t = (
+ "\n"
+ "A value is trying to be set on a copy of a slice from a "
+ "DataFrame\n\n"
+ "See the caveats in the documentation: "
+ "http://pandas.pydata.org/pandas-docs/stable/user_guide/"
+ "indexing.html#returning-a-view-versus-a-copy"
+ )
+
+ else:
+ t = (
+ "\n"
+ "A value is trying to be set on a copy of a slice from a "
+ "DataFrame.\n"
+ "Try using .loc[row_indexer,col_indexer] = value "
+ "instead\n\nSee the caveats in the documentation: "
+ "http://pandas.pydata.org/pandas-docs/stable/user_guide/"
+ "indexing.html#returning-a-view-versus-a-copy"
+ )
+
+ if value == "raise":
+ raise com.SettingWithCopyError(t)
+ elif value == "warn":
+ warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
+
+ def __delitem__(self, key):
+ """
+ Delete item
+ """
+ deleted = False
+
+ maybe_shortcut = False
+ if self.ndim == 2 and isinstance(self.columns, MultiIndex):
+ try:
+ maybe_shortcut = key not in self.columns._engine
+ except TypeError:
+ pass
+
+ if maybe_shortcut:
+ # Allow shorthand to delete all columns whose first len(key)
+ # elements match key:
+ if not isinstance(key, tuple):
+ key = (key,)
+ for col in self.columns:
+ if isinstance(col, tuple) and col[: len(key)] == key:
+ del self[col]
+ deleted = True
+ if not deleted:
+ # If the above loop ran and didn't delete anything because
+ # there was no match, this call should raise the appropriate
+ # exception:
+ self._data.delete(key)
+
+ # delete from the caches
+ try:
+ del self._item_cache[key]
+ except KeyError:
+ pass
+
+ # ----------------------------------------------------------------------
+ # Unsorted
+
+ def get(self, key, default=None):
+ """
+ Get item from object for given key (ex: DataFrame column).
+
+ Returns default value if not found.
+
+ Parameters
+ ----------
+ key : object
+
+ Returns
+ -------
+ value : same type as items contained in object
+ """
+ try:
+ return self[key]
+ except (KeyError, ValueError, IndexError):
+ return default
+
+ @property
+ def _is_view(self):
+ """Return boolean indicating if self is view of another array """
+ return self._data.is_view
+
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
"""
Return an object with matching indices as other object.
@@ -3896,16 +3957,14 @@ def drop(
if labels is not None:
if index is not None or columns is not None:
- raise ValueError(
- "Cannot specify both 'labels' and " "'index'/'columns'"
- )
+ raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
- "Need to specify at least one of 'labels', " "'index' or 'columns'"
+ "Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
@@ -3978,9 +4037,8 @@ def _update_inplace(self, result, verify_is_copy=True):
Parameters
----------
- verify_is_copy : boolean, default True
- provide is_copy checks
-
+ verify_is_copy : bool, default True
+ Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
@@ -4595,12 +4653,13 @@ def filter(self, items=None, like=None, regex=None, axis=None):
----------
items : list-like
Keep labels from axis which are in items.
- like : string
+ like : str
Keep labels from axis for which "like in label == True".
- regex : string (regular expression)
+ regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
- axis : int or string axis name
- The axis to filter on. By default this is the info axis,
+ axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
+ The axis to filter on, expressed either as an index (int)
+ or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
@@ -4642,8 +4701,6 @@ def filter(self, items=None, like=None, regex=None, axis=None):
one two three
rabbit 4 5 6
"""
- import re
-
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
@@ -4818,7 +4875,7 @@ def sample(
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
- Sample with or without replacement.
+ Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
@@ -4835,7 +4892,7 @@ def sample(
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
- axis : int or string, optional
+ axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
@@ -4902,18 +4959,18 @@ def sample(
if weights is not None:
# If a series, align with frame
- if isinstance(weights, pd.Series):
+ if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
- if isinstance(self, pd.DataFrame):
+ if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError(
- "String passed to weights not a " "valid column"
+ "String passed to weights not a valid column"
)
else:
raise ValueError(
@@ -4931,14 +4988,14 @@ def sample(
if len(weights) != axis_length:
raise ValueError(
- "Weights and axis to be sampled must be of " "same length"
+ "Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
- raise ValueError("weight vector many not include negative " "values")
+ raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
@@ -4960,16 +5017,16 @@ def sample(
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
- raise ValueError("Please enter a value for `frac` OR `n`, not " "both")
+ raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
- "A negative number of rows requested. Please " "provide positive value."
+ "A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
- return self.take(locs, axis=axis, is_copy=False)
+ return self.take(locs, axis=axis)
_shared_docs[
"pipe"
@@ -5026,7 +5083,7 @@ def sample(
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
- return com._pipe(self, func, *args, **kwargs)
+ return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
@@ -5259,8 +5316,8 @@ def _consolidate(self, inplace=False):
Parameters
----------
- inplace : boolean, default False
- If False return new object, otherwise modify existing object
+ inplace : bool, default False
+ If False return new object, otherwise modify existing object.
Returns
-------
@@ -5296,11 +5353,8 @@ def _check_inplace_setting(self, value):
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
- try:
- if np.isnan(value):
- return True
- except Exception:
- pass
+ if is_float(value) and np.isnan(value):
+ return True
raise TypeError(
"Cannot do inplace boolean setting on "
@@ -5561,9 +5615,6 @@ def get_ftype_counts(self):
.. deprecated:: 0.23.0
- This is useful for SparseDataFrame or for DataFrames containing
- sparse arrays.
-
Returns
-------
dtype : Series
@@ -5592,7 +5643,7 @@ def get_ftype_counts(self):
dtype: int64
"""
warnings.warn(
- "get_ftype_counts is deprecated and will " "be removed in a future version",
+ "get_ftype_counts is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
@@ -5658,7 +5709,6 @@ def ftypes(self):
See Also
--------
DataFrame.dtypes: Series with just dtype information.
- SparseDataFrame : Container for sparse tabular data.
Notes
-----
@@ -5674,13 +5724,6 @@ def ftypes(self):
2 float64:dense
3 float64:dense
dtype: object
-
- >>> pd.SparseDataFrame(arr).ftypes # doctest: +SKIP
- 0 float64:sparse
- 1 float64:sparse
- 2 float64:sparse
- 3 float64:sparse
- dtype: object
"""
warnings.warn(
"DataFrame.ftypes is deprecated and will "
@@ -5706,14 +5749,15 @@ def as_blocks(self, copy=True):
Parameters
----------
- copy : boolean, default True
+ copy : bool, default True
Returns
-------
- values : a dict of dtype -> Constructor Types
+ dict
+ Mapping dtype -> Constructor Types.
"""
warnings.warn(
- "as_blocks is deprecated and will " "be removed in a future version",
+ "as_blocks is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
@@ -5740,7 +5784,7 @@ def _to_dict_of_blocks(self, copy=True):
for k, v, in self._data.to_dict(copy=copy).items()
}
- def astype(self, dtype, copy=True, errors="raise", **kwargs):
+ def astype(self, dtype, copy=True, errors="raise"):
"""
Cast a pandas object to a specified dtype ``dtype``.
@@ -5759,12 +5803,10 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- - ``ignore`` : suppress exceptions. On error return original object
+ - ``ignore`` : suppress exceptions. On error return original object.
.. versionadded:: 0.20.0
- kwargs : keyword arguments to pass on to the constructor
-
Returns
-------
casted : same type as caller
@@ -5824,7 +5866,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
- ... categories=[2, 1], ordered=True)
+ ... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
@@ -5834,7 +5876,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
- >>> s1 = pd.Series([1,2])
+ >>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
@@ -5850,7 +5892,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
- return self.astype(new_type, copy, errors, **kwargs)
+ return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
@@ -5862,9 +5904,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
for col_name, col in self.items():
if col_name in dtype:
results.append(
- col.astype(
- dtype=dtype[col_name], copy=copy, errors=errors, **kwargs
- )
+ col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(results.append(col.copy() if copy else col))
@@ -5879,9 +5919,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
else:
# else, only a single dtype is given
- new_data = self._data.astype(
- dtype=dtype, copy=copy, errors=errors, **kwargs
- )
+ new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
@@ -6019,17 +6057,17 @@ def _convert(
Parameters
----------
- datetime : boolean, default False
+ datetime : bool, default False
If True, convert to date where possible.
- numeric : boolean, default False
+ numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
- timedelta : boolean, default False
+ timedelta : bool, default False
If True, convert to timedelta where possible.
- coerce : boolean, default False
+ coerce : bool, default False
If True, force conversion with unconvertible values converted to
- nulls (NaN or NaT)
- copy : boolean, default True
+ nulls (NaN or NaT).
+ copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
@@ -6038,6 +6076,11 @@ def _convert(
-------
converted : same as input object
"""
+ validate_bool_kwarg(datetime, "datetime")
+ validate_bool_kwarg(numeric, "numeric")
+ validate_bool_kwarg(timedelta, "timedelta")
+ validate_bool_kwarg(coerce, "coerce")
+ validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
@@ -6215,8 +6258,6 @@ def fillna(
axis = 0
axis = self._get_axis_number(axis)
- from pandas import DataFrame
-
if value is None:
if self._is_mixed_type and axis == 1:
@@ -6279,7 +6320,7 @@ def fillna(
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
- elif isinstance(value, DataFrame) and self.ndim == 2:
+ elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
@@ -6620,9 +6661,7 @@ def replace(
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
- raise AssertionError(
- "'to_replace' must be 'None' if 'regex' is " "not a bool"
- )
+ raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
@@ -6633,7 +6672,7 @@ def replace(
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
- if isinstance(self, pd.DataFrame):
+ if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
@@ -6667,11 +6706,7 @@ def replace(
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
- if set(keys) & set(values):
- raise ValueError(
- "Replacement not allowed with "
- "overlapping keys and values"
- )
+
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
@@ -6685,9 +6720,8 @@ def replace(
else:
# need a non-zero len on all axes
- for a in self._AXIS_ORDERS:
- if not len(self._get_axis(a)):
- return self
+ if not self.size:
+ return self
new_data = self._data
if is_dict_like(to_replace):
@@ -6720,7 +6754,7 @@ def replace(
convert=convert,
)
else:
- raise TypeError("value argument must be scalar, dict, or " "Series")
+ raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
@@ -6819,14 +6853,6 @@ def replace(
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
-
- .. versionadded:: 0.18.1
-
- Added support for the 'akima' method.
- Added interpolate method 'from_derivatives' which replaces
- 'piecewise_polynomial' in SciPy 0.18; backwards-compatible with
- SciPy < 0.18
-
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
@@ -7014,7 +7040,7 @@ def interpolate(
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
- "Only `method=linear` interpolation is supported " "on MultiIndexes."
+ "Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
@@ -7035,7 +7061,7 @@ def interpolate(
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
- or is_datetime64_dtype(index)
+ or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
@@ -7088,8 +7114,6 @@ def asof(self, where, subset=None):
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`)
- .. versionadded:: 0.19.0 For DataFrame
-
If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame
@@ -7178,9 +7202,7 @@ def asof(self, where, subset=None):
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
- from pandas import to_datetime
-
- where = to_datetime(where)
+ where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
@@ -7245,7 +7267,7 @@ def asof(self, where, subset=None):
# mask the missing
missing = locs == -1
- data = self.take(locs, is_copy=False)
+ data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
@@ -7436,7 +7458,7 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
- threshold = pd.Series(threshold, index=self.index)
+ threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
@@ -7525,9 +7547,9 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
- if not is_list_like(lower) and np.any(pd.isnull(lower)):
+ if not is_list_like(lower) and np.any(isna(lower)):
lower = None
- if not is_list_like(upper) and np.any(pd.isnull(upper)):
+ if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
@@ -7630,7 +7652,7 @@ def clip_upper(self, threshold, axis=None, inplace=False):
dtype: int64
"""
warnings.warn(
- "clip_upper(threshold) is deprecated, " "use clip(upper=threshold) instead",
+ "clip_upper(threshold) is deprecated, use clip(upper=threshold) instead",
FutureWarning,
stacklevel=2,
)
@@ -7749,7 +7771,7 @@ def clip_lower(self, threshold, axis=None, inplace=False):
2 5 6
"""
warnings.warn(
- "clip_lower(threshold) is deprecated, " "use clip(lower=threshold) instead",
+ "clip_lower(threshold) is deprecated, use clip(lower=threshold) instead",
FutureWarning,
stacklevel=2,
)
@@ -7908,7 +7930,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
Parameters
----------
- freq : DateOffset object, or string
+ freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
@@ -8207,14 +8229,10 @@ def resample(
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
- .. versionadded:: 0.19.0
-
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
- .. versionadded:: 0.19.0
-
Returns
-------
Resampler object
@@ -8461,7 +8479,7 @@ def first(self, offset):
Parameters
----------
- offset : string, DateOffset, dateutil.relativedelta
+ offset : str, DateOffset, dateutil.relativedelta
Returns
-------
@@ -8524,7 +8542,7 @@ def last(self, offset):
Parameters
----------
- offset : string, DateOffset, dateutil.relativedelta
+ offset : str, DateOffset, dateutil.relativedelta
Returns
-------
@@ -8713,7 +8731,7 @@ def ranker(data):
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
- copy : boolean, default True
+ copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
@@ -8756,12 +8774,10 @@ def align(
fill_axis=0,
broadcast_axis=None,
):
- from pandas import DataFrame, Series
-
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
- if isinstance(self, Series):
+ if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
@@ -8779,7 +8795,7 @@ def align(
limit=limit,
fill_axis=fill_axis,
)
- elif isinstance(other, Series):
+ elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
@@ -8800,7 +8816,7 @@ def align(
if axis is not None:
axis = self._get_axis_number(axis)
- if isinstance(other, DataFrame):
+ if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
@@ -8812,7 +8828,7 @@ def align(
limit=limit,
fill_axis=fill_axis,
)
- elif isinstance(other, Series):
+ elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
@@ -8905,7 +8921,7 @@ def _align_series(
# series/series compat, other must always be a Series
if is_series:
if axis:
- raise ValueError("cannot align series to a series other than " "axis 0")
+ raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
@@ -8995,7 +9011,7 @@ def _where(
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
- raise ValueError("Array conditional must be same shape as " "self")
+ raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
@@ -9004,7 +9020,7 @@ def _where(
msg = "Boolean array expected for the condition, not {dtype}"
- if not isinstance(cond, pd.DataFrame):
+ if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
@@ -9035,7 +9051,7 @@ def _where(
# slice me out of the other
else:
raise NotImplementedError(
- "cannot align with a higher " "dimensional NDFrame"
+ "cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
@@ -9058,32 +9074,19 @@ def _where(
# try to not change dtype at first (if try_quick)
if try_quick:
- try:
- new_other = com.values_from_object(self)
- new_other = new_other.copy()
- new_other[icond] = other
- other = new_other
- except Exception:
- try_quick = False
-
- # let's create a new (if we failed at the above
- # or not try_quick
- if not try_quick:
-
- dtype, fill_value = maybe_promote(other.dtype)
- new_other = np.empty(len(icond), dtype=dtype)
- new_other.fill(fill_value)
- maybe_upcast_putmask(new_other, icond, other)
+ new_other = com.values_from_object(self)
+ new_other = new_other.copy()
+ new_other[icond] = other
other = new_other
else:
raise ValueError(
- "Length of replacements must equal " "series length"
+ "Length of replacements must equal series length"
)
else:
raise ValueError(
- "other must be the same shape as self " "when an ndarray"
+ "other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
@@ -9134,26 +9137,18 @@ def _where(
Parameters
----------
- cond : boolean %(klass)s, array-like, or callable
+ cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
-
- .. versionadded:: 0.18.1
- A callable can be used as cond.
-
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
-
- .. versionadded:: 0.18.1
- A callable can be used as other.
-
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
@@ -9449,9 +9444,10 @@ def tshift(self, periods=1, freq=None, axis=0):
----------
periods : int
Number of periods to move, can be positive or negative
- freq : DateOffset, timedelta, or time rule string, default None
- Increment to use from the tseries module or time rule (e.g. 'EOM')
- axis : int or basestring
+ freq : DateOffset, timedelta, or str, default None
+ Increment to use from the tseries module
+ or time rule expressed as a string (e.g. 'EOM')
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index
Returns
@@ -9509,13 +9505,13 @@ def truncate(self, before=None, after=None, axis=None, copy=True):
Parameters
----------
- before : date, string, int
+ before : date, str, int
Truncate all rows before this index value.
- after : date, string, int
+ after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
- copy : boolean, default is True,
+ copy : bool, default True
Return a copy of the truncated section.
Returns
@@ -9659,13 +9655,13 @@ def tz_convert(self, tz, axis=0, level=None, copy=True):
Parameters
----------
- tz : string or pytz.timezone object
+ tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
- If axis ia a MultiIndex, convert a specific level. Otherwise
- must be None
- copy : boolean, default True
- Also make a copy of the underlying data
+ If axis is a MultiIndex, convert a specific level. Otherwise
+ must be None.
+ copy : bool, default True
+ Also make a copy of the underlying data.
Returns
-------
@@ -9685,7 +9681,7 @@ def _tz_convert(ax, tz):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
- "%s is not a valid DatetimeIndex or " "PeriodIndex" % ax_name
+ "%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
@@ -9719,12 +9715,12 @@ def tz_localize(
Parameters
----------
- tz : string or pytz.timezone object
+ tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis ia a MultiIndex, localize a specific level. Otherwise
must be None
- copy : boolean, default True
+ copy : bool, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
@@ -9849,7 +9845,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
- "%s is not a valid DatetimeIndex or " "PeriodIndex" % ax_name
+ "%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
@@ -10293,7 +10289,7 @@ def _check_percentile(self, q):
Validate percentiles (used by describe and quantile).
"""
- msg = "percentiles should all be in the interval [0, 1]. " "Try {0} instead."
+ msg = "percentiles should all be in the interval [0, 1]. Try {0} instead."
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
@@ -10320,7 +10316,7 @@ def _check_percentile(self, q):
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
- freq : DateOffset, timedelta, or offset alias string, optional
+ freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
@@ -10746,9 +10742,9 @@ def _add_series_or_dataframe_operations(cls):
the doc strings again.
"""
- from pandas.core import window as rwindow
+ from pandas.core.window import EWM, Expanding, Rolling, Window
- @Appender(rwindow.rolling.__doc__)
+ @Appender(Rolling.__doc__)
def rolling(
self,
window,
@@ -10760,7 +10756,20 @@ def rolling(
closed=None,
):
axis = self._get_axis_number(axis)
- return rwindow.rolling(
+
+ if win_type is not None:
+ return Window(
+ self,
+ window=window,
+ min_periods=min_periods,
+ center=center,
+ win_type=win_type,
+ on=on,
+ axis=axis,
+ closed=closed,
+ )
+
+ return Rolling(
self,
window=window,
min_periods=min_periods,
@@ -10773,16 +10782,14 @@ def rolling(
cls.rolling = rolling
- @Appender(rwindow.expanding.__doc__)
+ @Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
- return rwindow.expanding(
- self, min_periods=min_periods, center=center, axis=axis
- )
+ return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
- @Appender(rwindow.ewm.__doc__)
+ @Appender(EWM.__doc__)
def ewm(
self,
com=None,
@@ -10795,7 +10802,7 @@ def ewm(
axis=0,
):
axis = self._get_axis_number(axis)
- return rwindow.ewm(
+ return EWM(
self,
com=com,
span=span,
@@ -10813,7 +10820,7 @@ def ewm(
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
- raise ValueError("transforms cannot produce " "aggregated results")
+ raise ValueError("transforms cannot produce aggregated results")
return result
@@ -11050,7 +11057,7 @@ def _doc_parms(cls):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
-skipna : boolean, default True
+skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
@@ -11534,7 +11541,7 @@ def _doc_parms(cls):
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
- .. versionadded :: 0.22.0
+ .. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
@@ -11713,7 +11720,7 @@ def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs
if level is not None:
if bool_only is not None:
raise NotImplementedError(
- "Option bool_only is not " "implemented with option level."
+ "Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 5c4f1fa3fbddf..fc3bb69afd0cb 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -98,6 +98,103 @@ def _gotitem(self, key, ndim, subset=None):
dataframe_apply_whitelist = common_apply_whitelist | frozenset(["dtypes", "corrwith"])
-cython_transforms = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
+# cythonized transformations or canned "agg+broadcast", which do not
+# require postprocessing of the result by transform.
+cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
cython_cast_blacklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
+
+# List of aggregation/reduction functions.
+# These map each group to a single numeric value
+reduction_kernels = frozenset(
+ [
+ "all",
+ "any",
+ "count",
+ "first",
+ "idxmax",
+ "idxmin",
+ "last",
+ "mad",
+ "max",
+ "mean",
+ "median",
+ "min",
+ "ngroup",
+ "nth",
+ "nunique",
+ "prod",
+ # as long as `quantile`'s signature accepts only
+ # a single quantile value, it's a reduction.
+ # GH#27526 might change that.
+ "quantile",
+ "sem",
+ "size",
+ "skew",
+ "std",
+ "sum",
+ "var",
+ ]
+)
+
+# List of transformation functions.
+# a transformation is a function that, for each group,
+# produces a result that has the same shape as the group.
+transformation_kernels = frozenset(
+ [
+ "backfill",
+ "bfill",
+ "corrwith",
+ "cumcount",
+ "cummax",
+ "cummin",
+ "cumprod",
+ "cumsum",
+ "diff",
+ "ffill",
+ "fillna",
+ "pad",
+ "pct_change",
+ "rank",
+ "shift",
+ "tshift",
+ ]
+)
+
+# these are all the public methods on Grouper which don't belong
+# in either of the above lists
+groupby_other_methods = frozenset(
+ [
+ "agg",
+ "aggregate",
+ "apply",
+ "boxplot",
+ # corr and cov return ngroups*ncolumns rows, so they
+ # are neither a transformation nor a reduction
+ "corr",
+ "cov",
+ "describe",
+ "dtypes",
+ "expanding",
+ "filter",
+ "get_group",
+ "groups",
+ "head",
+ "hist",
+ "indices",
+ "ndim",
+ "ngroups",
+ "ohlc",
+ "pipe",
+ "plot",
+ "resample",
+ "rolling",
+ "tail",
+ "take",
+ "transform",
+ ]
+)
+# Valid values of `name` for `groupby.transform(name)`
+# NOTE: do NOT edit this directly. New additions should be inserted
+# into the appropriate list above.
+transform_kernel_whitelist = reduction_kernels | transformation_kernels
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 7fd0ca94e7997..f8f1455561c03 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -21,7 +21,11 @@
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
-from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype
+from pandas.core.dtypes.cast import (
+ maybe_convert_objects,
+ maybe_downcast_numeric,
+ maybe_downcast_to_dtype,
+)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
@@ -35,21 +39,25 @@
is_object_dtype,
is_scalar,
)
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna
from pandas._typing import FrameOrSeries
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
-from pandas.core.generic import NDFrame, _shared_docs
+from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame, _shared_docs
from pandas.core.groupby import base
-from pandas.core.groupby.groupby import GroupBy, _apply_docs, _transform_template
-from pandas.core.index import Index, MultiIndex
+from pandas.core.groupby.groupby import (
+ GroupBy,
+ _apply_docs,
+ _transform_template,
+ groupby,
+)
+from pandas.core.index import Index, MultiIndex, _all_indexes_same
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
-from pandas.core.sparse.frame import SparseDataFrame
from pandas.plotting import boxplot_frame_groupby
@@ -143,8 +151,10 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
new_blocks = []
new_items = []
deleted_items = []
+ no_result = object()
for block in data.blocks:
-
+ # Avoid inheriting result from earlier in the loop
+ result = no_result
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
@@ -162,8 +172,6 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
continue
# call our grouper again with only this block
- from pandas.core.groupby.groupby import groupby
-
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
try:
@@ -171,15 +179,13 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
except TypeError:
# we may have an exception in trying to aggregate
# continue and exclude the block
- pass
-
+ deleted_items.append(locs)
+ continue
finally:
-
- dtype = block.values.dtype
-
- # see if we can cast the block back to the original dtype
- result = block._try_coerce_and_cast_result(result, dtype=dtype)
- newb = block.make_block(result)
+ if result is not no_result:
+ # see if we can cast the block back to the original dtype
+ result = maybe_downcast_numeric(result, block.dtype)
+ newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
@@ -222,7 +228,7 @@ def aggregate(self, func, *args, **kwargs):
kwargs = {}
elif func is None:
# nicer error message
- raise TypeError("Must provide 'func' or tuples of " "'(column, aggfunc).")
+ raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
func = _maybe_mangle_lambdas(func)
@@ -235,33 +241,30 @@ def aggregate(self, func, *args, **kwargs):
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
+ elif args or kwargs:
+ result = self._aggregate_generic(func, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
- assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[func], _level=_level, _axis=self.axis
)
-
+ except Exception:
+ result = self._aggregate_generic(func)
+ else:
result.columns = Index(
result.columns.levels[0], name=self._selected_obj.columns.name
)
- if isinstance(self.obj, SparseDataFrame):
- # Backwards compat for groupby.agg() with sparse
- # values. concat no longer converts DataFrame[Sparse]
- # to SparseDataFrame, so we do it here.
- result = SparseDataFrame(result._data)
- except Exception:
- result = self._aggregate_generic(func, *args, **kwargs)
-
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
if relabeling:
- result = result[order]
+
+ # use the reordered index of columns
+ result = result.iloc[:, order]
result.columns = columns
return result._convert(datetime=True)
@@ -304,17 +307,21 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
cannot_agg = []
errors = None
for item in obj:
- try:
- data = obj[item]
- colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
+ data = obj[item]
+ colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
+ try:
cast = self._transform_should_cast(func)
result[item] = colg.aggregate(func, *args, **kwargs)
if cast:
result[item] = self._try_cast(result[item], data)
- except ValueError:
+ except ValueError as err:
+ if "Must produce aggregated value" in str(err):
+ # raised in _aggregate_named, handle at higher level
+ # see test_apply_with_mutated_index
+ raise
cannot_agg.append(item)
continue
except TypeError as e:
@@ -339,7 +346,7 @@ def _decide_output_index(self, output, labels):
output_keys = sorted(output)
try:
output_keys.sort()
- except Exception: # pragma: no cover
+ except TypeError:
pass
if isinstance(labels, MultiIndex):
@@ -348,8 +355,6 @@ def _decide_output_index(self, output, labels):
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
- from pandas.core.index import _all_indexes_same
-
if len(keys) == 0:
return DataFrame(index=keys)
@@ -358,7 +363,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# GH12824.
def first_not_none(values):
try:
- return next(com._not_none(*values))
+ return next(com.not_none(*values))
except StopIteration:
return None
@@ -572,13 +577,19 @@ def _transform_general(self, func, *args, **kwargs):
def transform(self, func, *args, **kwargs):
# optimized transforms
- func = self._is_cython_func(func) or func
+ func = self._get_cython_func(func) or func
+
if isinstance(func, str):
- if func in base.cython_transforms:
- # cythonized transform
+ if not (func in base.transform_kernel_whitelist):
+ msg = "'{func}' is not a valid function name for transform(name)"
+ raise ValueError(msg.format(func=func))
+ if func in base.cythonized_kernels:
+ # cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
else:
- # cythonized aggregation and merge
+ # If func is a reduction, we need to broadcast the
+ # result to the whole group. Compute func result
+ # and deal with possible broadcasting below.
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
@@ -589,7 +600,7 @@ def transform(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
- # nuiscance columns
+ # nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
@@ -635,20 +646,21 @@ def _choose_path(self, fast_path, slow_path, group):
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
-
- # verify fast path does not change columns (and names), otherwise
- # its results cannot be joined with those of the slow path
- if res_fast.columns != group.columns:
- return path, res
- # verify numerical equality with the slow path
- if res.shape == res_fast.shape:
- res_r = res.values.ravel()
- res_fast_r = res_fast.values.ravel()
- mask = notna(res_r)
- if (res_r[mask] == res_fast_r[mask]).all():
- path = fast_path
except Exception:
- pass
+ # Hard to know ex-ante what exceptions `fast_path` might raise
+ return path, res
+
+ # verify fast path does not change columns (and names), otherwise
+ # its results cannot be joined with those of the slow path
+ if not isinstance(res_fast, DataFrame):
+ return path, res
+
+ if not res_fast.columns.equals(group.columns):
+ return path, res
+
+ if res_fast.equals(res):
+ path = fast_path
+
return path, res
def _transform_item_by_item(self, obj, wrapper):
@@ -662,7 +674,7 @@ def _transform_item_by_item(self, obj, wrapper):
except Exception:
pass
- if len(output) == 0: # pragma: no cover
+ if len(output) == 0:
raise TypeError("Transform function invalid for data types")
columns = obj.columns
@@ -671,7 +683,7 @@ def _transform_item_by_item(self, obj, wrapper):
return DataFrame(output, index=obj.index, columns=columns)
- def filter(self, func, dropna=True, *args, **kwargs): # noqa
+ def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
@@ -822,47 +834,45 @@ def apply(self, func, *args, **kwargs):
axis="",
)
@Appender(_shared_docs["aggregate"])
- def aggregate(self, func_or_funcs=None, *args, **kwargs):
+ def aggregate(self, func=None, *args, **kwargs):
_level = kwargs.pop("_level", None)
- relabeling = func_or_funcs is None
+ relabeling = func is None
columns = None
- no_arg_message = (
- "Must provide 'func_or_funcs' or named " "aggregation **kwargs."
- )
+ no_arg_message = "Must provide 'func' or named aggregation **kwargs."
if relabeling:
columns = list(kwargs)
if not PY36:
# sort for 3.5 and earlier
columns = list(sorted(columns))
- func_or_funcs = [kwargs[col] for col in columns]
+ func = [kwargs[col] for col in columns]
kwargs = {}
if not columns:
raise TypeError(no_arg_message)
- if isinstance(func_or_funcs, str):
- return getattr(self, func_or_funcs)(*args, **kwargs)
+ if isinstance(func, str):
+ return getattr(self, func)(*args, **kwargs)
- if isinstance(func_or_funcs, abc.Iterable):
+ if isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
- func_or_funcs = _maybe_mangle_lambdas(func_or_funcs)
- ret = self._aggregate_multiple_funcs(func_or_funcs, (_level or 0) + 1)
+ func = _maybe_mangle_lambdas(func)
+ ret = self._aggregate_multiple_funcs(func, (_level or 0) + 1)
if relabeling:
ret.columns = columns
else:
- cyfunc = self._is_cython_func(func_or_funcs)
+ cyfunc = self._get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
- return self._python_agg_general(func_or_funcs, *args, **kwargs)
+ return self._python_agg_general(func, *args, **kwargs)
try:
- return self._python_agg_general(func_or_funcs, *args, **kwargs)
+ return self._python_agg_general(func, *args, **kwargs)
except Exception:
- result = self._aggregate_named(func_or_funcs, *args, **kwargs)
+ result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
@@ -996,7 +1006,7 @@ def _aggregate_named(self, func, *args, **kwargs):
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
- raise Exception("Must produce aggregated value")
+ raise ValueError("Must produce aggregated value")
result[name] = self._try_cast(output, group)
return result
@@ -1004,15 +1014,19 @@ def _aggregate_named(self, func, *args, **kwargs):
@Substitution(klass="Series", selected="A.")
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
- func = self._is_cython_func(func) or func
+ func = self._get_cython_func(func) or func
- # if string function
if isinstance(func, str):
- if func in base.cython_transforms:
- # cythonized transform
+ if not (func in base.transform_kernel_whitelist):
+ msg = "'{func}' is not a valid function name for transform(name)"
+ raise ValueError(msg.format(func=func))
+ if func in base.cythonized_kernels:
+ # cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
else:
- # cythonized aggregation and merge
+ # If func is a reduction, we need to broadcast the
+ # result to the whole group. Compute func result
+ # and deal with possible broadcasting below.
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs), func
)
@@ -1025,8 +1039,8 @@ def transform(self, func, *args, **kwargs):
object.__setattr__(group, "name", name)
res = wrapper(group)
- if hasattr(res, "values"):
- res = res.values
+ if isinstance(res, (ABCDataFrame, ABCSeries)):
+ res = res._values
indexer = self._get_index(name)
s = klass(res, indexer)
@@ -1130,6 +1144,10 @@ def nunique(self, dropna=True):
val = self.obj._internal_get_values()
+ # GH 27951
+ # temporary fix while we wait for NumPy bug 12629 to be fixed
+ val[isna(val)] = np.datetime64("NaT")
+
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
@@ -1451,8 +1469,8 @@ class DataFrameGroupBy(NDFrameGroupBy):
axis="",
)
@Appender(_shared_docs["aggregate"])
- def aggregate(self, arg=None, *args, **kwargs):
- return super().aggregate(arg, *args, **kwargs)
+ def aggregate(self, func=None, *args, **kwargs):
+ return super().aggregate(func, *args, **kwargs)
agg = aggregate
@@ -1590,13 +1608,14 @@ def count(self):
DataFrame
Count of values within each group.
"""
- from pandas.core.dtypes.missing import _isna_ndarraylike as _isna
-
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
- val = ((mask & ~_isna(np.atleast_2d(blk.get_values()))) for blk in data.blocks)
+ val = (
+ (mask & ~_isna_ndarraylike(np.atleast_2d(blk.get_values())))
+ for blk in data.blocks
+ )
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
@@ -1717,8 +1736,8 @@ def _normalize_keyword_aggregation(kwargs):
The transformed kwargs.
columns : List[str]
The user-provided keys.
- order : List[Tuple[str, str]]
- Pairs of the input and output column names.
+ col_idx_order : List[int]
+ List of column indices.
Examples
--------
@@ -1745,7 +1764,39 @@ def _normalize_keyword_aggregation(kwargs):
else:
aggspec[column] = [aggfunc]
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
- return aggspec, columns, order
+
+ # uniquify aggfunc name if duplicated in order list
+ uniquified_order = _make_unique(order)
+
+ # GH 25719: because aggspec changes the order of assigned columns in aggregation,
+ # uniquified_aggspec stores the uniquified order list and compares it with the
+ # order list based on index
+ aggspec_order = [
+ (column, com.get_callable_name(aggfunc) or aggfunc)
+ for column, aggfuncs in aggspec.items()
+ for aggfunc in aggfuncs
+ ]
+ uniquified_aggspec = _make_unique(aggspec_order)
+
+ # get the new indices of columns by comparison
+ col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
+ return aggspec, columns, col_idx_order
+
+
+def _make_unique(seq):
+ """Uniquify the aggfunc names of the pairs in the order list.
+
+ Examples
+ --------
+ >>> _make_unique([('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')])
+ [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
+ """
+ return [
+ (pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
+ if seq.count(pair) > 1
+ else pair
+ for i, pair in enumerate(seq)
+ ]
# TODO: Can't use, because mypy doesn't like us setting __name__
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 9aba9723e0546..6facbe7e01c57 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -29,14 +29,16 @@ class providing the base-class of operations.
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_float,
+ is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
+ is_integer_dtype,
is_numeric_dtype,
+ is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
-from pandas.api.types import is_datetime64_dtype, is_integer_dtype, is_object_dtype
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
from pandas.core.base import (
@@ -47,6 +49,7 @@ class providing the base-class of operations.
SpecificationError,
)
import pandas.core.common as com
+from pandas.core.construction import extract_array
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
@@ -261,7 +264,7 @@ class providing the base-class of operations.
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
- For example, f returns a scalar it will be broadcast to have the
+ For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
@@ -342,7 +345,7 @@ class _GroupBy(PandasObject, SelectionMixin):
def __init__(
self,
- obj,
+ obj: NDFrame,
keys=None,
axis=0,
level=None,
@@ -359,8 +362,8 @@ def __init__(
self._selection = selection
- if isinstance(obj, NDFrame):
- obj._consolidate_inplace()
+ assert isinstance(obj, NDFrame), type(obj)
+ obj._consolidate_inplace()
self.level = level
@@ -461,7 +464,7 @@ def get_converter(s):
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
- msg = "must supply a tuple to get_group with multiple" " grouping keys"
+ msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
@@ -589,7 +592,7 @@ def __getattr__(self, attr):
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
- return com._pipe(self, func, *args, **kwargs)
+ return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
@@ -650,7 +653,8 @@ def curried(x):
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
- except (AttributeError):
+ except AttributeError:
+ # e.g. SparseArray has no flags attr
raise ValueError
return wrapper
@@ -714,7 +718,7 @@ def f(g):
else:
raise ValueError(
- "func must be a callable if args or " "kwargs are supplied"
+ "func must be a callable if args or kwargs are supplied"
)
else:
f = func
@@ -723,8 +727,7 @@ def f(g):
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
- except Exception:
-
+ except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
@@ -803,10 +806,9 @@ def _try_cast(self, result, obj, numeric_only=False):
# Prior results _may_ have been generated in UTC.
# Ensure we localize to UTC first before converting
# to the target timezone
+ arr = extract_array(obj)
try:
- result = obj._values._from_sequence(
- result, dtype="datetime64[ns, UTC]"
- )
+ result = arr._from_sequence(result, dtype="datetime64[ns, UTC]")
result = result.astype(dtype)
except TypeError:
# _try_cast was called at a point where the result
@@ -928,7 +930,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False):
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
- for v in com._not_none(*values):
+ for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
@@ -1009,7 +1011,6 @@ def _apply_filter(self, indices, dropna):
class GroupBy(_GroupBy):
-
"""
Class for grouping and aggregating relational data.
@@ -1031,7 +1032,7 @@ class GroupBy(_GroupBy):
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
- name : string
+ name : str
Most users should ignore this
Returns
@@ -1206,7 +1207,7 @@ def mean(self, *args, **kwargs):
)
except GroupByError:
raise
- except Exception: # pragma: no cover
+ except Exception:
with _group_selection_context(self):
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@@ -1232,7 +1233,7 @@ def median(self, **kwargs):
)
except GroupByError:
raise
- except Exception: # pragma: no cover
+ except Exception:
def f(x):
if isinstance(x, np.ndarray):
@@ -1252,7 +1253,7 @@ def std(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1275,7 +1276,7 @@ def var(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1310,7 +1311,7 @@ def sem(self, ddof=1):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1622,7 +1623,7 @@ def pad(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -1648,7 +1649,7 @@ def backfill(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -1771,7 +1772,11 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra
if not self.as_index:
return out
- out.index = self.grouper.result_index[ids[mask]]
+ result_index = self.grouper.result_index
+ out.index = result_index[ids[mask]]
+
+ if not self.observed and isinstance(result_index, CategoricalIndex):
+ out = out.reindex(result_index)
return out.sort_index() if self.sort else out
@@ -1868,11 +1873,12 @@ def quantile(self, q=0.5, interpolation="linear"):
a 2.0
b 3.0
"""
+ from pandas import concat
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
if is_object_dtype(vals):
raise TypeError(
- "'quantile' cannot be performed against " "'object' dtypes!"
+ "'quantile' cannot be performed against 'object' dtypes!"
)
inference = None
@@ -1895,18 +1901,57 @@ def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
return vals
- return self._get_cythonized_result(
- "group_quantile",
- self.grouper,
- aggregate=True,
- needs_values=True,
- needs_mask=True,
- cython_dtype=np.float64,
- pre_processing=pre_processor,
- post_processing=post_processor,
- q=q,
- interpolation=interpolation,
- )
+ if is_scalar(q):
+ return self._get_cythonized_result(
+ "group_quantile",
+ self.grouper,
+ aggregate=True,
+ needs_values=True,
+ needs_mask=True,
+ cython_dtype=np.float64,
+ pre_processing=pre_processor,
+ post_processing=post_processor,
+ q=q,
+ interpolation=interpolation,
+ )
+ else:
+ results = [
+ self._get_cythonized_result(
+ "group_quantile",
+ self.grouper,
+ aggregate=True,
+ needs_values=True,
+ needs_mask=True,
+ cython_dtype=np.float64,
+ pre_processing=pre_processor,
+ post_processing=post_processor,
+ q=qi,
+ interpolation=interpolation,
+ )
+ for qi in q
+ ]
+ result = concat(results, axis=0, keys=q)
+ # fix levels to place quantiles on the inside
+ # TODO(GH-10710): Ideally, we could write this as
+ # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
+ # but this hits https://github.com/pandas-dev/pandas/issues/10710
+ # which doesn't reorder the list-like `q` on the inner level.
+ order = np.roll(list(range(result.index.nlevels)), -1)
+ result = result.reorder_levels(order)
+ result = result.reindex(q, level=-1)
+
+ # fix order.
+ hi = len(q) * self.ngroups
+ arr = np.arange(0, hi, self.ngroups)
+ arrays = []
+
+ for i in range(self.ngroups):
+ arr2 = arr + i
+ arrays.append(arr2)
+
+ indices = np.concatenate(arrays)
+ assert len(indices) == len(result)
+ return result.take(indices)
@Substitution(name="groupby")
def ngroup(self, ascending=True):
@@ -2054,13 +2099,13 @@ def rank(
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
- ascending : boolean, default True
+ ascending : bool, default True
False for ranks by high (1) to low (N)
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
- pct : boolean, default False
+ pct : bool, default False
Compute percentage rank of data within each group
axis : int, default 0
The axis of the object over which to compute the rank.
@@ -2201,9 +2246,7 @@ def _get_cythonized_result(
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
- raise ValueError(
- "'result_is_index' and 'aggregate' cannot both " "be True!"
- )
+ raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
if post_processing:
if not callable(pre_processing):
raise ValueError("'post_processing' must be a callable!")
@@ -2212,7 +2255,7 @@ def _get_cythonized_result(
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError(
- "Cannot use 'pre_processing' without " "specifying 'needs_values'!"
+ "Cannot use 'pre_processing' without specifying 'needs_values'!"
)
labels, _, ngroups = grouper.group_info
@@ -2220,26 +2263,28 @@ def _get_cythonized_result(
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
+ values = obj._data._values
+
if aggregate:
result_sz = ngroups
else:
- result_sz = len(obj.values)
+ result_sz = len(values)
if not cython_dtype:
- cython_dtype = obj.values.dtype
+ cython_dtype = values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
inferences = None
if needs_values:
- vals = obj.values
+ vals = values
if pre_processing:
vals, inferences = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
- mask = isna(obj.values).view(np.uint8)
+ mask = isna(values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
@@ -2248,7 +2293,7 @@ def _get_cythonized_result(
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
- result = algorithms.take_nd(obj.values, result)
+ result = algorithms.take_nd(values, result)
if post_processing:
result = post_processing(result, inferences)
@@ -2268,7 +2313,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
Parameters
----------
- periods : integer, default 1
+ periods : int, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
@@ -2326,8 +2371,9 @@ def head(self, n=5):
"""
Return first n rows of each group.
- Essentially equivalent to ``.apply(lambda x: x.head(n))``,
- except ignores as_index flag.
+ Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
+ from the original DataFrame with original index and order preserved
+ (``as_index`` flag is ignored).
Returns
-------
@@ -2338,10 +2384,6 @@ def head(self, n=5):
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
- >>> df.groupby('A', as_index=False).head(1)
- A B
- 0 1 2
- 2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
@@ -2357,8 +2399,9 @@ def tail(self, n=5):
"""
Return last n rows of each group.
- Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
- except ignores as_index flag.
+ Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
+ from the original DataFrame with original index and order preserved
+ (``as_index`` flag is ignored).
Returns
-------
@@ -2373,10 +2416,6 @@ def tail(self, n=5):
A B
1 a 2
3 b 2
- >>> df.groupby('A').head(1)
- A B
- 0 a 1
- 2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
@@ -2472,7 +2511,7 @@ def groupby(obj, by, **kwds):
from pandas.core.groupby.generic import DataFrameGroupBy
klass = DataFrameGroupBy
- else: # pragma: no cover
+ else:
raise TypeError("invalid type: {}".format(obj))
return klass(obj, by, **kwds)
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index f8417c3f01eac..2d37121d28308 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -25,6 +25,8 @@
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
+from pandas.core.generic import NDFrame
+from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby
from pandas.core.groupby.ops import BaseGrouper
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
@@ -35,7 +37,7 @@
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for a target
- object
+ object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
@@ -46,17 +48,17 @@ class Grouper:
Parameters
----------
- key : string, defaults to None
+ key : str, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
- freq : string / frequency object, defaults to None
+ freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
- sort : boolean, default to False
+ sort : bool, default to False
whether to sort the resulting labels
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
@@ -67,7 +69,7 @@ class Grouper:
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
- loffset : string, DateOffset, timedelta object
+ loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
Returns
@@ -215,7 +217,6 @@ def __repr__(self):
class Grouping:
-
"""
Holds the grouping information for a single key
@@ -310,8 +311,6 @@ def __init__(
# a passed Categorical
elif is_categorical_dtype(self.grouper):
- from pandas.core.groupby.categorical import recode_for_groupby
-
self.grouper, self.all_grouper = recode_for_groupby(
self.grouper, self.sort, observed
)
@@ -361,13 +360,10 @@ def __init__(
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
- from pandas import to_datetime
-
- self.grouper = to_datetime(self.grouper)
+ self.grouper = self.grouper.astype("datetime64[ns]")
elif is_timedelta64_dtype(self.grouper):
- from pandas import to_timedelta
- self.grouper = to_timedelta(self.grouper)
+ self.grouper = self.grouper.astype("timedelta64[ns]")
def __repr__(self):
return "Grouping({0})".format(self.name)
@@ -400,8 +396,6 @@ def labels(self):
@cache_readonly
def result_index(self):
if self.all_grouper is not None:
- from pandas.core.groupby.categorical import recode_from_groupby
-
return recode_from_groupby(self.all_grouper, self.sort, self.group_index)
return self.group_index
@@ -429,7 +423,7 @@ def groups(self):
def _get_grouper(
- obj,
+ obj: NDFrame,
key=None,
axis=0,
level=None,
@@ -493,12 +487,12 @@ def _get_grouper(
elif nlevels == 0:
raise ValueError("No group keys passed!")
else:
- raise ValueError("multiple levels only valid with " "MultiIndex")
+ raise ValueError("multiple levels only valid with MultiIndex")
if isinstance(level, str):
if obj.index.name != level:
raise ValueError(
- "level name {} is not the name of the " "index".format(level)
+ "level name {} is not the name of the index".format(level)
)
elif level > 0 or level < -1:
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
@@ -589,18 +583,22 @@ def _get_grouper(
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
+ items = obj._data.items
try:
- obj._data.items.get_loc(key)
- except Exception:
+ items.get_loc(key)
+ except (KeyError, TypeError):
+ # TypeError shows up here if we pass e.g. Int64Index
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
+ if not hasattr(gpr, "name"):
+ return False
try:
- return id(gpr) == id(obj[gpr.name])
- except Exception:
+ return gpr is obj[gpr.name]
+ except (KeyError, IndexError):
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
@@ -612,10 +610,10 @@ def is_in_obj(gpr):
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
- obj._check_label_or_level_ambiguity(gpr)
+ obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
- elif obj._is_level_reference(gpr):
+ elif obj._is_level_reference(gpr, axis=axis):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index e341a66bb7459..40517eefe4d5d 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -12,7 +12,7 @@
from pandas._libs import NaT, iNaT, lib
import pandas._libs.groupby as libgroupby
-import pandas._libs.reduction as reduction
+import pandas._libs.reduction as libreduction
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
@@ -20,12 +20,12 @@
ensure_float64,
ensure_int64,
ensure_int_or_float,
- ensure_object,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
+ is_datetime64tz_dtype,
is_integer_dtype,
is_numeric_dtype,
is_sparse,
@@ -207,14 +207,17 @@ def apply(self, f, data, axis=0):
if len(result_values) == len(group_keys):
return group_keys, result_values, mutated
- except reduction.InvalidApply:
+ except libreduction.InvalidApply:
# Cannot fast apply on MultiIndex (_has_complex_internals).
# This Exception is also raised if `f` triggers an exception
# but it is preferable to raise the exception in Python.
pass
- except Exception:
- # raise this error to the caller
- pass
+ except TypeError as err:
+ if "Cannot convert" in str(err):
+ # via apply_frame_axis0 if we pass a non-ndarray
+ pass
+ else:
+ raise
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, "name", key)
@@ -452,6 +455,7 @@ def wrapper(*args, **kwargs):
def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs):
assert kind in ["transform", "aggregate"]
+ orig_values = values
# can we do this operation with our cython functions
# if not raise NotImplementedError
@@ -462,37 +466,23 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs):
# categoricals are only 1d, so we
# are not setup for dim transforming
if is_categorical_dtype(values) or is_sparse(values):
- raise NotImplementedError(
- "{} are not support in cython ops".format(values.dtype)
- )
+ raise NotImplementedError("{} dtype not supported".format(values.dtype))
elif is_datetime64_any_dtype(values):
if how in ["add", "prod", "cumsum", "cumprod"]:
raise NotImplementedError(
- "datetime64 type does not support {} " "operations".format(how)
+ "datetime64 type does not support {} operations".format(how)
)
elif is_timedelta64_dtype(values):
if how in ["prod", "cumprod"]:
raise NotImplementedError(
- "timedelta64 type does not support {} " "operations".format(how)
+ "timedelta64 type does not support {} operations".format(how)
)
- arity = self._cython_arity.get(how, 1)
-
- vdim = values.ndim
- swapped = False
- if vdim == 1:
- values = values[:, None]
- out_shape = (self.ngroups, arity)
- else:
- if axis > 0:
- swapped = True
- assert axis == 1, axis
- values = values.T
- if arity > 1:
- raise NotImplementedError(
- "arity of more than 1 is not " "supported for the 'how' argument"
- )
- out_shape = (self.ngroups,) + values.shape[1:]
+ if is_datetime64tz_dtype(values.dtype):
+ # Cast to naive; we'll cast back at the end of the function
+ # TODO: possible need to reshape? kludge can be avoided when
+ # 2D EA is allowed.
+ values = values.view("M8[ns]")
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
@@ -514,6 +504,24 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs):
else:
values = values.astype(object)
+ arity = self._cython_arity.get(how, 1)
+
+ vdim = values.ndim
+ swapped = False
+ if vdim == 1:
+ values = values[:, None]
+ out_shape = (self.ngroups, arity)
+ else:
+ if axis > 0:
+ swapped = True
+ assert axis == 1, axis
+ values = values.T
+ if arity > 1:
+ raise NotImplementedError(
+ "arity of more than 1 is not supported for the 'how' argument"
+ )
+ out_shape = (self.ngroups,) + values.shape[1:]
+
try:
func = self._get_cython_function(kind, how, values, is_numeric)
except NotImplementedError:
@@ -567,15 +575,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs):
result[mask] = np.nan
if kind == "aggregate" and self._filter_empty_groups and not counts.all():
- if result.ndim == 2:
- try:
- result = lib.row_bool_subset(result, (counts > 0).view(np.uint8))
- except ValueError:
- result = lib.row_bool_subset_object(
- ensure_object(result), (counts > 0).view(np.uint8)
- )
- else:
- result = result[counts > 0]
+ assert result.ndim != 2
+ result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
@@ -589,6 +590,11 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs):
if swapped:
result = result.swapaxes(0, axis)
+ if is_datetime64tz_dtype(orig_values.dtype):
+ result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype)
+ elif is_datetimelike and kind == "aggregate":
+ result = result.astype(orig_values.dtype)
+
return result, names
def aggregate(self, values, how, axis=0, min_count=-1):
@@ -610,16 +616,9 @@ def _aggregate(
is_datetimelike,
min_count=-1,
):
- if values.ndim > 3:
+ if values.ndim > 2:
# punting for now
- raise NotImplementedError(
- "number of dimensions is currently " "limited to 3"
- )
- elif values.ndim > 2:
- for i, chunk in enumerate(values.transpose(2, 0, 1)):
-
- chunk = chunk.squeeze()
- agg_func(result[:, :, i], counts, chunk, comp_ids, min_count)
+ raise NotImplementedError("number of dimensions is currently limited to 2")
else:
agg_func(result, counts, values, comp_ids, min_count)
@@ -637,22 +636,9 @@ def _transform(
):
comp_ids, _, ngroups = self.group_info
- if values.ndim > 3:
+ if values.ndim > 2:
# punting for now
- raise NotImplementedError(
- "number of dimensions is currently " "limited to 3"
- )
- elif values.ndim > 2:
- for i, chunk in enumerate(values.transpose(2, 0, 1)):
-
- transform_func(
- result[:, :, i],
- values,
- comp_ids,
- ngroups,
- is_datetimelike,
- **kwargs
- )
+ raise NotImplementedError("number of dimensions is currently limited to 2")
else:
transform_func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
@@ -677,7 +663,7 @@ def _aggregate_series_fast(self, obj, func):
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer)
group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
- grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups, dummy)
+ grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups, dummy)
result, counts = grouper.get_result()
return result, counts
@@ -705,7 +691,6 @@ def _aggregate_series_pure_python(self, obj, func):
class BinGrouper(BaseGrouper):
-
"""
This is an internal Grouper class
@@ -851,7 +836,7 @@ def groupings(self):
def agg_series(self, obj, func):
dummy = obj[:0]
- grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)
+ grouper = libreduction.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
@@ -918,7 +903,7 @@ def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis)
def _chop(self, sdata, slice_obj):
- return sdata.iloc[slice_obj]
+ raise AbstractMethodError(self)
def apply(self, f):
raise AbstractMethodError(self)
@@ -932,20 +917,16 @@ def _chop(self, sdata, slice_obj):
class FrameSplitter(DataSplitter):
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
- try:
- starts, ends = lib.generate_slices(self.slabels, self.ngroups)
- except Exception:
- # fails when all -1
- return [], True
+ starts, ends = lib.generate_slices(self.slabels, self.ngroups)
sdata = self._get_sorted_data()
- return reduction.apply_frame_axis0(sdata, f, names, starts, ends)
+ return libreduction.apply_frame_axis0(sdata, f, names, starts, ends)
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
- return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]
+ return sdata._slice(slice_obj, axis=1)
def get_splitter(data, *args, **kwargs):
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index 70c48e969172f..433bca940c028 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -226,6 +226,7 @@ def length_of_indexer(indexer, target=None) -> int:
if step is None:
step = 1
elif step < 0:
+ start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 5ba23990cbd51..cc8ecc0e64684 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -316,7 +316,7 @@ def __new__(cls, data):
# do all the validation here.
from pandas import Series
- if not isinstance(data, Series):
+ if not isinstance(data, ABCSeries):
raise TypeError(
"cannot convert an object of type {0} to a "
"datetimelike index".format(type(data))
@@ -326,18 +326,15 @@ def __new__(cls, data):
if orig is not None:
data = Series(orig.values.categories, name=orig.name, copy=False)
- try:
- if is_datetime64_dtype(data.dtype):
- return DatetimeProperties(data, orig)
- elif is_datetime64tz_dtype(data.dtype):
- return DatetimeProperties(data, orig)
- elif is_timedelta64_dtype(data.dtype):
- return TimedeltaProperties(data, orig)
- elif is_period_arraylike(data):
- return PeriodProperties(data, orig)
- elif is_datetime_arraylike(data):
- return DatetimeProperties(data, orig)
- except Exception:
- pass # we raise an attribute error anyway
-
- raise AttributeError("Can only use .dt accessor with datetimelike " "values")
+ if is_datetime64_dtype(data.dtype):
+ return DatetimeProperties(data, orig)
+ elif is_datetime64tz_dtype(data.dtype):
+ return DatetimeProperties(data, orig)
+ elif is_timedelta64_dtype(data.dtype):
+ return TimedeltaProperties(data, orig)
+ elif is_period_arraylike(data):
+ return PeriodProperties(data, orig)
+ elif is_datetime_arraylike(data):
+ return DatetimeProperties(data, orig)
+
+ raise AttributeError("Can only use .dt accessor with datetimelike values")
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index a17f74286d59f..86d55ce2e7cc3 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -283,7 +283,7 @@ def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
- consensus_names = {tuple(i.names) for i in indexes if com._any_not_none(*i.names)}
+ consensus_names = {tuple(i.names) for i in indexes if com.any_not_none(*i.names)}
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 33de8e41b2f65..0b5f9fb61fce8 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime
import operator
from textwrap import dedent
from typing import Union
@@ -9,12 +9,14 @@
from pandas._libs import algos as libalgos, index as libindex, lib
import pandas._libs.join as libjoin
from pandas._libs.lib import is_datetime_array
-from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp
+from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
+from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.cast import maybe_cast_to_integer_array
from pandas.core.dtypes.common import (
ensure_categorical,
@@ -45,17 +47,17 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
+ ABCCategorical,
ABCDataFrame,
- ABCDateOffset,
ABCDatetimeArray,
+ ABCDatetimeIndex,
ABCIndexClass,
ABCMultiIndex,
ABCPandasArray,
ABCPeriodIndex,
ABCSeries,
- ABCTimedeltaArray,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import array_equivalent, isna
@@ -69,7 +71,8 @@
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
-from pandas.core.ops import get_op_result_name, make_invalid_op
+from pandas.core.ops import get_op_result_name
+from pandas.core.ops.invalid import make_invalid_op
import pandas.core.sorting as sorting
from pandas.core.strings import StringMethods
@@ -97,57 +100,34 @@
def _make_comparison_op(op, cls):
def cmp_method(self, other):
- if isinstance(other, (np.ndarray, Index, ABCSeries)):
+ if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
- if is_object_dtype(self) and not isinstance(self, ABCMultiIndex):
+ if is_object_dtype(self) and isinstance(other, ABCCategorical):
+ left = type(other)(self._values, dtype=other.dtype)
+ return op(left, other)
+ elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex):
# don't pass MultiIndex
with np.errstate(all="ignore"):
- result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
+ result = ops.comp_method_OBJECT_ARRAY(op, self.values, other)
else:
with np.errstate(all="ignore"):
result = op(self.values, np.asarray(other))
- # technically we could support bool dtyped Index
- # for now just return the indexing array directly
if is_bool_dtype(result):
return result
- try:
- return Index(result)
- except TypeError:
- return result
+ return ops.invalid_comparison(self, other, op)
name = "__{name}__".format(name=op.__name__)
- # TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
- if isinstance(other, (ABCSeries, ABCDataFrame)):
- return NotImplemented
- elif isinstance(other, ABCTimedeltaIndex):
- # Defer to subclass implementation
+ if isinstance(other, (ABCSeries, ABCDataFrame, ABCTimedeltaIndex)):
return NotImplemented
- elif isinstance(
- other, (np.ndarray, ABCTimedeltaArray)
- ) and is_timedelta64_dtype(other):
- # GH#22390; wrap in Series for op, this will in turn wrap in
- # TimedeltaIndex, but will correctly raise TypeError instead of
- # NullFrequencyError for add/sub ops
- from pandas import Series
-
- other = Series(other)
- out = op(self, other)
- return Index(out, name=self.name)
-
- # handle time-based others
- if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
- return self._evaluate_with_timedelta_like(other, op)
-
- other = self._validate_for_numeric_binop(other, op)
from pandas import Series
@@ -225,7 +205,7 @@ class Index(IndexOpsMixin, PandasObject):
"""
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = DirNamesMixin._deprecations | frozenset(["tolist"])
+ _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"])
# To hand over control to subclasses
_join_precedence = 1
@@ -263,6 +243,9 @@ def _outer_indexer(self, left, right):
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
+ # whether we support partial string indexing. Overridden
+ # in DatetimeIndex and PeriodIndex
+ _supports_partial_string_indexing = False
_accessors = {"str"}
@@ -280,7 +263,13 @@ def __new__(
fastpath=None,
tupleize_cols=True,
**kwargs
- ):
+ ) -> "Index":
+
+ from .range import RangeIndex
+ from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex
+ from .numeric import Float64Index, Int64Index, UInt64Index
+ from .interval import IntervalIndex
+ from .category import CategoricalIndex
if name is None and hasattr(data, "name"):
name = data.name
@@ -295,8 +284,6 @@ def __new__(
if fastpath:
return cls._simple_new(data, name)
- from .range import RangeIndex
-
if isinstance(data, ABCPandasArray):
# ensure users don't accidentally put a PandasArray in an index.
data = data.to_numpy()
@@ -309,72 +296,53 @@ def __new__(
# categorical
elif is_categorical_dtype(data) or is_categorical_dtype(dtype):
- from .category import CategoricalIndex
-
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)
# interval
elif (
is_interval_dtype(data) or is_interval_dtype(dtype)
) and not is_object_dtype(dtype):
- from .interval import IntervalIndex
-
closed = kwargs.get("closed", None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed)
elif (
is_datetime64_any_dtype(data)
- or (dtype is not None and is_datetime64_any_dtype(dtype))
+ or is_datetime64_any_dtype(dtype)
or "tz" in kwargs
):
- from pandas import DatetimeIndex
-
- if dtype is not None and is_dtype_equal(_o_dtype, dtype):
+ if is_dtype_equal(_o_dtype, dtype):
# GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
# will raise in the where `data` is already tz-aware. So
# we leave it out of this step and cast to object-dtype after
# the DatetimeIndex construction.
# Note we can pass copy=False because the .astype below
# will always make a copy
- result = DatetimeIndex(data, copy=False, name=name, **kwargs)
+ result = DatetimeIndex(
+ data, copy=False, name=name, **kwargs
+ ) # type: "Index"
return result.astype(object)
else:
- result = DatetimeIndex(
- data, copy=copy, name=name, dtype=dtype, **kwargs
- )
- return result
-
- elif is_timedelta64_dtype(data) or (
- dtype is not None and is_timedelta64_dtype(dtype)
- ):
- from pandas import TimedeltaIndex
+ return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
- if dtype is not None and is_dtype_equal(_o_dtype, dtype):
+ elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype):
+ if is_dtype_equal(_o_dtype, dtype):
# Note we can pass copy=False because the .astype below
# will always make a copy
result = TimedeltaIndex(data, copy=False, name=name, **kwargs)
return result.astype(object)
else:
- result = TimedeltaIndex(
- data, copy=copy, name=name, dtype=dtype, **kwargs
- )
- return result
+ return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
elif is_period_dtype(data) and not is_object_dtype(dtype):
- from pandas import PeriodIndex
-
- result = PeriodIndex(data, copy=copy, name=name, **kwargs)
- return result
+ return PeriodIndex(data, copy=copy, name=name, **kwargs)
# extension dtype
elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype):
data = np.asarray(data)
if not (dtype is None or is_object_dtype(dtype)):
-
# coerce to the provided dtype
- data = dtype.construct_array_type()._from_sequence(
- data, dtype=dtype, copy=False
- )
+ ea_cls = dtype.construct_array_type()
+ data = ea_cls._from_sequence(data, dtype=dtype, copy=False)
# coerce to the object dtype
data = data.astype(object)
@@ -383,75 +351,53 @@ def __new__(
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if dtype is not None:
- try:
-
- # we need to avoid having numpy coerce
- # things that look like ints/floats to ints unless
- # they are actually ints, e.g. '0' and 0.0
- # should not be coerced
- # GH 11836
- if is_integer_dtype(dtype):
- inferred = lib.infer_dtype(data, skipna=False)
- if inferred == "integer":
- data = maybe_cast_to_integer_array(data, dtype, copy=copy)
- elif inferred in ["floating", "mixed-integer-float"]:
- if isna(data).any():
- raise ValueError(
- "cannot convert float " "NaN to integer"
- )
-
- if inferred == "mixed-integer-float":
- data = maybe_cast_to_integer_array(data, dtype)
-
- # If we are actually all equal to integers,
- # then coerce to integer.
- try:
- return cls._try_convert_to_int_index(
- data, copy, name, dtype
- )
- except ValueError:
- pass
-
- # Return an actual float index.
- from .numeric import Float64Index
-
- return Float64Index(data, copy=copy, dtype=dtype, name=name)
-
- elif inferred == "string":
- pass
- else:
- data = data.astype(dtype)
- elif is_float_dtype(dtype):
- inferred = lib.infer_dtype(data, skipna=False)
- if inferred == "string":
+ # we need to avoid having numpy coerce
+ # things that look like ints/floats to ints unless
+ # they are actually ints, e.g. '0' and 0.0
+ # should not be coerced
+ # GH 11836
+ if is_integer_dtype(dtype):
+ inferred = lib.infer_dtype(data, skipna=False)
+ if inferred == "integer":
+ data = maybe_cast_to_integer_array(data, dtype, copy=copy)
+ elif inferred in ["floating", "mixed-integer-float"]:
+ if isna(data).any():
+ raise ValueError("cannot convert float NaN to integer")
+
+ if inferred == "mixed-integer-float":
+ data = maybe_cast_to_integer_array(data, dtype)
+
+ # If we are actually all equal to integers,
+ # then coerce to integer.
+ try:
+ return cls._try_convert_to_int_index(
+ data, copy, name, dtype
+ )
+ except ValueError:
pass
- else:
- data = data.astype(dtype)
+
+ # Return an actual float index.
+ return Float64Index(data, copy=copy, dtype=dtype, name=name)
+
+ elif inferred == "string":
+ pass
else:
- data = np.array(data, dtype=dtype, copy=copy)
-
- except (TypeError, ValueError) as e:
- msg = str(e)
- if (
- "cannot convert float" in msg
- or "Trying to coerce float values to integer" in msg
- ):
- raise
+ data = data.astype(dtype)
+ elif is_float_dtype(dtype):
+ inferred = lib.infer_dtype(data, skipna=False)
+ if inferred == "string":
+ pass
+ else:
+ data = data.astype(dtype)
+ else:
+ data = np.array(data, dtype=dtype, copy=copy)
# maybe coerce to a sub-class
- from pandas.core.indexes.period import PeriodIndex, IncompatibleFrequency
-
if is_signed_integer_dtype(data.dtype):
- from .numeric import Int64Index
-
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
- from .numeric import UInt64Index
-
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
- from .numeric import Float64Index
-
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype("object")
@@ -472,13 +418,10 @@ def __new__(
pass
return Index(subarr, copy=copy, dtype=object, name=name)
- elif inferred in ["floating", "mixed-integer-float"]:
- from .numeric import Float64Index
-
+ elif inferred in ["floating", "mixed-integer-float", "integer-na"]:
+ # TODO: Returns IntegerArray for integer-na case in the future
return Float64Index(subarr, copy=copy, name=name)
elif inferred == "interval":
- from .interval import IntervalIndex
-
try:
return IntervalIndex(subarr, name=name, copy=copy)
except ValueError:
@@ -489,23 +432,15 @@ def __new__(
pass
elif inferred != "string":
if inferred.startswith("datetime"):
- if (
- lib.is_datetime_with_singletz_array(subarr)
- or "tz" in kwargs
- ):
- # only when subarr has the same tz
- from pandas import DatetimeIndex
-
- try:
- return DatetimeIndex(
- subarr, copy=copy, name=name, **kwargs
- )
- except OutOfBoundsDatetime:
- pass
+ try:
+ return DatetimeIndex(subarr, copy=copy, name=name, **kwargs)
+ except (ValueError, OutOfBoundsDatetime):
+ # GH 27011
+ # If we have mixed timezones, just send it
+ # down the base constructor
+ pass
elif inferred.startswith("timedelta"):
- from pandas import TimedeltaIndex
-
return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs)
elif inferred == "period":
try:
@@ -517,7 +452,7 @@ def __new__(
elif hasattr(data, "__array__"):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)
elif data is None or is_scalar(data):
- cls._scalar_data_error(data)
+ raise cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data):
# GH21470: convert iterable to list before determining if empty
@@ -575,16 +510,6 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs):
Must be careful not to recurse.
"""
- if not hasattr(values, "dtype"):
- if (values is None or not len(values)) and dtype is not None:
- values = np.empty(0, dtype=dtype)
- else:
- values = np.array(values, copy=False)
- if is_object_dtype(values):
- values = cls(
- values, name=name, dtype=dtype, **kwargs
- )._ndarray_values
-
if isinstance(values, (ABCSeries, ABCIndexClass)):
# Index._data must always be an ndarray.
# This is no-copy for when _values is an ndarray,
@@ -711,7 +636,11 @@ def _cleanup(self):
@cache_readonly
def _engine(self):
# property, for now, slow to look up
- return self._engine_type(lambda: self._ndarray_values, len(self))
+
+ # to avoid a reference cycle, bind `_ndarray_values` to a local variable, so
+ # `self` is not passed into the lambda.
+ _ndarray_values = self._ndarray_values
+ return self._engine_type(lambda: _ndarray_values, len(self))
# --------------------------------------------------------------------
# Array-Like Methods
@@ -738,7 +667,6 @@ def __array_wrap__(self, result, context=None):
return result
attrs = self._get_attributes_dict()
- attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
@@ -809,8 +737,6 @@ def view(self, cls=None):
satisfied, the original data is used to create a new Index
or the original Index is returned.
- .. versionadded:: 0.19.0
-
Returns
-------
Index
@@ -978,8 +904,8 @@ def repeat(self, repeats, axis=None):
Parameters
----------
- name : string, optional
- deep : boolean, default False
+ name : str, optional
+ deep : bool, default False
dtype : numpy dtype or pandas type
Returns
@@ -1208,7 +1134,7 @@ def summary(self, name=None):
.. deprecated:: 0.23.0
"""
warnings.warn(
- "'summary' is deprecated and will be removed in a " "future version.",
+ "'summary' is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
@@ -1246,7 +1172,7 @@ def to_series(self, index=None, name=None):
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
- name : string, optional
+ name : str, optional
name of resulting Series. If None, defaults to name of original
index
@@ -1272,7 +1198,7 @@ def to_frame(self, index=True, name=None):
Parameters
----------
- index : boolean, default True
+ index : bool, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
@@ -1475,7 +1401,7 @@ def rename(self, name, inplace=False):
----------
name : label or list of labels
Name(s) to set.
- inplace : boolean, default False
+ inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
@@ -1547,10 +1473,14 @@ def _validate_index_level(self, level):
)
elif level > 0:
raise IndexError(
- "Too many levels:" " Index has only 1 level, not %d" % (level + 1)
+ "Too many levels: Index has only 1 level, not %d" % (level + 1)
)
elif level != self.name:
- raise KeyError("Level %s must be same as name (%s)" % (level, self.name))
+ raise KeyError(
+ "Requested level ({}) does not match index name ({})".format(
+ level, self.name
+ )
+ )
def _get_level_number(self, level):
self._validate_index_level(level)
@@ -1564,7 +1494,7 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None):
Parameters
----------
- ascending : boolean, default True
+ ascending : bool, default True
False to sort in descending order
level, sort_remaining are compat parameters
@@ -1811,7 +1741,7 @@ def is_integer(self):
return self.inferred_type in ["integer"]
def is_floating(self):
- return self.inferred_type in ["floating", "mixed-integer-float"]
+ return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
def is_numeric(self):
return self.inferred_type in ["integer", "floating"]
@@ -1876,8 +1806,6 @@ def inferred_type(self):
@cache_readonly
def is_all_dates(self):
- if self._data is None:
- return False
return is_datetime_array(ensure_object(self.values))
# --------------------------------------------------------------------
@@ -2064,7 +1992,7 @@ def notna(self):
_index_shared_docs[
"fillna"
] = """
- Fill NA/NaN values with the specified value
+ Fill NA/NaN values with the specified value.
Parameters
----------
@@ -2095,7 +2023,7 @@ def fillna(self, value=None, downcast=None):
_index_shared_docs[
"dropna"
] = """
- Return Index without NA/NaN values
+ Return Index without NA/NaN values.
Parameters
----------
@@ -2369,7 +2297,10 @@ def __sub__(self, other):
return Index(np.array(self) - other)
def __rsub__(self, other):
- return Index(other - np.array(self))
+ # wrap Series to ensure we pin name correctly
+ from pandas import Series
+
+ return Index(other - Series(self))
def __and__(self, other):
return self.intersection(other)
@@ -2563,7 +2494,7 @@ def _union(self, other, sort):
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer, allow_fill=False)
- result = _concat._concat_compat((lvals, other_diff))
+ result = concat_compat((lvals, other_diff))
else:
result = lvals
@@ -2657,8 +2588,9 @@ def intersection(self, other, sort=False):
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
- except Exception:
- # duplicates
+ except (InvalidIndexError, IncompatibleFrequency):
+ # InvalidIndexError raised by get_indexer if non-unique
+ # IncompatibleFrequency raised by PeriodIndex.get_indexer
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
@@ -2809,7 +2741,7 @@ def symmetric_difference(self, other, result_name=None, sort=None):
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
- the_diff = _concat._concat_compat([left_diff, right_diff])
+ the_diff = concat_compat([left_diff, right_diff])
if sort is None:
try:
the_diff = sorting.safe_sort(the_diff)
@@ -2975,7 +2907,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if not self.is_unique:
raise InvalidIndexError(
- "Reindexing only valid with uniquely" " valued Index objects"
+ "Reindexing only valid with uniquely valued Index objects"
)
if method == "pad" or method == "backfill":
@@ -3002,7 +2934,7 @@ def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
- raise ValueError("list-like tolerance size must match " "target index size")
+ raise ValueError("list-like tolerance size must match target index size")
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
@@ -3120,6 +3052,7 @@ def _convert_scalar_indexer(self, key, kind=None):
if self.inferred_type not in [
"floating",
"mixed-integer-float",
+ "integer-na",
"string",
"unicode",
"mixed",
@@ -3147,13 +3080,9 @@ def _convert_scalar_indexer(self, key, kind=None):
"""
@Appender(_index_shared_docs["_convert_slice_indexer"])
- def _convert_slice_indexer(self, key, kind=None):
+ def _convert_slice_indexer(self, key: slice, kind=None):
assert kind in ["ix", "loc", "getitem", "iloc", None]
- # if we are not a slice, then we are done
- if not isinstance(key, slice):
- return key
-
# validate iloc
if kind == "iloc":
return slice(
@@ -3197,7 +3126,7 @@ def is_int(v):
self.get_loc(stop)
is_positional = False
except KeyError:
- if self.inferred_type == "mixed-integer-float":
+ if self.inferred_type in ["mixed-integer-float", "integer-na"]:
raise
if is_null_slicer:
@@ -3487,8 +3416,8 @@ def _reindex_non_unique(self, target):
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
- return_indexers : boolean, default False
- sort : boolean, default False
+ return_indexers : bool, default False
+ sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
@@ -3610,8 +3539,8 @@ def _join_multi(self, other, how, return_indexers=True):
from pandas.core.reshape.merge import _restore_dropped_levels_multijoin
# figure out join names
- self_names = set(com._not_none(*self.names))
- other_names = set(com._not_none(*other.names))
+ self_names = set(com.not_none(*self.names))
+ other_names = set(com.not_none(*other.names))
overlap = self_names & other_names
# need at least 1 in common
@@ -3734,9 +3663,7 @@ def _get_leaf_sorter(labels):
return lib.get_level_sorter(lab, ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
- raise TypeError(
- "Join on level between two MultiIndex objects " "is ambiguous"
- )
+ raise TypeError("Join on level between two MultiIndex objects is ambiguous")
left, right = self, other
@@ -3750,7 +3677,7 @@ def _get_leaf_sorter(labels):
if not right.is_unique:
raise NotImplementedError(
- "Index._join_level on non-unique index " "is not implemented"
+ "Index._join_level on non-unique index is not implemented"
)
new_level, left_lev_indexer, right_lev_indexer = old_level.join(
@@ -4014,11 +3941,9 @@ def memory_usage(self, deep=False):
entries are from self where cond is True and otherwise are from
other.
- .. versionadded:: 0.19.0
-
Parameters
----------
- cond : boolean array-like with the same length as self
+ cond : bool array-like with the same length as self
other : scalar, or array-like
Returns
@@ -4096,7 +4021,9 @@ def _try_convert_to_int_index(cls, data, copy, name, dtype):
@classmethod
def _scalar_data_error(cls, data):
- raise TypeError(
+ # We return the TypeError so that we can raise it from the constructor
+ # in order to keep mypy happy
+ return TypeError(
"{0}(...) must be called with a collection of some "
"kind, {1} was passed".format(cls.__name__, repr(data))
)
@@ -4124,7 +4051,7 @@ def _coerce_to_ndarray(cls, data):
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
- cls._scalar_data_error(data)
+ raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
@@ -4335,14 +4262,25 @@ def _concat(self, to_concat, name):
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
- return _concat._concat_index_asobject(to_concat, name=name)
+ return Index._concat_same_dtype(self, to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
# must be overridden in specific classes
- return _concat._concat_index_asobject(to_concat, name)
+ klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray)
+ to_concat = [
+ x.astype(object) if isinstance(x, klasses) else x for x in to_concat
+ ]
+
+ self = to_concat[0]
+ attribs = self._get_attributes_dict()
+ attribs["name"] = name
+
+ to_concat = [x._values if isinstance(x, Index) else x for x in to_concat]
+
+ return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
def putmask(self, mask, value):
"""
@@ -4387,12 +4325,9 @@ def equals(self, other):
# if other is not object, use other's logic for coercion
return other.equals(self)
- try:
- return array_equivalent(
- com.values_from_object(self), com.values_from_object(other)
- )
- except Exception:
- return False
+ return array_equivalent(
+ com.values_from_object(self), com.values_from_object(other)
+ )
def identical(self, other):
"""
@@ -4578,9 +4513,7 @@ def sort(self, *args, **kwargs):
"""
Use sort_values instead.
"""
- raise TypeError(
- "cannot sort an Index object in-place, use " "sort_values instead"
- )
+ raise TypeError("cannot sort an Index object in-place, use sort_values instead")
def shift(self, periods=1, freq=None):
"""
@@ -4726,7 +4659,7 @@ def get_value(self, series, key):
raise
try:
- return libindex.get_value_box(s, key)
+ return libindex.get_value_at(s, key)
except IndexError:
raise
except TypeError:
@@ -4780,13 +4713,13 @@ def set_value(self, arr, key, value):
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
- if is_categorical(target):
- target = target.astype(target.dtype.categories.dtype)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
- if self.is_all_dates:
+ if is_categorical(target):
+ tgt_values = np.asarray(target)
+ elif self.is_all_dates:
tgt_values = target.asi8
else:
tgt_values = target._ndarray_values
@@ -4798,7 +4731,7 @@ def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
- This dispatches to get_indexer or get_indexer_nonunique
+ This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
@@ -4904,11 +4837,6 @@ def isin(self, values, level=None):
----------
values : set or list-like
Sought values.
-
- .. versionadded:: 0.18.1
-
- Support for values as a set.
-
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
@@ -4997,7 +4925,7 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
end : label, default None
If None, defaults to the end
step : int, default None
- kind : string, default None
+ kind : str, default None
Returns
-------
@@ -5234,7 +5162,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
- raise ValueError("Both dates must have the " "same UTC offset")
+ raise ValueError("Both dates must have the same UTC offset")
start_slice = None
if start is not None:
@@ -5337,32 +5265,6 @@ def drop(self, labels, errors="raise"):
# --------------------------------------------------------------------
# Generated Arithmetic, Comparison, and Unary Methods
- def _evaluate_with_timedelta_like(self, other, op):
- # Timedelta knows how to operate with np.array, so dispatch to that
- # operation and then wrap the results
- if self._is_numeric_dtype and op.__name__ in ["add", "sub", "radd", "rsub"]:
- raise TypeError(
- "Operation {opname} between {cls} and {other} "
- "is invalid".format(
- opname=op.__name__, cls=self.dtype, other=type(other).__name__
- )
- )
-
- other = Timedelta(other)
- values = self.values
-
- with np.errstate(all="ignore"):
- result = op(values, other)
-
- attrs = self._get_attributes_dict()
- attrs = self._maybe_update_attributes(attrs)
- if op == divmod:
- return Index(result[0], **attrs), Index(result[1], **attrs)
- return Index(result, **attrs)
-
- def _evaluate_with_datetime_like(self, other, op):
- raise TypeError("can only perform ops with datetime like values")
-
@classmethod
def _add_comparison_methods(cls):
"""
@@ -5407,69 +5309,6 @@ def _add_numeric_methods_disabled(cls):
cls.__abs__ = make_invalid_op("__abs__")
cls.__inv__ = make_invalid_op("__inv__")
- def _maybe_update_attributes(self, attrs):
- """
- Update Index attributes (e.g. freq) depending on op.
- """
- return attrs
-
- def _validate_for_numeric_unaryop(self, op, opstr):
- """
- Validate if we can perform a numeric unary operation.
- """
- if not self._is_numeric_dtype:
- raise TypeError(
- "cannot evaluate a numeric op "
- "{opstr} for type: {typ}".format(opstr=opstr, typ=type(self).__name__)
- )
-
- def _validate_for_numeric_binop(self, other, op):
- """
- Return valid other; evaluate or raise TypeError if we are not of
- the appropriate type.
-
- Notes
- -----
- This is an internal method called by ops.
- """
- opstr = "__{opname}__".format(opname=op.__name__)
- # if we are an inheritor of numeric,
- # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
- if not self._is_numeric_dtype:
- raise TypeError(
- "cannot evaluate a numeric op {opstr} "
- "for type: {typ}".format(opstr=opstr, typ=type(self).__name__)
- )
-
- if isinstance(other, Index):
- if not other._is_numeric_dtype:
- raise TypeError(
- "cannot evaluate a numeric op "
- "{opstr} with type: {typ}".format(opstr=opstr, typ=type(other))
- )
- elif isinstance(other, np.ndarray) and not other.ndim:
- other = other.item()
-
- if isinstance(other, (Index, ABCSeries, np.ndarray)):
- if len(self) != len(other):
- raise ValueError("cannot evaluate a numeric op with " "unequal lengths")
- other = com.values_from_object(other)
- if other.dtype.kind not in ["f", "i", "u"]:
- raise TypeError(
- "cannot evaluate a numeric op " "with a non-numeric dtype"
- )
- elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
- # higher up to handle
- pass
- elif isinstance(other, (datetime, np.datetime64)):
- # higher up to handle
- pass
- else:
- if not (is_float(other) or is_integer(other)):
- raise TypeError("can only perform ops with scalar values")
-
- return other
-
@classmethod
def _add_numeric_methods_binary(cls):
"""
@@ -5502,9 +5341,7 @@ def _add_numeric_methods_unary(cls):
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
- self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
- attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
_evaluate_numeric_unary.__name__ = opstr
@@ -5626,7 +5463,7 @@ def logical_func(self, *args, **kwargs):
return logical_func
cls.all = _make_logical_function(
- "all", "Return whether all elements " "are True.", np.all
+ "all", "Return whether all elements are True.", np.all
)
cls.any = _make_logical_function(
"any", "Return whether any element is True.", np.any
@@ -5645,7 +5482,10 @@ def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
- return (len(self),)
+ # not using "(len(self), )" to return "correct" shape if the values
+ # consists of a >1 D array (see GH-27775)
+ # overridden in MultiIndex.shape to avoid materializing the values
+ return self._values.shape
Index._add_numeric_methods_disabled()
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index e14bf7f86c0be..c4321c993e638 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -7,6 +7,7 @@
from pandas._config import get_option
from pandas._libs import index as libindex
+from pandas._libs.hashtable import duplicated_int64
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
@@ -25,7 +26,7 @@
from pandas._typing import AnyArrayLike
from pandas.core import accessor
from pandas.core.algorithms import take_1d
-from pandas.core.arrays.categorical import Categorical, contains
+from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
@@ -193,7 +194,7 @@ def __new__(
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
- cls._scalar_data_error(data)
+ raise cls._scalar_data_error(data)
data = []
data = cls._create_categorical(data, dtype=dtype)
@@ -290,7 +291,7 @@ def _is_dtype_compat(self, other):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError(
- "categories must match existing categories " "when appending"
+ "categories must match existing categories when appending"
)
else:
values = other
@@ -299,7 +300,7 @@ def _is_dtype_compat(self, other):
other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype))
if not other.isin(values).all():
raise TypeError(
- "cannot append a non-category item to a " "CategoricalIndex"
+ "cannot append a non-category item to a CategoricalIndex"
)
return other
@@ -445,9 +446,11 @@ def argsort(self, *args, **kwargs):
@cache_readonly
def _engine(self):
-
- # we are going to look things up with the codes themselves
- return self._engine_type(lambda: self.codes, len(self))
+ # we are going to look things up with the codes themselves.
+ # To avoid a reference cycle, bind `codes` to a local variable, so
+ # `self` is not passed into the lambda.
+ codes = self.codes
+ return self._engine_type(lambda: codes, len(self))
# introspection
@cache_readonly
@@ -473,8 +476,6 @@ def unique(self, level=None):
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
- from pandas._libs.hashtable import duplicated_int64
-
codes = self.codes.astype("i8")
return duplicated_int64(codes, keep)
@@ -581,15 +582,15 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
if method is not None:
raise NotImplementedError(
- "argument method is not implemented for " "CategoricalIndex.reindex"
+ "argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
- "argument level is not implemented for " "CategoricalIndex.reindex"
+ "argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
- "argument limit is not implemented for " "CategoricalIndex.reindex"
+ "argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
@@ -657,8 +658,6 @@ def _reindex_non_unique(self, target):
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
- from pandas.core.arrays.categorical import _recode_for_categories
-
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
@@ -672,7 +671,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
)
elif method == "nearest":
raise NotImplementedError(
- "method='nearest' not implemented yet " "for CategoricalIndex"
+ "method='nearest' not implemented yet for CategoricalIndex"
)
if isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target):
@@ -902,31 +901,12 @@ def _make_compare(op):
opname = "__{op}__".format(op=op.__name__)
def _evaluate_compare(self, other):
-
- # if we have a Categorical type, then must have the same
- # categories
- if isinstance(other, CategoricalIndex):
- other = other._values
- elif isinstance(other, Index):
- other = self._create_categorical(other._values, dtype=self.dtype)
-
- if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)):
- if len(self.values) != len(other):
- raise ValueError("Lengths must match to compare")
-
- if isinstance(other, ABCCategorical):
- if not self.values.is_dtype_equal(other):
- raise TypeError(
- "categorical index comparisons must "
- "have the same categories and ordered "
- "attributes"
- )
-
- result = op(self.values, other)
+ with np.errstate(all="ignore"):
+ result = op(self.array, other)
if isinstance(result, ABCSeries):
# Dispatch to pd.Categorical returned NotImplemented
# and we got a Series back; down-cast to ndarray
- result = result.values
+ result = result._values
return result
return compat.set_function_name(_evaluate_compare, opname, cls)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 731ab9c416345..bf89bbbdf2b79 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -15,6 +15,7 @@
from pandas.core.dtypes.common import (
ensure_int64,
+ is_bool_dtype,
is_dtype_equal,
is_float,
is_integer,
@@ -62,6 +63,16 @@ def method(self, *args, **kwargs):
return method
+def _make_wrapped_arith_op(opname):
+ def method(self, other):
+ meth = getattr(self._data, opname)
+ result = meth(maybe_unwrap_index(other))
+ return wrap_arithmetic_op(self, other, result)
+
+ method.__name__ = opname
+ return method
+
+
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
"""
common ops mixin to support a unified interface datetimelike Index
@@ -153,6 +164,20 @@ def values(self):
def asi8(self):
return self._data.asi8
+ def __array_wrap__(self, result, context=None):
+ """
+ Gets called after a ufunc.
+ """
+ result = lib.item_from_zerodim(result)
+ if is_bool_dtype(result) or lib.is_scalar(result):
+ return result
+
+ attrs = self._get_attributes_dict()
+ if not is_period_dtype(self) and attrs["freq"]:
+ # no need to infer if freq is None
+ attrs["freq"] = "infer"
+ return Index(result, **attrs)
+
# ------------------------------------------------------------------------
def equals(self, other):
@@ -167,7 +192,11 @@ def equals(self, other):
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
- except Exception:
+ except (ValueError, TypeError, OverflowError):
+ # e.g.
+ # ValueError -> cannot parse str entry, or OutOfBoundsDatetime
+ # TypeError -> trying to convert IntervalIndex to DatetimeIndex
+ # OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
@@ -315,7 +344,7 @@ def asobject(self):
*this is an internal non-public method*
"""
warnings.warn(
- "'asobject' is deprecated. Use 'astype(object)'" " instead",
+ "'asobject' is deprecated. Use 'astype(object)' instead",
FutureWarning,
stacklevel=2,
)
@@ -325,7 +354,7 @@ def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
- raise ValueError("list-like tolerance size must match " "target index size")
+ raise ValueError("list-like tolerance size must match target index size")
return tolerance
def tolist(self):
@@ -531,6 +560,19 @@ def __rsub__(self, other):
cls.__rsub__ = __rsub__
+ __pow__ = _make_wrapped_arith_op("__pow__")
+ __rpow__ = _make_wrapped_arith_op("__rpow__")
+ __mul__ = _make_wrapped_arith_op("__mul__")
+ __rmul__ = _make_wrapped_arith_op("__rmul__")
+ __floordiv__ = _make_wrapped_arith_op("__floordiv__")
+ __rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
+ __mod__ = _make_wrapped_arith_op("__mod__")
+ __rmod__ = _make_wrapped_arith_op("__rmod__")
+ __divmod__ = _make_wrapped_arith_op("__divmod__")
+ __rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
+ __truediv__ = _make_wrapped_arith_op("__truediv__")
+ __rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
+
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 5024eebe03bb4..0b20df38e7d42 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -4,7 +4,7 @@
import numpy as np
-from pandas._libs import Timestamp, index as libindex, lib, tslib as libts
+from pandas._libs import NaT, Timestamp, index as libindex, lib, tslib as libts
import pandas._libs.join as libjoin
from pandas._libs.tslibs import ccalendar, fields, parsing, timezones
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -18,7 +18,7 @@
is_scalar,
is_string_like,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
@@ -69,7 +69,7 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin):
# Some are "raw" methods, the result is not not re-boxed in an Index
# We also have a few "extra" attrs, which may or may not be raw,
# which we we dont' want to expose in the .dt accessor.
- _extra_methods = ["to_period", "to_perioddelta", "to_julian_date"]
+ _extra_methods = ["to_period", "to_perioddelta", "to_julian_date", "strftime"]
_extra_raw_methods = ["to_pydatetime", "_local_timestamps", "_has_same_tz"]
_extra_raw_properties = ["_box_func", "tz", "tzinfo"]
_delegated_properties = DatetimeArray._datetimelike_ops + _extra_raw_properties
@@ -106,7 +106,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
- freq : string or pandas offset object, optional
+ freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
@@ -129,7 +129,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
.. deprecated:: 0.24.0
- closed : string or None, default None
+ closed : str or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
@@ -238,6 +238,7 @@ def _join_i8_wrapper(joinf, **kwargs):
)
_engine_type = libindex.DatetimeEngine
+ _supports_partial_string_indexing = True
_tz = None
_freq = None
@@ -464,14 +465,6 @@ def _convert_for_op(self, value):
return _to_M8(value)
raise ValueError("Passed item and index have different timezone")
- def _maybe_update_attributes(self, attrs):
- """ Update Index attributes (e.g. freq) depending on op """
- freq = attrs.get("freq", None)
- if freq is not None:
- # no need to infer if freq is None
- attrs["freq"] = "infer"
- return attrs
-
# --------------------------------------------------------------------
# Rendering Methods
@@ -608,7 +601,7 @@ def _fast_union(self, other, sort=None):
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right.values[:loc]
- dates = _concat._concat_compat((left.values, right_chunk))
+ dates = concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
# DTIs are not in the "correct" order and we want
# to sort
@@ -624,7 +617,7 @@ def _fast_union(self, other, sort=None):
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right.values[loc:]
- dates = _concat._concat_compat((left.values, right_chunk))
+ dates = concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
@@ -668,7 +661,7 @@ def _get_time_micros(self):
def to_series(self, keep_tz=None, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
- useful with map for returning an indexer based on an index
+ useful with map for returning an indexer based on an index.
Parameters
----------
@@ -694,10 +687,10 @@ def to_series(self, keep_tz=None, index=None, name=None):
behaviour and silence the warning.
index : Index, optional
- index of resulting Series. If None, defaults to original index
- name : string, optional
- name of resulting Series. If None, defaults to name of original
- index
+ Index of resulting Series. If None, defaults to original index.
+ name : str, optional
+ Name of resulting Series. If None, defaults to name of original
+ index.
Returns
-------
@@ -742,7 +735,7 @@ def to_series(self, keep_tz=None, index=None, name=None):
def snap(self, freq="S"):
"""
- Snap time stamps to nearest occurring frequency
+ Snap time stamps to nearest occurring frequency.
Returns
-------
@@ -778,6 +771,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
not in (
"floating",
"integer",
+ "integer-na",
"mixed-integer",
"mixed-integer-float",
"mixed",
@@ -803,11 +797,9 @@ def _maybe_utc_convert(self, other):
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
- raise TypeError(
- "Cannot join tz-naive with tz-aware " "DatetimeIndex"
- )
+ raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
elif other.tz is not None:
- raise TypeError("Cannot join tz-naive with tz-aware " "DatetimeIndex")
+ raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
if not timezones.tz_compare(self.tz, other.tz):
this = self.tz_convert("UTC")
@@ -1048,7 +1040,7 @@ def get_loc(self, key, method=None, tolerance=None):
if isinstance(key, time):
if method is not None:
raise NotImplementedError(
- "cannot yet lookup inexact labels " "when key is a time object"
+ "cannot yet lookup inexact labels when key is a time object"
)
return self.indexer_at_time(key)
@@ -1184,7 +1176,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
is_normalized = cache_readonly(DatetimeArray.is_normalized.fget) # type: ignore
_resolution = cache_readonly(DatetimeArray._resolution.fget) # type: ignore
- strftime = ea_passthrough(DatetimeArray.strftime)
_has_same_tz = ea_passthrough(DatetimeArray._has_same_tz)
@property
@@ -1282,7 +1273,9 @@ def insert(self, loc, item):
raise ValueError("Passed item and index have different timezone")
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
- if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
+ if item is NaT:
+ pass
+ elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
@@ -1378,8 +1371,8 @@ def indexer_between_time(
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
- include_start : boolean, default True
- include_end : boolean, default True
+ include_start : bool, default True
+ include_end : bool, default True
Returns
-------
@@ -1442,7 +1435,7 @@ def date_range(
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
- periods : integer, optional
+ periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
@@ -1570,7 +1563,7 @@ def date_range(
dtype='datetime64[ns]', freq='D')
"""
- if freq is None and com._any_none(periods, start, end):
+ if freq is None and com.any_none(periods, start, end):
freq = "D"
dtarr = DatetimeArray._generate_range(
@@ -1601,26 +1594,26 @@ def bdate_range(
):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
- frequency
+ frequency.
Parameters
----------
- start : string or datetime-like, default None
+ start : str or datetime-like, default None
Left bound for generating dates.
- end : string or datetime-like, default None
+ end : str or datetime-like, default None
Right bound for generating dates.
- periods : integer, default None
+ periods : int, default None
Number of periods to generate.
- freq : string or DateOffset, default 'B' (business daily)
+ freq : str or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'.
- tz : string or None
+ tz : str or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
- name : string, default None
+ name : str, default None
Name of the resulting DatetimeIndex.
- weekmask : string or None, default None
+ weekmask : str or None, default None
Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
only used when custom frequency strings are passed. The default
value None is equivalent to 'Mon Tue Wed Thu Fri'.
@@ -1634,7 +1627,7 @@ def bdate_range(
.. versionadded:: 0.21.0
- closed : string, default None
+ closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
**kwargs
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 2e5b3ff8ef502..a6c39d049c50c 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -22,7 +22,6 @@
class FrozenList(PandasObject, list):
-
"""
Container that doesn't allow setting item *but*
because it's technically non-hashable, will be used
@@ -71,12 +70,7 @@ def difference(self, other):
# TODO: Consider deprecating these in favor of `union` (xref gh-15506)
__add__ = __iadd__ = union
- # Python 2 compat
- def __getslice__(self, i, j):
- return self.__class__(super().__getslice__(i, j))
-
def __getitem__(self, n):
- # Python 3 compat
if isinstance(n, slice):
return self.__class__(super().__getitem__(n))
return super().__getitem__(n)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 561cf436c9af4..29e297cb28a3b 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -250,7 +250,22 @@ def _simple_new(cls, array, name, closed=None):
return result
@classmethod
- @Appender(_interval_shared_docs["from_breaks"] % _index_doc_kwargs)
+ @Appender(
+ _interval_shared_docs["from_breaks"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
+ IntervalIndex([(0, 1], (1, 2], (2, 3]],
+ closed='right',
+ dtype='interval[int64]')
+ """
+ ),
+ )
+ )
def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
@@ -259,7 +274,22 @@ def from_breaks(cls, breaks, closed="right", name=None, copy=False, dtype=None):
return cls._simple_new(array, name=name)
@classmethod
- @Appender(_interval_shared_docs["from_arrays"] % _index_doc_kwargs)
+ @Appender(
+ _interval_shared_docs["from_arrays"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
+ IntervalIndex([(0, 1], (1, 2], (2, 3]],
+ closed='right',
+ dtype='interval[int64]')
+ """
+ ),
+ )
+ )
def from_arrays(
cls, left, right, closed="right", name=None, copy=False, dtype=None
):
@@ -270,23 +300,22 @@ def from_arrays(
return cls._simple_new(array, name=name)
@classmethod
- @Appender(_interval_shared_docs["from_intervals"] % _index_doc_kwargs)
- def from_intervals(cls, data, closed=None, name=None, copy=False, dtype=None):
- msg = (
- "IntervalIndex.from_intervals is deprecated and will be "
- "removed in a future version; Use IntervalIndex(...) instead"
+ @Appender(
+ _interval_shared_docs["from_tuples"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
+ IntervalIndex([(0, 1], (1, 2]],
+ closed='right',
+ dtype='interval[int64]')
+ """
+ ),
)
- warnings.warn(msg, FutureWarning, stacklevel=2)
- with rewrite_exception("IntervalArray", cls.__name__):
- array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
-
- if name is None and isinstance(data, cls):
- name = data.name
-
- return cls._simple_new(array, name=name)
-
- @classmethod
- @Appender(_interval_shared_docs["from_tuples"] % _index_doc_kwargs)
+ )
def from_tuples(cls, data, closed="right", name=None, copy=False, dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
@@ -347,7 +376,8 @@ def __contains__(self, key):
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
- Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
+ Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')
+ """,
)
)
def to_tuples(self, na_tuple=True):
@@ -382,7 +412,27 @@ def closed(self):
"""
return self._data._closed
- @Appender(_interval_shared_docs["set_closed"] % _index_doc_kwargs)
+ @Appender(
+ _interval_shared_docs["set_closed"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> index = pd.interval_range(0, 3)
+ >>> index
+ IntervalIndex([(0, 1], (1, 2], (2, 3]],
+ closed='right',
+ dtype='interval[int64]')
+ >>> index.set_closed('both')
+ IntervalIndex([[0, 1], [1, 2], [2, 3]],
+ closed='both',
+ dtype='interval[int64]')
+ """
+ ),
+ )
+ )
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
@@ -804,7 +854,7 @@ def _find_non_overlapping_monotonic_bounds(self, key):
return start, stop
def get_loc(
- self, key: Any, method: Optional[str] = None
+ self, key: Any, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
"""
Get integer location, slice or boolean mask for requested label.
@@ -906,35 +956,35 @@ def get_indexer(
)
raise InvalidIndexError(msg)
- target = ensure_index(target)
+ target_as_index = ensure_index(target)
- if isinstance(target, IntervalIndex):
+ if isinstance(target_as_index, IntervalIndex):
# equal indexes -> 1:1 positional match
- if self.equals(target):
+ if self.equals(target_as_index):
return np.arange(len(self), dtype="intp")
# different closed or incompatible subtype -> no matches
common_subtype = find_common_type(
- [self.dtype.subtype, target.dtype.subtype]
+ [self.dtype.subtype, target_as_index.dtype.subtype]
)
- if self.closed != target.closed or is_object_dtype(common_subtype):
- return np.repeat(np.intp(-1), len(target))
+ if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
+ return np.repeat(np.intp(-1), len(target_as_index))
- # non-overlapping -> at most one match per interval in target
+ # non-overlapping -> at most one match per interval in target_as_index
# want exact matches -> need both left/right to match, so defer to
# left/right get_indexer, compare elementwise, equality -> match
- left_indexer = self.left.get_indexer(target.left)
- right_indexer = self.right.get_indexer(target.right)
+ left_indexer = self.left.get_indexer(target_as_index.left)
+ right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
- elif not is_object_dtype(target):
+ elif not is_object_dtype(target_as_index):
# homogeneous scalar index: use IntervalTree
- target = self._maybe_convert_i8(target)
- indexer = self._engine.get_indexer(target.values)
+ target_as_index = self._maybe_convert_i8(target_as_index)
+ indexer = self._engine.get_indexer(target_as_index.values)
else:
# heterogeneous scalar index: defer elementwise to get_loc
# (non-overlapping so get_loc guarantees scalar of KeyError)
indexer = []
- for key in target:
+ for key in target_as_index:
try:
loc = self.get_loc(key)
except KeyError:
@@ -947,21 +997,26 @@ def get_indexer(
def get_indexer_non_unique(
self, target: AnyArrayLike
) -> Tuple[np.ndarray, np.ndarray]:
- target = ensure_index(target)
+ target_as_index = ensure_index(target)
- # check that target IntervalIndex is compatible
- if isinstance(target, IntervalIndex):
+ # check that target_as_index IntervalIndex is compatible
+ if isinstance(target_as_index, IntervalIndex):
common_subtype = find_common_type(
- [self.dtype.subtype, target.dtype.subtype]
+ [self.dtype.subtype, target_as_index.dtype.subtype]
)
- if self.closed != target.closed or is_object_dtype(common_subtype):
+ if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
# different closed or incompatible subtype -> no matches
- return np.repeat(-1, len(target)), np.arange(len(target))
+ return (
+ np.repeat(-1, len(target_as_index)),
+ np.arange(len(target_as_index)),
+ )
- if is_object_dtype(target) or isinstance(target, IntervalIndex):
- # target might contain intervals: defer elementwise to get_loc
+ if is_object_dtype(target_as_index) or isinstance(
+ target_as_index, IntervalIndex
+ ):
+ # target_as_index might contain intervals: defer elementwise to get_loc
indexer, missing = [], []
- for i, key in enumerate(target):
+ for i, key in enumerate(target_as_index):
try:
locs = self.get_loc(key)
if isinstance(locs, slice):
@@ -973,8 +1028,10 @@ def get_indexer_non_unique(
indexer.append(locs)
indexer = np.concatenate(indexer)
else:
- target = self._maybe_convert_i8(target)
- indexer, missing = self._engine.get_indexer_non_unique(target.values)
+ target_as_index = self._maybe_convert_i8(target_as_index)
+ indexer, missing = self._engine.get_indexer_non_unique(
+ target_as_index.values
+ )
return ensure_platform_int(indexer), ensure_platform_int(missing)
@@ -991,7 +1048,7 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
List of indices.
"""
if self.is_overlapping:
- return self.get_indexer_non_unique(target, **kwargs)[0]
+ return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
@Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
@@ -1051,7 +1108,7 @@ def insert(self, loc, item):
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError(
- "inserted item must be closed on the same " "side as the index"
+ "inserted item must be closed on the same side as the index"
)
left_insert = item.left
right_insert = item.right
@@ -1060,7 +1117,7 @@ def insert(self, loc, item):
left_insert = right_insert = item
else:
raise ValueError(
- "can only insert Interval objects and NA into " "an IntervalIndex"
+ "can only insert Interval objects and NA into an IntervalIndex"
)
new_left = self.left.insert(loc, left_insert)
@@ -1104,12 +1161,8 @@ def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
- """ actually format my specific types """
- from pandas.io.formats.format import ExtensionArrayFormatter
-
- return ExtensionArrayFormatter(
- values=self, na_rep=na_rep, justify="all", leading_space=False
- ).get_result()
+ # GH 28210: use base method but with different default na_rep
+ return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
def _format_data(self, name=None):
@@ -1180,11 +1233,41 @@ def equals(self, other):
and self.closed == other.closed
)
- @Appender(_interval_shared_docs["contains"] % _index_doc_kwargs)
+ @Appender(
+ _interval_shared_docs["contains"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
+ >>> intervals
+ IntervalIndex([(0, 1], (1, 3], (2, 4]],
+ closed='right',
+ dtype='interval[int64]')
+ >>> intervals.contains(0.5)
+ array([ True, False, False])
+ """
+ ),
+ )
+ )
def contains(self, other):
return self._data.contains(other)
- @Appender(_interval_shared_docs["overlaps"] % _index_doc_kwargs)
+ @Appender(
+ _interval_shared_docs["overlaps"]
+ % dict(
+ klass="IntervalIndex",
+ examples=textwrap.dedent(
+ """\
+ >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])
+ >>> intervals
+ IntervalIndex([(0, 1], (1, 3], (2, 4]],
+ closed='right',
+ dtype='interval[int64]')
+ """
+ ),
+ )
+ )
def overlaps(self, other):
return self._data.overlaps(other)
@@ -1250,15 +1333,9 @@ def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
- lmiss = other.left.get_indexer_non_unique(self.left)[1]
- lmatch = np.setdiff1d(np.arange(len(self)), lmiss)
-
- for i in lmatch:
- potential = other.left.get_loc(self.left[i])
- if is_scalar(potential):
- if self.right[i] == other.right[potential]:
- mask[i] = True
- elif self.right[i] in other.right[potential]:
+ other_tups = set(zip(other.left, other.right))
+ for i, tup in enumerate(zip(self.left, self.right)):
+ if tup in other_tups:
mask[i] = True
return self[mask]
@@ -1317,7 +1394,7 @@ def _is_type_compatible(a, b):
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
or (is_td_compat(a) and is_td_compat(b))
- or com._any_none(a, b)
+ or com.any_none(a, b)
)
@@ -1325,7 +1402,7 @@ def interval_range(
start=None, end=None, periods=None, freq=None, name=None, closed="right"
):
"""
- Return a fixed frequency IntervalIndex
+ Return a fixed frequency IntervalIndex.
Parameters
----------
@@ -1415,7 +1492,7 @@ def interval_range(
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
- if freq is None and com._any_none(periods, start, end):
+ if freq is None and com.any_none(periods, start, end):
freq = 1 if is_number(endpoint) else "D"
if com.count_not_none(start, end, periods, freq) != 3:
@@ -1462,7 +1539,7 @@ def interval_range(
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
- if com._all_not_none(start, end, freq):
+ if com.all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
@@ -1474,7 +1551,7 @@ def interval_range(
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
- if all(is_integer(x) for x in com._not_none(start, end, freq)):
+ if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, "int64")
else:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index b673c119c0498..3273c4f8cd13b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -8,6 +8,7 @@
from pandas._config import get_option
from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs
+from pandas._libs.hashtable import duplicated_int64
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
@@ -29,6 +30,8 @@
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
+from pandas.core.arrays import Categorical
+from pandas.core.arrays.categorical import _factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
@@ -39,6 +42,12 @@
)
from pandas.core.indexes.frozen import FrozenList, _ensure_frozen
import pandas.core.missing as missing
+from pandas.core.sorting import (
+ get_group_index,
+ indexer_from_factorized,
+ lexsort_indexer,
+)
+from pandas.core.util.hashing import hash_tuple, hash_tuples
from pandas.io.formats.printing import (
format_object_attrs,
@@ -51,6 +60,8 @@
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
)
+_no_default_names = object()
+
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
@@ -362,7 +373,7 @@ def _verify_integrity(self, codes=None, levels=None):
return new_codes
@classmethod
- def from_arrays(cls, arrays, sortorder=None, names=None):
+ def from_arrays(cls, arrays, sortorder=None, names=_no_default_names):
"""
Convert arrays to MultiIndex.
@@ -415,10 +426,8 @@ def from_arrays(cls, arrays, sortorder=None, names=None):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
- from pandas.core.arrays.categorical import _factorize_from_iterables
-
codes, levels = _factorize_from_iterables(arrays)
- if names is None:
+ if names is _no_default_names:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(
@@ -489,7 +498,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
- def from_product(cls, iterables, sortorder=None, names=None):
+ def from_product(cls, iterables, sortorder=None, names=_no_default_names):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
@@ -503,6 +512,11 @@ def from_product(cls, iterables, sortorder=None, names=None):
names : list / sequence of str, optional
Names for the levels in the index.
+ .. versionchanged:: 1.0.0
+
+ If not explicitly provided, names will be inferred from the
+ elements of iterables if an element has a name attribute
+
Returns
-------
index : MultiIndex
@@ -527,7 +541,6 @@ def from_product(cls, iterables, sortorder=None, names=None):
(2, 'purple')],
names=['number', 'color'])
"""
- from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
@@ -536,6 +549,9 @@ def from_product(cls, iterables, sortorder=None, names=None):
iterables = list(iterables)
codes, levels = _factorize_from_iterables(iterables)
+ if names is _no_default_names:
+ names = [getattr(it, "name", None) for it in iterables]
+
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@@ -616,6 +632,15 @@ def _values(self):
# We override here, since our parent uses _data, which we dont' use.
return self.values
+ @property
+ def shape(self):
+ """
+ Return a tuple of the shape of the underlying data.
+ """
+ # overriding the base Index.shape definition to avoid materializing
+ # the values (GH-27384, GH-27775)
+ return (len(self),)
+
@property
def array(self):
"""
@@ -772,7 +797,7 @@ def codes(self):
@property
def labels(self):
warnings.warn(
- (".labels was deprecated in version 0.24.0. " "Use .codes instead."),
+ (".labels was deprecated in version 0.24.0. Use .codes instead."),
FutureWarning,
stacklevel=2,
)
@@ -1213,7 +1238,7 @@ def _set_names(self, names, level=None, validate=True):
raise ValueError("Length of names must match length of level.")
if validate and level is None and len(names) != self.nlevels:
raise ValueError(
- "Length of names must match number of levels in " "MultiIndex."
+ "Length of names must match number of levels in MultiIndex."
)
if level is None:
@@ -1235,7 +1260,7 @@ def _set_names(self, names, level=None, validate=True):
self.levels[l].rename(name, inplace=True)
names = property(
- fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex\n"""
+ fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
@Appender(_index_shared_docs["_get_grouper_for_level"])
@@ -1280,7 +1305,7 @@ def _get_level_number(self, level):
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
- "The name %s occurs multiple times, use a " "level number" % level
+ "The name %s occurs multiple times, use a level number" % level
)
try:
level = self.names.index(level)
@@ -1359,6 +1384,12 @@ def is_monotonic_increasing(self):
increasing) values.
"""
+ if all(x.is_monotonic for x in self.levels):
+ # If each level is sorted, we can operate on the codes directly. GH27495
+ return libalgos.is_lexsorted(
+ [x.astype("int64", copy=False) for x in self.codes]
+ )
+
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i).values for i in reversed(range(len(self.levels)))
@@ -1393,8 +1424,6 @@ def _inferred_type_levels(self):
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
- from pandas.core.util.hashing import hash_tuples
-
return hash_tuples(self)
def _hashed_indexing_key(self, key):
@@ -1414,9 +1443,7 @@ def _hashed_indexing_key(self, key):
Notes
-----
we need to stringify if we have mixed levels
-
"""
- from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
@@ -1436,9 +1463,6 @@ def f(k, stringify):
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
- from pandas.core.sorting import get_group_index
- from pandas._libs.hashtable import duplicated_int64
-
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
@@ -1464,9 +1488,6 @@ def dropna(self, how="any"):
return self.copy(codes=new_codes, deep=True)
def get_value(self, series, key):
- # somewhat broken encapsulation
- from pandas.core.indexing import maybe_droplevels
-
# Label-based
s = com.values_from_object(series)
k = com.values_from_object(key)
@@ -1633,11 +1654,11 @@ def to_frame(self, index=True, name=None):
if name is not None:
if not is_list_like(name):
- raise TypeError("'name' must be a list / sequence " "of column names.")
+ raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
- "'name' should have same length as " "number of levels on index."
+ "'name' should have same length as number of levels on index."
)
idx_names = name
else:
@@ -1751,7 +1772,7 @@ def is_all_dates(self):
def is_lexsorted(self):
"""
- Return True if the codes are lexicographically sorted
+ Return True if the codes are lexicographically sorted.
Returns
-------
@@ -2104,9 +2125,7 @@ def repeat(self, repeats, axis=None):
)
def where(self, cond, other=None):
- raise NotImplementedError(
- ".where is not supported for " "MultiIndex operations"
- )
+ raise NotImplementedError(".where is not supported for MultiIndex operations")
@deprecate_kwarg(old_arg_name="labels", new_arg_name="codes")
def drop(self, codes, level=None, errors="raise"):
@@ -2197,11 +2216,6 @@ def swaplevel(self, i=-2, j=-1):
MultiIndex
A new MultiIndex.
- .. versionchanged:: 0.18.1
-
- The indexes ``i`` and ``j`` are now optional, and default to
- the two innermost levels of the index.
-
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
@@ -2242,7 +2256,7 @@ def swaplevel(self, i=-2, j=-1):
def reorder_levels(self, order):
"""
- Rearrange levels using input order. May not drop or duplicate levels
+ Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
@@ -2276,7 +2290,6 @@ def _get_codes_for_sorting(self):
for sorting, where we need to disambiguate that -1 is not
a valid valid
"""
- from pandas.core.arrays import Categorical
def cats(level_codes):
return np.arange(
@@ -2311,8 +2324,6 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
indexer : np.ndarray
Indices of output values in original index.
"""
- from pandas.core.sorting import indexer_from_factorized
-
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
@@ -2323,8 +2334,6 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
- from pandas.core.sorting import lexsort_indexer
-
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
@@ -2421,14 +2430,12 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
)
if not self.is_unique:
- raise ValueError(
- "Reindexing only valid with uniquely valued " "Index objects"
- )
+ raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
- "tolerance not implemented yet " "for MultiIndex"
+ "tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(target, method, limit)
elif method == "nearest":
@@ -2708,7 +2715,7 @@ def _maybe_to_slice(loc):
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
- def get_loc_level(self, key, level=0, drop_level=True):
+ def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
@@ -2749,7 +2756,8 @@ def get_loc_level(self, key, level=0, drop_level=True):
(1, None)
"""
- def maybe_droplevels(indexer, levels, drop_level):
+ # different name to distinguish from maybe_droplevels
+ def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludgearound
@@ -2767,7 +2775,7 @@ def maybe_droplevels(indexer, levels, drop_level):
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
- "Key for location must have same " "length as number of levels"
+ "Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
@@ -2779,7 +2787,7 @@ def maybe_droplevels(indexer, levels, drop_level):
result = loc if result is None else result & loc
- return result, maybe_droplevels(result, level, drop_level)
+ return result, maybe_mi_droplevels(result, level, drop_level)
level = self._get_level_number(level)
@@ -2792,7 +2800,7 @@ def maybe_droplevels(indexer, levels, drop_level):
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
- new_index = maybe_droplevels(indexer, [0], drop_level)
+ new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
@@ -2807,7 +2815,7 @@ def partial_selection(key, indexer=None):
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
- return indexer, maybe_droplevels(indexer, ilevels, drop_level)
+ return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
@@ -2842,10 +2850,10 @@ def partial_selection(key, indexer=None):
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
- return indexer, maybe_droplevels(indexer, ilevels, drop_level)
+ return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
- return indexer, maybe_droplevels(indexer, [level], drop_level)
+ return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
@@ -3324,7 +3332,7 @@ def astype(self, dtype, copy=True):
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
msg = (
- "Setting {cls} dtype to anything other than object " "is not supported"
+ "Setting {cls} dtype to anything other than object is not supported"
).format(cls=self.__class__)
raise TypeError(msg)
elif copy is True:
@@ -3370,7 +3378,7 @@ def insert(self, loc, item):
if not isinstance(item, tuple):
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
- raise ValueError("Item must have length equal to number of " "levels.")
+ raise ValueError("Item must have length equal to number of levels.")
new_levels = []
new_codes = []
@@ -3463,3 +3471,34 @@ def _sparsify(label_list, start=0, sentinel=""):
def _get_na_rep(dtype):
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
+
+
+def maybe_droplevels(index, key):
+ """
+ Attempt to drop level or levels from the given index.
+
+ Parameters
+ ----------
+ index: Index
+ key : scalar or tuple
+
+ Returns
+ -------
+ Index
+ """
+ # drop levels
+ original_index = index
+ if isinstance(key, tuple):
+ for _ in key:
+ try:
+ index = index.droplevel(0)
+ except ValueError:
+ # we have dropped too much, so back out
+ return original_index
+ else:
+ try:
+ index = index.droplevel(0)
+ except ValueError:
+ pass
+
+ return index
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index daf26d53aa6e2..e83360dc701f3 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -5,6 +5,7 @@
from pandas._libs import index as libindex
from pandas.util._decorators import Appender, cache_readonly
+from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool,
is_bool_dtype,
@@ -17,7 +18,6 @@
needs_i8_conversion,
pandas_dtype,
)
-import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import (
ABCFloat64Index,
ABCInt64Index,
@@ -99,7 +99,7 @@ def _convert_for_op(self, value):
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
- raise ValueError("list-like tolerance size must match " "target index size")
+ raise ValueError("list-like tolerance size must match target index size")
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
@@ -129,7 +129,8 @@ def _assert_safe_casting(cls, data, subarr):
pass
def _concat_same_dtype(self, indexes, name):
- return _concat._concat_index_same_dtype(indexes).rename(name)
+ result = type(indexes[0])(np.concatenate([x._values for x in indexes]))
+ return result.rename(name)
@property
def is_all_dates(self):
@@ -255,7 +256,7 @@ def _assert_safe_casting(cls, data, subarr):
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
- raise TypeError("Unsafe NumPy casting, you must " "explicitly cast")
+ raise TypeError("Unsafe NumPy casting, you must explicitly cast")
def _is_compatible_with_other(self, other):
return super()._is_compatible_with_other(other) or all(
@@ -329,7 +330,7 @@ def _assert_safe_casting(cls, data, subarr):
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
- raise TypeError("Unsafe NumPy casting, you must " "explicitly cast")
+ raise TypeError("Unsafe NumPy casting, you must explicitly cast")
def _is_compatible_with_other(self, other):
return super()._is_compatible_with_other(other) or all(
@@ -367,12 +368,11 @@ def astype(self, dtype, copy=True):
"values are required for conversion"
).format(dtype=dtype)
raise TypeError(msg)
- elif (
- is_integer_dtype(dtype) and not is_extension_array_dtype(dtype)
- ) and self.hasnans:
+ elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype):
# TODO(jreback); this can change once we have an EA Index type
# GH 13149
- raise ValueError("Cannot convert NA to integer")
+ arr = astype_nansafe(self.values, dtype=dtype)
+ return Int64Index(arr)
return super().astype(dtype, copy=copy)
@Appender(_index_shared_docs["_convert_scalar_indexer"])
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 47cf0f26f9ca5..ee85b0fb91acb 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,5 +1,6 @@
from datetime import datetime, timedelta
import warnings
+import weakref
import numpy as np
@@ -63,7 +64,10 @@ class PeriodDelegateMixin(DatetimelikeDelegateMixin):
_delegate_class = PeriodArray
_delegated_properties = PeriodArray._datetimelike_ops
- _delegated_methods = set(PeriodArray._datetimelike_methods) | {"_addsub_int_array"}
+ _delegated_methods = set(PeriodArray._datetimelike_methods) | {
+ "_addsub_int_array",
+ "strftime",
+ }
_raw_properties = {"is_leap_year"}
@@ -173,6 +177,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin):
_data = None
_engine_type = libindex.PeriodEngine
+ _supports_partial_string_indexing = True
# ------------------------------------------------------------------------
# Index Constructors
@@ -437,7 +442,9 @@ def _formatter_func(self):
@cache_readonly
def _engine(self):
- return self._engine_type(lambda: self, len(self))
+ # To avoid a reference cycle, pass a weakref of self to _engine_type.
+ period = weakref.ref(self)
+ return self._engine_type(period, len(self))
@Appender(_index_shared_docs["contains"])
def __contains__(self, key):
@@ -644,10 +651,13 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if isinstance(target, PeriodIndex):
target = target.asi8
+ self_index = self._int64index
+ else:
+ self_index = self
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
- return Index.get_indexer(self._int64index, target, method, limit, tolerance)
+ return Index.get_indexer(self_index, target, method, limit, tolerance)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
@@ -805,7 +815,7 @@ def _parsed_string_to_bounds(self, reso, parsed):
def _get_string_slice(self, key):
if not self.is_monotonic:
- raise ValueError("Partial indexing only valid for " "ordered time series")
+ raise ValueError("Partial indexing only valid for ordered time series")
key, parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
@@ -822,7 +832,7 @@ def _get_string_slice(self, key):
def _convert_tolerance(self, tolerance, target):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target)
if target.size != tolerance.size and tolerance.size > 1:
- raise ValueError("list-like tolerance size must match " "target index size")
+ raise ValueError("list-like tolerance size must match target index size")
return self._maybe_convert_timedelta(tolerance)
def insert(self, loc, item):
@@ -931,11 +941,11 @@ def item(self):
return the first element of the underlying data as a python
scalar
- .. deprecated 0.25.0
+ .. deprecated:: 0.25.0
"""
warnings.warn(
- "`item` has been deprecated and will be removed in a " "future version",
+ "`item` has been deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
@@ -943,10 +953,9 @@ def item(self):
if len(self) == 1:
return self[0]
else:
+ # TODO: is this still necessary?
# copy numpy's message here because Py26 raises an IndexError
- raise ValueError(
- "can only convert an array of size 1 to a " "Python scalar"
- )
+ raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
def data(self):
@@ -988,7 +997,7 @@ def memory_usage(self, deep=False):
def period_range(start=None, end=None, periods=None, freq=None, name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
- frequency
+ frequency.
Parameters
----------
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 16098c474a473..8783351cc74d1 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -11,7 +11,6 @@
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
-from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
@@ -26,6 +25,7 @@
from pandas.core import ops
import pandas.core.common as com
+from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index
@@ -75,7 +75,7 @@ class RangeIndex(Int64Index):
_engine_type = libindex.Int64Engine
_range = None # type: range
- # check whether self._data has benn called
+ # check whether self._data has been called
_cached_data = None # type: np.ndarray
# --------------------------------------------------------------------
# Constructors
@@ -110,7 +110,7 @@ def __new__(
return cls._simple_new(start, dtype=dtype, name=name)
# validate the arguments
- if com._all_none(start, stop, step):
+ if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
@@ -236,7 +236,7 @@ def _format_with_header(self, header, na_rep="NaN", **kwargs):
@cache_readonly
def start(self):
"""
- The value of the `start` parameter (``0`` if this was not supplied)
+ The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@@ -244,7 +244,7 @@ def start(self):
@property
def _start(self):
"""
- The value of the `start` parameter (``0`` if this was not supplied)
+ The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
@@ -259,14 +259,14 @@ def _start(self):
@cache_readonly
def stop(self):
"""
- The value of the `stop` parameter
+ The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self):
"""
- The value of the `stop` parameter
+ The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
@@ -282,7 +282,7 @@ def _stop(self):
@cache_readonly
def step(self):
"""
- The value of the `step` parameter (``1`` if this was not supplied)
+ The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@@ -290,7 +290,7 @@ def step(self):
@property
def _step(self):
"""
- The value of the `step` parameter (``1`` if this was not supplied)
+ The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
@@ -647,7 +647,53 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
return super().join(other, how, level, return_indexers, sort)
def _concat_same_dtype(self, indexes, name):
- return _concat._concat_rangeindex_same_dtype(indexes).rename(name)
+ """
+ Concatenates multiple RangeIndex instances. All members of "indexes" must
+ be of type RangeIndex; result will be RangeIndex if possible, Int64Index
+ otherwise. E.g.:
+ indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
+ indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
+ """
+ start = step = next_ = None
+
+ # Filter the empty indexes
+ non_empty_indexes = [obj for obj in indexes if len(obj)]
+
+ for obj in non_empty_indexes:
+ rng = obj._range # type: range
+
+ if start is None:
+ # This is set by the first non-empty index
+ start = rng.start
+ if step is None and len(rng) > 1:
+ step = rng.step
+ elif step is None:
+ # First non-empty index had only one element
+ if rng.start == start:
+ result = Int64Index(np.concatenate([x._values for x in indexes]))
+ return result.rename(name)
+
+ step = rng.start - start
+
+ non_consecutive = (step != rng.step and len(rng) > 1) or (
+ next_ is not None and rng.start != next_
+ )
+ if non_consecutive:
+ result = Int64Index(np.concatenate([x._values for x in indexes]))
+ return result.rename(name)
+
+ if step is not None:
+ next_ = rng[-1] + step
+
+ if non_empty_indexes:
+ # Get the stop value from "next" or alternatively
+ # from the last non-empty index
+ stop = non_empty_indexes[-1].stop if next_ is None else next_
+ return RangeIndex(start, stop, step).rename(name)
+
+ # Here all "indexes" had 0 length, i.e. were empty.
+ # In this case return an empty range index.
+ return RangeIndex(0, 0).rename(name)
def __len__(self):
"""
@@ -737,9 +783,8 @@ def _evaluate_numeric_binop(self, other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
- other = self._validate_for_numeric_binop(other, op)
+ other = extract_array(other, extract_numpy=True)
attrs = self._get_attributes_dict()
- attrs = self._maybe_update_attributes(attrs)
left, right = self, other
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 5a2dece98150f..b03d60c7b5b37 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -18,7 +18,7 @@
is_timedelta64_ns_dtype,
pandas_dtype,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.missing import isna
from pandas.core.accessor import delegate_names
@@ -30,8 +30,6 @@
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin,
DatetimelikeDelegateMixin,
- maybe_unwrap_index,
- wrap_arithmetic_op,
)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
@@ -39,18 +37,6 @@
from pandas.tseries.frequencies import to_offset
-def _make_wrapped_arith_op(opname):
-
- meth = getattr(TimedeltaArray, opname)
-
- def method(self, other):
- result = meth(self._data, maybe_unwrap_index(other))
- return wrap_arithmetic_op(self, other, result)
-
- method.__name__ = opname
- return method
-
-
class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
# Most attrs are dispatched via datetimelike_{ops,methods}
# Some are "raw" methods, the result is not not re-boxed in an Index
@@ -58,7 +44,12 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
# which we we dont' want to expose in the .dt accessor.
_delegate_class = TimedeltaArray
_delegated_properties = TimedeltaArray._datetimelike_ops + ["components"]
- _delegated_methods = TimedeltaArray._datetimelike_methods + ["_box_values"]
+ _delegated_methods = TimedeltaArray._datetimelike_methods + [
+ "_box_values",
+ "__neg__",
+ "__pos__",
+ "__abs__",
+ ]
_raw_properties = {"components"}
_raw_methods = {"to_pytimedelta"}
@@ -70,27 +61,27 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
TimedeltaArray,
TimedeltaDelegateMixin._delegated_methods,
typ="method",
- overwrite=False,
+ overwrite=True,
)
class TimedeltaIndex(
DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index, TimedeltaDelegateMixin
):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
- which can be boxed to timedelta objects
+ which can be boxed to timedelta objects.
Parameters
----------
data : array-like (1-dimensional), optional
- Optional timedelta-like data to construct index with
+ Optional timedelta-like data to construct index with.
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
- which is an integer/float number
- freq : string or pandas offset object, optional
+ Which is an integer/float number.
+ freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
- inferred frequency upon creation
+ inferred frequency upon creation.
copy : bool
- Make a copy of input ndarray
+ Make a copy of input ndarray.
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
@@ -99,24 +90,24 @@ class TimedeltaIndex(
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
- over end argument
+ over end argument.
.. deprecated:: 0.24.0
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
- time on or just past end argument
+ time on or just past end argument.
.. deprecated:: 0.24. 0
- closed : string or None, default None
+ closed : str or None, default None
Make the interval closed with respect to the given frequency to
- the 'left', 'right', or both sides (None)
+ the 'left', 'right', or both sides (None).
.. deprecated:: 0.24. 0
name : object
- Name to be stored in the index
+ Name to be stored in the index.
Attributes
----------
@@ -293,14 +284,6 @@ def __setstate__(self, state):
_unpickle_compat = __setstate__
- def _maybe_update_attributes(self, attrs):
- """ Update Index attributes (e.g. freq) depending on op """
- freq = attrs.get("freq", None)
- if freq is not None:
- # no need to infer if freq is None
- attrs["freq"] = "infer"
- return attrs
-
# -------------------------------------------------------------------
# Rendering Methods
@@ -313,24 +296,15 @@ def _formatter_func(self):
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
- return Timedelta64Formatter(
- values=self, nat_rep=na_rep, justify="all"
- ).get_result()
+ return np.asarray(
+ Timedelta64Formatter(
+ values=self, nat_rep=na_rep, justify="all"
+ ).get_result()
+ )
# -------------------------------------------------------------------
# Wrapping TimedeltaArray
- __mul__ = _make_wrapped_arith_op("__mul__")
- __rmul__ = _make_wrapped_arith_op("__rmul__")
- __floordiv__ = _make_wrapped_arith_op("__floordiv__")
- __rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
- __mod__ = _make_wrapped_arith_op("__mod__")
- __rmod__ = _make_wrapped_arith_op("__rmod__")
- __divmod__ = _make_wrapped_arith_op("__divmod__")
- __rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
- __truediv__ = _make_wrapped_arith_op("__truediv__")
- __rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
-
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
@@ -487,7 +461,7 @@ def _fast_union(self, other):
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right.values[loc:]
- dates = _concat._concat_compat((left.values, right_chunk))
+ dates = concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
@@ -712,7 +686,6 @@ def delete(self, loc):
TimedeltaIndex._add_comparison_ops()
-TimedeltaIndex._add_numeric_methods_unary()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
@@ -727,6 +700,7 @@ def _is_convertible_to_index(other):
"floating",
"mixed-integer",
"integer",
+ "integer-na",
"mixed-integer-float",
"mixed",
):
@@ -739,7 +713,7 @@ def timedelta_range(
):
"""
Return a fixed frequency TimedeltaIndex, with day as the default
- frequency
+ frequency.
Parameters
----------
@@ -802,7 +776,7 @@ def timedelta_range(
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
- if freq is None and com._any_none(periods, start, end):
+ if freq is None and com.any_none(periods, start, end):
freq = "D"
freq, freq_infer = dtl.maybe_infer_freq(freq)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5aee37bc3b833..45cb037600fd7 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -21,12 +21,12 @@
is_sequence,
is_sparse,
)
-from pandas.core.dtypes.concat import _concat_compat
-from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.dtypes.concat import concat_compat
+from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import _infer_fill_value, isna
import pandas.core.common as com
-from pandas.core.index import Index, InvalidIndexError, MultiIndex
+from pandas.core.index import Index, InvalidIndexError
from pandas.core.indexers import is_list_like_indexer, length_of_indexer
@@ -49,7 +49,7 @@ def get_indexers_list():
# the public IndexSlicerMaker
class _IndexSlice:
"""
- Create an object to more easily perform multi-index slicing
+ Create an object to more easily perform multi-index slicing.
See Also
--------
@@ -117,13 +117,14 @@ def __iter__(self):
raise NotImplementedError("ix is not iterable")
def __getitem__(self, key):
+ # Used in ix and downstream in geopandas _CoordinateIndexer
if type(key) is tuple:
# Note: we check the type exactly instead of with isinstance
# because NamedTuple is checked separately.
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
try:
values = self.obj._get_value(*key)
- except (KeyError, TypeError, InvalidIndexError):
+ except (KeyError, TypeError, InvalidIndexError, AttributeError):
# TypeError occurs here if the key has non-hashable entries,
# generally slice or list.
# TODO(ix): most/all of the TypeError cases here are for ix,
@@ -131,6 +132,9 @@ def __getitem__(self, key):
# The InvalidIndexError is only catched for compatibility
# with geopandas, see
# https://github.com/pandas-dev/pandas/issues/27258
+ # TODO: The AttributeError is for IntervalIndex which
+ # incorrectly implements get_value, see
+ # https://github.com/pandas-dev/pandas/issues/27865
pass
else:
if is_scalar(values):
@@ -164,28 +168,29 @@ def _slice(self, obj, axis: int, kind=None):
def _get_setitem_indexer(self, key):
if self.axis is not None:
- return self._convert_tuple(key, is_setter=True)
+ return self._convert_tuple(key)
ax = self.obj._get_axis(0)
- if isinstance(ax, MultiIndex) and self.name != "iloc":
+ if isinstance(ax, ABCMultiIndex) and self.name != "iloc":
try:
return ax.get_loc(key)
- except Exception:
+ except (TypeError, KeyError):
+ # TypeError e.g. passed a bool
pass
if isinstance(key, tuple):
try:
- return self._convert_tuple(key, is_setter=True)
+ return self._convert_tuple(key)
except IndexingError:
pass
if isinstance(key, range):
- return self._convert_range(key, is_setter=True)
+ return list(key)
axis = self.axis or 0
try:
- return self._convert_to_indexer(key, axis=axis, is_setter=True)
+ return self._convert_to_indexer(key, axis=axis)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
@@ -223,10 +228,10 @@ def _validate_key(self, key, axis: int):
"""
raise AbstractMethodError(self)
- def _has_valid_tuple(self, key):
+ def _has_valid_tuple(self, key: Tuple):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
- if i >= self.obj.ndim:
+ if i >= self.ndim:
raise IndexingError("Too many indexers")
try:
self._validate_key(k, i)
@@ -236,41 +241,35 @@ def _has_valid_tuple(self, key):
"[{types}] types".format(types=self._valid_types)
)
- def _is_nested_tuple_indexer(self, tup):
- if any(isinstance(ax, MultiIndex) for ax in self.obj.axes):
+ def _is_nested_tuple_indexer(self, tup: Tuple):
+ if any(isinstance(ax, ABCMultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
- def _convert_tuple(self, key, is_setter: bool = False):
+ def _convert_tuple(self, key):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
- keyidx.append(
- self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
- )
+ keyidx.append(self._convert_to_indexer(key, axis=axis))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
- if i >= self.obj.ndim:
+ if i >= self.ndim:
raise IndexingError("Too many indexers")
- idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
+ idx = self._convert_to_indexer(k, axis=i)
keyidx.append(idx)
return tuple(keyidx)
- def _convert_range(self, key, is_setter: bool = False):
- """ convert a range argument """
- return list(key)
-
def _convert_scalar_indexer(self, key, axis: int):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
- def _convert_slice_indexer(self, key, axis: int):
+ def _convert_slice_indexer(self, key: slice, axis: int):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
@@ -286,7 +285,7 @@ def _has_valid_positional_setitem_indexer(self, indexer):
raise IndexError("{0} cannot enlarge its target object".format(self.name))
else:
if not isinstance(indexer, tuple):
- indexer = self._tuplify(indexer)
+ indexer = _tuplify(self.ndim, indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
@@ -326,6 +325,17 @@ def _setitem_with_indexer(self, indexer, value):
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
+ # if we have any multi-indexes that have non-trivial slices
+ # (not null slices) then we must take the split path, xref
+ # GH 10360, GH 27841
+ if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
+ for i, ax in zip(indexer, self.obj.axes):
+ if isinstance(ax, ABCMultiIndex) and not (
+ is_integer(i) or com.is_null_slice(i)
+ ):
+ take_split_path = True
+ break
+
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
@@ -401,7 +411,7 @@ def _setitem_with_indexer(self, indexer, value):
assert info_axis == 1
if not isinstance(indexer, tuple):
- indexer = self._tuplify(indexer)
+ indexer = _tuplify(self.ndim, indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
@@ -413,7 +423,9 @@ def _setitem_with_indexer(self, indexer, value):
# if we have a partial multiindex, then need to adjust the plane
# indexer here
- if len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex):
+ if len(labels) == 1 and isinstance(
+ self.obj[labels[0]].axes[0], ABCMultiIndex
+ ):
item = labels[0]
obj = self.obj[item]
index = obj.index
@@ -484,9 +496,9 @@ def setter(item, v):
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
# we have an equal len Frame
- if isinstance(value, ABCDataFrame) and value.ndim > 1:
+ if isinstance(value, ABCDataFrame):
sub_indexer = list(indexer)
- multiindex_indexer = isinstance(labels, MultiIndex)
+ multiindex_indexer = isinstance(labels, ABCMultiIndex)
for item in labels:
if item in value:
@@ -607,7 +619,7 @@ def _setitem_with_indexer_missing(self, indexer, value):
if len(self.obj._values):
# GH#22717 handle casting compatibility that np.concatenate
# does incorrectly
- new_values = _concat_compat([self.obj._values, new_values])
+ new_values = concat_compat([self.obj._values, new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name
)._data
@@ -638,27 +650,23 @@ def _setitem_with_indexer_missing(self, indexer, value):
self.obj._maybe_update_cacher(clear=True)
return self.obj
- def _align_series(self, indexer, ser, multiindex_indexer=False):
+ def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
-
ser : pd.Series
The values to assign to the locations specified by `indexer`
-
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
-
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
-
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
@@ -674,7 +682,7 @@ def ravel(i):
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
- is_frame = self.obj.ndim == 2
+ is_frame = self.ndim == 2
obj = self.obj
# are we a single alignable value on a non-primary
@@ -734,8 +742,8 @@ def ravel(i):
raise ValueError("Incompatible indexer with Series")
- def _align_frame(self, indexer, df):
- is_frame = self.obj.ndim == 2
+ def _align_frame(self, indexer, df: ABCDataFrame):
+ is_frame = self.ndim == 2
if isinstance(indexer, tuple):
@@ -772,8 +780,8 @@ def _align_frame(self, indexer, df):
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (
- isinstance(ax, MultiIndex)
- and isinstance(df.index, MultiIndex)
+ isinstance(ax, ABCMultiIndex)
+ and isinstance(df.index, ABCMultiIndex)
and ax.nlevels != df.index.nlevels
):
raise TypeError(
@@ -786,7 +794,7 @@ def _align_frame(self, indexer, df):
raise ValueError("Incompatible indexer with DataFrame")
- def _getitem_tuple(self, tup):
+ def _getitem_tuple(self, tup: Tuple):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
@@ -809,7 +817,7 @@ def _getitem_tuple(self, tup):
return retval
- def _multi_take_opportunity(self, tup):
+ def _multi_take_opportunity(self, tup: Tuple):
"""
Check whether there is the possibility to use ``_multi_take``.
Currently the limit is that all axes being indexed must be indexed with
@@ -833,7 +841,7 @@ def _multi_take_opportunity(self, tup):
return True
- def _multi_take(self, tup):
+ def _multi_take(self, tup: Tuple):
"""
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
@@ -859,7 +867,7 @@ def _multi_take(self, tup):
def _convert_for_reindex(self, key, axis: int):
return key
- def _handle_lowerdim_multi_index_axis0(self, tup):
+ def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
try:
@@ -871,7 +879,7 @@ def _handle_lowerdim_multi_index_axis0(self, tup):
except KeyError as ek:
# raise KeyError if number of indexers match
# else IndexingError will be raised
- if len(tup) <= self.obj.index.nlevels and len(tup) > self.obj.ndim:
+ if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim:
raise ek
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
@@ -884,7 +892,7 @@ def _handle_lowerdim_multi_index_axis0(self, tup):
return None
- def _getitem_lowerdim(self, tup):
+ def _getitem_lowerdim(self, tup: Tuple):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
@@ -899,12 +907,12 @@ def _getitem_lowerdim(self, tup):
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
- if isinstance(ax0, MultiIndex) and self.name != "iloc":
+ if isinstance(ax0, ABCMultiIndex) and self.name != "iloc":
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
- if len(tup) > self.obj.ndim:
+ if len(tup) > self.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
@@ -948,7 +956,7 @@ def _getitem_lowerdim(self, tup):
raise IndexingError("not applicable")
- def _getitem_nested_tuple(self, tup):
+ def _getitem_nested_tuple(self, tup: Tuple):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
@@ -999,7 +1007,7 @@ def _getitem_axis(self, key, axis: int):
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif is_list_like_indexer(key) and not (
- isinstance(key, tuple) and isinstance(labels, MultiIndex)
+ isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)
):
if hasattr(key, "ndim") and key.ndim > 1:
@@ -1012,7 +1020,7 @@ def _getitem_axis(self, key, axis: int):
key = labels._maybe_cast_indexer(key)
if is_integer(key):
- if axis == 0 and isinstance(labels, MultiIndex):
+ if axis == 0 and isinstance(labels, ABCMultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
@@ -1188,9 +1196,7 @@ def _validate_read_indexer(
if not (ax.is_categorical() or ax.is_interval()):
warnings.warn(_missing_key_warning, FutureWarning, stacklevel=6)
- def _convert_to_indexer(
- self, obj, axis: int, is_setter: bool = False, raise_missing: bool = False
- ):
+ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
@@ -1214,10 +1220,8 @@ def _convert_to_indexer(
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
-
# but we will allow setting
- if is_setter:
- pass
+ pass
# see if we are positional in nature
is_int_index = labels.is_integer()
@@ -1227,8 +1231,8 @@ def _convert_to_indexer(
try:
return labels.get_loc(obj)
except LookupError:
- if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
- if is_setter and len(obj) == labels.nlevels:
+ if isinstance(obj, tuple) and isinstance(labels, ABCMultiIndex):
+ if len(obj) == labels.nlevels:
return {"key": obj}
raise
except TypeError:
@@ -1242,17 +1246,14 @@ def _convert_to_indexer(
# if we are setting and its not a valid location
# its an insert which fails by definition
- if is_setter:
+ if self.name == "loc":
# always valid
- if self.name == "loc":
- return {"key": obj}
+ return {"key": obj}
+ if obj >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex):
# a positional
- if obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex):
- raise ValueError(
- "cannot set by positional indexing with enlargement"
- )
+ raise ValueError("cannot set by positional indexing with enlargement")
return obj
@@ -1267,22 +1268,16 @@ def _convert_to_indexer(
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
- kwargs = {"raise_missing": True if is_setter else raise_missing}
- return self._get_listlike_indexer(obj, axis, **kwargs)[1]
+ return self._get_listlike_indexer(obj, axis, raise_missing=True)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
- if not is_list_like_indexer(obj) and is_setter:
+ if not is_list_like_indexer(obj):
return {"key": obj}
raise
- def _tuplify(self, loc):
- tup = [slice(None, None) for _ in range(self.ndim)]
- tup[0] = loc
- return tuple(tup)
-
def _get_slice_axis(self, slice_obj: slice, axis: int):
# caller is responsible for ensuring non-None axis
obj = self.obj
@@ -1422,7 +1417,7 @@ def _getbool_axis(self, key, axis: int):
# caller is responsible for ensuring non-None axis
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
- inds, = key.nonzero()
+ inds = key.nonzero()[0]
try:
return self.obj.take(inds, axis=axis)
except Exception as detail:
@@ -1723,7 +1718,12 @@ def _is_scalar_access(self, key: Tuple):
return False
ax = self.obj.axes[i]
- if isinstance(ax, MultiIndex):
+ if isinstance(ax, ABCMultiIndex):
+ return False
+
+ if isinstance(k, str) and ax._supports_partial_string_indexing:
+ # partial string indexing, df.loc['2000', 'A']
+ # should not be considered scalar
return False
if not ax.is_unique:
@@ -1740,8 +1740,11 @@ def _getitem_scalar(self, key):
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
- if isinstance(labels, MultiIndex):
- if isinstance(key, str) and labels.levels[0].is_all_dates:
+ if isinstance(labels, ABCMultiIndex):
+ if (
+ isinstance(key, str)
+ and labels.levels[0]._supports_partial_string_indexing
+ ):
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
@@ -1751,7 +1754,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
- if isinstance(component, str) and labels.levels[i].is_all_dates:
+ if (
+ isinstance(component, str)
+ and labels.levels[i]._supports_partial_string_indexing
+ ):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
@@ -1778,7 +1784,7 @@ def _getitem_axis(self, key, axis: int):
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
- if isinstance(labels, MultiIndex):
+ if isinstance(labels, ABCMultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
@@ -1806,7 +1812,7 @@ def _getitem_axis(self, key, axis: int):
key = tuple([key])
# an iterable multi-selection
- if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
+ if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
@@ -2037,7 +2043,7 @@ def _getitem_scalar(self, key):
values = self.obj._get_value(*key, takeable=True)
return values
- def _validate_integer(self, key, axis):
+ def _validate_integer(self, key: int, axis: int):
"""
Check that 'key' is a valid position in the desired axis.
@@ -2062,7 +2068,7 @@ def _validate_integer(self, key, axis):
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
- def _getitem_tuple(self, tup):
+ def _getitem_tuple(self, tup: Tuple):
self._has_valid_tuple(tup)
try:
@@ -2081,6 +2087,8 @@ def _getitem_tuple(self, tup):
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
+ # TODO: this is never reached in tests; can we confirm that
+ # it is impossible?
axis -= 1
# try to get for the next axis
@@ -2134,9 +2142,7 @@ def _getitem_axis(self, key, axis: int):
return self._get_loc(key, axis=axis)
# raise_missing is included for compat with the parent class signature
- def _convert_to_indexer(
- self, obj, axis: int, is_setter: bool = False, raise_missing: bool = False
- ):
+ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
""" much simpler as we only have to deal with our valid types """
# make need to convert a float key
@@ -2156,11 +2162,11 @@ def _convert_to_indexer(
)
-class _ScalarAccessIndexer(_NDFrameIndexer):
+class _ScalarAccessIndexer(_NDFrameIndexerBase):
""" access scalars quickly """
def _convert_key(self, key, is_setter: bool = False):
- return list(key)
+ raise AbstractMethodError(self)
def __getitem__(self, key):
if not isinstance(key, tuple):
@@ -2182,8 +2188,8 @@ def __setitem__(self, key, value):
key = com.apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
- key = self._tuplify(key)
- if len(key) != self.obj.ndim:
+ key = _tuplify(self.ndim, key)
+ if len(key) != self.ndim:
raise ValueError("Not enough indexers for scalar access (setting)!")
key = list(self._convert_key(key, is_setter=True))
key.append(value)
@@ -2313,9 +2319,6 @@ class _iAtIndexer(_ScalarAccessIndexer):
_takeable = True
- def _has_valid_setitem_indexer(self, indexer):
- self._has_valid_positional_setitem_indexer(indexer)
-
def _convert_key(self, key, is_setter: bool = False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
@@ -2324,6 +2327,25 @@ def _convert_key(self, key, is_setter: bool = False):
return key
+def _tuplify(ndim: int, loc) -> tuple:
+ """
+ Given an indexer for the first dimension, create an equivalent tuple
+ for indexing over all dimensions.
+
+ Parameters
+ ----------
+ ndim : int
+ loc : object
+
+ Returns
+ -------
+ tuple
+ """
+ tup = [slice(None, None) for _ in range(ndim)]
+ tup[0] = loc
+ return tuple(tup)
+
+
def convert_to_index_sliceable(obj, key):
"""
if we are index sliceable, then return my slicer, otherwise return None
@@ -2340,7 +2362,7 @@ def convert_to_index_sliceable(obj, key):
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
- if idx.is_all_dates:
+ if idx._supports_partial_string_indexing:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
@@ -2455,7 +2477,7 @@ def is_nested_tuple(tup, labels):
for i, k in enumerate(tup):
if is_list_like(k) or isinstance(k, slice):
- return isinstance(labels, MultiIndex)
+ return isinstance(labels, ABCMultiIndex)
return False
@@ -2473,25 +2495,6 @@ def need_slice(obj):
)
-def maybe_droplevels(index, key):
- # drop levels
- original_index = index
- if isinstance(key, tuple):
- for _ in key:
- try:
- index = index.droplevel(0)
- except ValueError:
- # we have dropped too much, so back out
- return original_index
- else:
- try:
- index = index.droplevel(0)
- except ValueError:
- pass
-
- return index
-
-
def _non_reducing_slice(slice_):
"""
Ensurse that a slice doesn't reduce to a Series or Scalar.
diff --git a/pandas/core/internals/arrays.py b/pandas/core/internals/arrays.py
deleted file mode 100644
index 18af328bfa77f..0000000000000
--- a/pandas/core/internals/arrays.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-Methods for cleaning, validating, and unboxing arrays.
-"""
-from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries
-
-
-def extract_array(obj, extract_numpy=False):
- """
- Extract the ndarray or ExtensionArray from a Series or Index.
-
- For all other types, `obj` is just returned as is.
-
- Parameters
- ----------
- obj : object
- For Series / Index, the underlying ExtensionArray is unboxed.
- For Numpy-backed ExtensionArrays, the ndarray is extracted.
-
- extract_numpy : bool, default False
- Whether to extract the ndarray from a PandasArray
-
- Returns
- -------
- arr : object
-
- Examples
- --------
- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
- [a, b, c]
- Categories (3, object): [a, b, c]
-
- Other objects like lists, arrays, and DataFrames are just passed through.
-
- >>> extract_array([1, 2, 3])
- [1, 2, 3]
-
- For an ndarray-backed Series / Index a PandasArray is returned.
-
- >>> extract_array(pd.Series([1, 2, 3]))
- <PandasArray>
- [1, 2, 3]
- Length: 3, dtype: int64
-
- To extract all the way down to the ndarray, pass ``extract_numpy=True``.
-
- >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
- array([1, 2, 3])
- """
- if isinstance(obj, (ABCIndexClass, ABCSeries)):
- obj = obj.array
-
- if extract_numpy and isinstance(obj, ABCPandasArray):
- obj = obj.to_numpy()
-
- return obj
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 26aca34f20594..b76cb5cbec626 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -7,7 +7,7 @@
import numpy as np
-from pandas._libs import NaT, lib, tslib, tslibs
+from pandas._libs import NaT, Timestamp, lib, tslib, writers
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
@@ -18,7 +18,7 @@
find_common_type,
infer_dtype_from,
infer_dtype_from_scalar,
- maybe_convert_objects,
+ maybe_downcast_numeric,
maybe_downcast_to_dtype,
maybe_infer_dtype_type,
maybe_promote,
@@ -50,13 +50,12 @@
is_timedelta64_dtype,
pandas_dtype,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_categorical, concat_datetime
from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCExtensionArray,
- ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
@@ -69,21 +68,15 @@
)
import pandas.core.algorithms as algos
-from pandas.core.arrays import (
- Categorical,
- DatetimeArray,
- ExtensionArray,
- PandasDtype,
- TimedeltaArray,
-)
+from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray
from pandas.core.base import PandasObject
import pandas.core.common as com
+from pandas.core.construction import extract_array
from pandas.core.indexers import (
check_setitem_lengths,
is_empty_indexer,
is_scalar_indexer,
)
-from pandas.core.internals.arrays import extract_array
import pandas.core.missing as missing
from pandas.core.nanops import nanpercentile
@@ -210,10 +203,6 @@ def internal_values(self, dtype=None):
"""
return self.values
- def formatting_values(self):
- """Return the internal values used by the DataFrame/SeriesFormatter"""
- return self.internal_values()
-
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
@@ -274,6 +263,8 @@ def make_block_same_class(self, values, placement=None, ndim=None, dtype=None):
)
if placement is None:
placement = self.mgr_locs
+ if ndim is None:
+ ndim = self.ndim
return make_block(
values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype
)
@@ -416,11 +407,8 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
return self.copy()
if self._can_hold_element(value):
- # equivalent: self._try_coerce_args(value) would not raise
+ # equivalent: _try_coerce_args(value) would not raise
blocks = self.putmask(mask, value, inplace=inplace)
- blocks = [
- b.make_block(values=self._try_coerce_result(b.values)) for b in blocks
- ]
return self._maybe_downcast(blocks, downcast)
# we can't process the value, but nothing to do
@@ -428,17 +416,18 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
return self if inplace else self.copy()
# operate column-by-column
- def f(m, v, i):
+ def f(mask, val, idx):
block = self.coerce_to_target_dtype(value)
# slice out our block
- if i is not None:
- block = block.getitem_block(slice(i, i + 1))
+ if idx is not None:
+ # i.e. self.ndim == 2
+ block = block.getitem_block(slice(idx, idx + 1))
return block.fillna(value, limit=limit, inplace=inplace, downcast=None)
- return self.split_and_operate(mask, f, inplace)
+ return self.split_and_operate(None, f, inplace)
- def split_and_operate(self, mask, f, inplace):
+ def split_and_operate(self, mask, f, inplace: bool):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
@@ -456,7 +445,8 @@ def split_and_operate(self, mask, f, inplace):
"""
if mask is None:
- mask = np.ones(self.shape, dtype=bool)
+ mask = np.broadcast_to(True, shape=self.shape)
+
new_values = self.values
def make_a_block(nv, ref_loc):
@@ -497,17 +487,15 @@ def make_a_block(nv, ref_loc):
return new_blocks
- def _maybe_downcast(self, blocks, downcast=None):
+ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
# no need to downcast our float
# unless indicated
- if downcast is None and self.is_float:
- return blocks
- elif downcast is None and (self.is_timedelta or self.is_datetime):
+ if downcast is None and (
+ self.is_float or self.is_timedelta or self.is_datetime
+ ):
return blocks
- if not isinstance(blocks, list):
- blocks = [blocks]
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None):
@@ -537,26 +525,21 @@ def downcast(self, dtypes=None):
raise ValueError(
"downcast must have a dictionary or 'infer' as its argument"
)
+ elif dtypes != "infer":
+ raise AssertionError("dtypes as dict is not supported yet")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
- def f(m, v, i):
-
- if dtypes == "infer":
- dtype = "infer"
- else:
- raise AssertionError("dtypes as dict is not supported yet")
-
- if dtype is not None:
- v = maybe_downcast_to_dtype(v, dtype)
- return v
+ def f(mask, val, idx):
+ val = maybe_downcast_to_dtype(val, dtype="infer")
+ return val
return self.split_and_operate(None, f, False)
- def astype(self, dtype, copy=False, errors="raise", values=None, **kwargs):
- return self._astype(dtype, copy=copy, errors=errors, values=values, **kwargs)
+ def astype(self, dtype, copy=False, errors="raise", **kwargs):
+ return self._astype(dtype, copy=copy, errors=errors, **kwargs)
- def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs):
+ def _astype(self, dtype, copy=False, errors="raise", **kwargs):
"""Coerce to the new type
Parameters
@@ -591,18 +574,6 @@ def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs):
# may need to convert to categorical
if self.is_categorical_astype(dtype):
- # deprecated 17636
- for deprecated_arg in ("categories", "ordered"):
- if deprecated_arg in kwargs:
- raise ValueError(
- "Got an unexpected argument: {}".format(deprecated_arg)
- )
-
- categories = kwargs.get("categories", None)
- ordered = kwargs.get("ordered", None)
- if com._any_not_none(categories, ordered):
- dtype = CategoricalDtype(categories, ordered)
-
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
@@ -617,42 +588,42 @@ def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs):
return self.copy()
return self
- if values is None:
- try:
- # force the copy here
- if self.is_extension:
- values = self.values.astype(dtype)
- else:
- if issubclass(dtype.type, str):
-
- # use native type formatting for datetime/tz/timedelta
- if self.is_datelike:
- values = self.to_native_types()
-
- # astype formatting
- else:
- values = self.get_values()
+ # force the copy here
+ if self.is_extension:
+ # TODO: Should we try/except this astype?
+ values = self.values.astype(dtype)
+ else:
+ if issubclass(dtype.type, str):
- else:
- values = self.get_values(dtype=dtype)
+ # use native type formatting for datetime/tz/timedelta
+ if self.is_datelike:
+ values = self.to_native_types()
- # _astype_nansafe works fine with 1-d only
- vals1d = values.ravel()
- values = astype_nansafe(vals1d, dtype, copy=True, **kwargs)
+ # astype formatting
+ else:
+ values = self.get_values()
- # TODO(extension)
- # should we make this attribute?
- if isinstance(values, np.ndarray):
- values = values.reshape(self.shape)
+ else:
+ values = self.get_values(dtype=dtype)
- except Exception: # noqa: E722
+ # _astype_nansafe works fine with 1-d only
+ vals1d = values.ravel()
+ try:
+ values = astype_nansafe(vals1d, dtype, copy=True)
+ except (ValueError, TypeError):
+ # e.g. astype_nansafe can fail on object-dtype of strings
+ # trying to convert to float
if errors == "raise":
raise
newb = self.copy() if copy else self
- else:
- newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
- else:
- newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
+ return newb
+
+ # TODO(extension)
+ # should we make this attribute?
+ if isinstance(values, np.ndarray):
+ values = values.reshape(self.shape)
+
+ newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
@@ -669,7 +640,14 @@ def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs):
)
return newb
- def convert(self, copy=True, **kwargs):
+ def convert(
+ self,
+ copy: bool = True,
+ datetime: bool = True,
+ numeric: bool = True,
+ timedelta: bool = True,
+ coerce: bool = False,
+ ):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
@@ -677,7 +655,7 @@ def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
tipo = maybe_infer_dtype_type(element)
@@ -685,42 +663,6 @@ def _can_hold_element(self, element):
return issubclass(tipo.type, dtype)
return isinstance(element, dtype)
- def _try_cast_result(self, result, dtype=None):
- """ try to cast the result to our original type, we may have
- roundtripped thru object in the mean-time
- """
- if dtype is None:
- dtype = self.dtype
-
- if self.is_integer or self.is_bool or self.is_datetime:
- pass
- elif self.is_float and result.dtype == self.dtype:
- # protect against a bool/object showing up here
- if isinstance(dtype, str) and dtype == "infer":
- return result
-
- # This is only reached via Block.setitem, where dtype is always
- # either "infer", self.dtype, or values.dtype.
- assert dtype == self.dtype, (dtype, self.dtype)
- return result
-
- # may need to change the dtype here
- return maybe_downcast_to_dtype(result, dtype)
-
- def _coerce_values(self, values):
- """
- Coerce values (usually derived from self.values) for an operation.
-
- Parameters
- ----------
- values : ndarray or ExtensionArray
-
- Returns
- -------
- ndarray or ExtensionArray
- """
- return values
-
def _try_coerce_args(self, other):
""" provide coercion to our input arguments """
@@ -733,18 +675,16 @@ def _try_coerce_args(self, other):
type(self).__name__.lower().replace("Block", ""),
)
)
+ if np.any(isna(other)) and not self._can_hold_na:
+ raise TypeError(
+ "cannot convert {} to an {}".format(
+ type(other).__name__,
+ type(self).__name__.lower().replace("Block", ""),
+ )
+ )
return other
- def _try_coerce_result(self, result):
- """ reverse of try_coerce_args """
- return result
-
- def _try_coerce_and_cast_result(self, result, dtype=None):
- result = self._try_coerce_result(result)
- result = self._try_cast_result(result, dtype=dtype)
- return result
-
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
@@ -755,14 +695,15 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
mask = isna(values)
if not self.is_object and not quoting:
- values = values.astype(str)
+ itemsize = writers.word_len(na_rep)
+ values = values.astype("<U{size}".format(size=itemsize))
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
- # block actions ####
+ # block actions #
def copy(self, deep=True):
""" copy constructor """
values = self.values
@@ -781,21 +722,43 @@ def replace(
inplace = validate_bool_kwarg(inplace, "inplace")
original_to_replace = to_replace
- # try to replace, if we raise an error, convert to ObjectBlock and
+ # If we cannot replace with own dtype, convert to ObjectBlock and
# retry
- values = self._coerce_values(self.values)
- try:
- to_replace = self._try_coerce_args(to_replace)
- except (TypeError, ValueError):
+ if not self._can_hold_element(to_replace):
+ if not isinstance(to_replace, list):
+ if inplace:
+ return [self]
+ return [self.copy()]
+
+ to_replace = [x for x in to_replace if self._can_hold_element(x)]
+ if not len(to_replace):
+ # GH#28084 avoid costly checks since we can infer
+ # that there is nothing to replace in this block
+ if inplace:
+ return [self]
+ return [self.copy()]
+
+ if len(to_replace) == 1:
+ # _can_hold_element checks have reduced this back to the
+ # scalar case and we can avoid a costly object cast
+ return self.replace(
+ to_replace[0],
+ value,
+ inplace=inplace,
+ filter=filter,
+ regex=regex,
+ convert=convert,
+ )
+
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
- raise
+ raise AssertionError
# try again with a compatible block
block = self.astype(object)
return block.replace(
- to_replace=original_to_replace,
+ to_replace=to_replace,
value=value,
inplace=inplace,
filter=filter,
@@ -803,19 +766,35 @@ def replace(
convert=convert,
)
+ values = self.values
+ to_replace = self._try_coerce_args(to_replace)
+
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
+ if not mask.any():
+ if inplace:
+ return [self]
+ return [self.copy()]
+
try:
blocks = self.putmask(mask, value, inplace=inplace)
+ # Note: it is _not_ the case that self._can_hold_element(value)
+ # is always true at this point. In particular, that can fail
+ # for:
+ # "2u" with bool-dtype, float-dtype
+ # 0.5 with int64-dtype
+ # np.nan with int64-dtype
except (TypeError, ValueError):
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise
+ assert not self._can_hold_element(value), value
+
# try again with a compatible block
block = self.astype(object)
return block.replace(
@@ -827,9 +806,7 @@ def replace(
convert=convert,
)
if convert:
- blocks = [
- b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks
- ]
+ blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]
return blocks
def _replace_single(self, *args, **kwargs):
@@ -864,9 +841,10 @@ def setitem(self, indexer, value):
# coerce if block dtype can store value
values = self.values
- try:
+ if self._can_hold_element(value):
value = self._try_coerce_args(value)
- except (TypeError, ValueError):
+
+ else:
# current dtype cannot store value, coerce to common dtype
find_dtype = False
@@ -874,28 +852,15 @@ def setitem(self, indexer, value):
dtype = value.dtype
find_dtype = True
- elif lib.is_scalar(value):
- if isna(value):
- # NaN promotion is handled in latter path
- dtype = False
- else:
- dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
- find_dtype = True
- else:
- dtype = "infer"
+ elif lib.is_scalar(value) and not isna(value):
+ dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
+ find_dtype = True
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value)
- else:
- values = self._coerce_values(values)
- # can keep its own dtype
- if hasattr(value, "dtype") and is_dtype_equal(values.dtype, value.dtype):
- dtype = self.dtype
- else:
- dtype = "infer"
# value must be storeable at this moment
arr_value = np.array(value)
@@ -925,7 +890,7 @@ def setitem(self, indexer, value):
elif (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
- and np.prod(arr_value.shape) == np.prod(values.shape)
+ and arr_value.size == values.size
):
values[indexer] = value
try:
@@ -937,8 +902,6 @@ def setitem(self, indexer, value):
else:
values[indexer] = value
- # coerce and try to infer the dtypes of the result
- values = self._try_coerce_and_cast_result(values, dtype)
if transpose:
values = values.T
block = self.make_block(values)
@@ -972,6 +935,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
+ # FIXME: make sure we have compatible NA
new = self.fill_value
if self._can_hold_element(new):
@@ -1025,15 +989,15 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
new = new.reshape(tuple(new_shape))
# operate column-by-column
- def f(m, v, i):
+ def f(mask, val, idx):
- if i is None:
+ if idx is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
- n = np.squeeze(new[i % new.shape[0]])
+ n = np.squeeze(new[idx % new.shape[0]])
else:
n = np.array(new)
@@ -1043,7 +1007,7 @@ def f(m, v, i):
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
- nv = _putmask_smart(v, m, n)
+ nv = _putmask_smart(val, mask, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
@@ -1099,7 +1063,7 @@ def coerce_to_target_dtype(self, other):
mytz = getattr(self.dtype, "tz", None)
othertz = getattr(dtype, "tz", None)
- if str(mytz) != str(othertz):
+ if not tz_compare(mytz, othertz):
return self.astype(object)
raise AssertionError(
@@ -1121,9 +1085,7 @@ def coerce_to_target_dtype(self, other):
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
- pass
-
- return self.astype(object)
+ return self.astype(object)
def interpolate(
self,
@@ -1215,7 +1177,6 @@ def _interpolate_with_fill(
return [self.copy()]
values = self.values if inplace else self.values.copy()
- values = self._coerce_values(values)
fill_value = self._try_coerce_args(fill_value)
values = missing.interpolate_2d(
values,
@@ -1225,7 +1186,6 @@ def _interpolate_with_fill(
fill_value=fill_value,
dtype=self.dtype,
)
- values = self._try_coerce_result(values)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
@@ -1323,7 +1283,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
else:
return self.make_block_same_class(new_values, new_mgr_locs)
- def diff(self, n, axis=1):
+ def diff(self, n: int, axis: int = 1) -> List["Block"]:
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values)]
@@ -1357,7 +1317,15 @@ def shift(self, periods, axis=0, fill_value=None):
return [self.make_block(new_values)]
- def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
+ def where(
+ self,
+ other,
+ cond,
+ align=True,
+ errors="raise",
+ try_cast: bool = False,
+ axis: int = 0,
+ ) -> List["Block"]:
"""
evaluate the block; return result block(s) from the result
@@ -1401,29 +1369,23 @@ def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0)
# our where function
def func(cond, values, other):
- other = self._try_coerce_args(other)
- try:
- fastres = expressions.where(cond, values, other)
- return self._try_coerce_result(fastres)
- except Exception as detail:
- if errors == "raise":
- raise TypeError(
- "Could not operate [{other!r}] with block values "
- "[{detail!s}]".format(other=other, detail=detail)
- )
- else:
- # return the values
- result = np.empty(values.shape, dtype="float64")
- result.fill(np.nan)
- return result
+ if not (
+ (self.is_integer or self.is_bool)
+ and lib.is_float(other)
+ and np.isnan(other)
+ ):
+ # np.where will cast integer array to floats in this case
+ other = self._try_coerce_args(other)
+
+ fastres = expressions.where(cond, values, other)
+ return fastres
if cond.ravel().all():
result = values
else:
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
- values = self._coerce_values(values)
try:
result = func(cond, values, other)
except TypeError:
@@ -1446,11 +1408,7 @@ def func(cond, values, other):
if transpose:
result = result.T
- # try to cast if requested
- if try_cast:
- result = self._try_cast_result(result)
-
- return self.make_block(result)
+ return [self.make_block(result)]
# might need to separate out blocks
axis = cond.ndim - 1
@@ -1461,13 +1419,13 @@ def func(cond, values, other):
for m in [mask, ~mask]:
if m.any():
taken = result.take(m.nonzero()[0], axis=axis)
- r = self._try_cast_result(taken)
+ r = maybe_downcast_numeric(taken, self.dtype)
nb = self.make_block(r.T, placement=self.mgr_locs[m])
result_blocks.append(nb)
return result_blocks
- def equals(self, other):
+ def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
@@ -1482,9 +1440,9 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
new_columns : Index
All columns of the unstacked BlockManager.
n_rows : int
- Only used in ExtensionBlock.unstack
+ Only used in ExtensionBlock._unstack
fill_value : int
- Only used in ExtensionBlock.unstack
+ Only used in ExtensionBlock._unstack
Returns
-------
@@ -1522,19 +1480,7 @@ def quantile(self, qs, interpolation="linear", axis=0):
# We should always have ndim == 2 becase Series dispatches to DataFrame
assert self.ndim == 2
- if self.is_datetimetz:
- # TODO: cleanup this special case.
- # We need to operate on i8 values for datetimetz
- # but `Block.get_values()` returns an ndarray of objects
- # right now. We need an API for "values to do numeric-like ops on"
- values = self.values.asi8
-
- # TODO: NonConsolidatableMixin shape
- # Usual shape inconsistencies for ExtensionBlocks
- values = values[None, :]
- else:
- values = self.get_values()
- values = self._coerce_values(values)
+ values = self.get_values()
is_empty = values.shape[axis] == 0
orig_scalar = not is_list_like(qs)
@@ -1550,16 +1496,14 @@ def quantile(self, qs, interpolation="linear", axis=0):
).reshape(len(values), len(qs))
else:
# asarray needed for Sparse, see GH#24600
- # Note: we use self.values below instead of values because the
- # `asi8` conversion above will behave differently under `isna`
- mask = np.asarray(isna(self.values))
+ mask = np.asarray(isna(values))
result = nanpercentile(
values,
np.array(qs) * 100,
axis=axis,
na_value=self.fill_value,
mask=mask,
- ndim=self.ndim,
+ ndim=values.ndim,
interpolation=interpolation,
)
@@ -1572,8 +1516,7 @@ def quantile(self, qs, interpolation="linear", axis=0):
result = result[..., 0]
result = lib.item_from_zerodim(result)
- ndim = getattr(result, "ndim", None) or 0
- result = self._try_coerce_result(result)
+ ndim = np.ndim(result)
return make_block(result, placement=np.arange(len(result)), ndim=ndim)
def _replace_coerce(
@@ -1699,7 +1642,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
- new_values = self._coerce_values(new_values)
new = self._try_coerce_args(new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
@@ -1708,12 +1650,8 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
- new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
- def _try_cast_result(self, result, dtype=None):
- return result
-
def _get_unstack_items(self, unstacker, new_columns):
"""
Get the placement, values, and mask for a Block unstack.
@@ -1765,7 +1703,8 @@ def __init__(self, values, placement, ndim=None):
super().__init__(values, placement, ndim)
def _maybe_coerce_values(self, values):
- """Unbox to an extension array.
+ """
+ Unbox to an extension array.
This will unbox an ExtensionArray stored in an Index or Series.
ExtensionArrays pass through. No dtype coercion is done.
@@ -1778,9 +1717,7 @@ def _maybe_coerce_values(self, values):
-------
ExtensionArray
"""
- if isinstance(values, (ABCIndexClass, ABCSeries)):
- values = values._values
- return values
+ return extract_array(values)
@property
def _holder(self):
@@ -1868,7 +1805,7 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
return self.make_block_same_class(new_values, new_mgr_locs)
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
# XXX: We may need to think about pushing this onto the array.
# We're doing the same as CategoricalBlock here.
return True
@@ -1886,41 +1823,6 @@ def _slice(self, slicer):
return self.values[slicer]
- def _try_cast_result(self, result, dtype=None):
- """
- if we have an operation that operates on for example floats
- we want to try to cast back to our EA here if possible
-
- result could be a 2-D numpy array, e.g. the result of
- a numeric operation; but it must be shape (1, X) because
- we by-definition operate on the ExtensionBlocks one-by-one
-
- result could also be an EA Array itself, in which case it
- is already a 1-D array
- """
- try:
-
- result = self._holder._from_sequence(result.ravel(), dtype=dtype)
- except Exception:
- pass
-
- return result
-
- def formatting_values(self):
- # Deprecating the ability to override _formatting_values.
- # Do the warning here, it's only user in pandas, since we
- # have to check if the subclass overrode it.
- fv = getattr(type(self.values), "_formatting_values", None)
- if fv and fv != ExtensionArray._formatting_values:
- msg = (
- "'ExtensionArray._formatting_values' is deprecated. "
- "Specify 'ExtensionArray._formatter' instead."
- )
- warnings.warn(msg, FutureWarning, stacklevel=10)
- return self.values._formatting_values()
-
- return self.values
-
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
@@ -1968,7 +1870,15 @@ def shift(
)
]
- def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
+ def where(
+ self,
+ other,
+ cond,
+ align=True,
+ errors="raise",
+ try_cast: bool = False,
+ axis: int = 0,
+ ) -> List["Block"]:
if isinstance(other, ABCDataFrame):
# ExtensionArrays are 1-D, so if we get here then
# `other` should be a DataFrame with a single column.
@@ -2013,7 +1923,7 @@ def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0)
np.where(cond, self.values, other), dtype=dtype
)
- return self.make_block_same_class(result, placement=self.mgr_locs)
+ return [self.make_block_same_class(result, placement=self.mgr_locs)]
@property
def _ftype(self):
@@ -2065,7 +1975,7 @@ class NumericBlock(Block):
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
- def equals(self, other):
+ def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
@@ -2076,7 +1986,7 @@ class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(
@@ -2140,7 +2050,7 @@ class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))
@@ -2157,7 +2067,7 @@ class IntBlock(NumericBlock):
is_integer = True
_can_hold_na = False
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return (
@@ -2180,7 +2090,7 @@ def _holder(self):
@property
def fill_value(self):
- return tslibs.iNaT
+ return np.datetime64("NaT", "ns")
def get_values(self, dtype=None):
"""
@@ -2206,7 +2116,8 @@ def _can_hold_na(self):
return True
def _maybe_coerce_values(self, values):
- """Input validation for values passed to __init__. Ensure that
+ """
+ Input validation for values passed to __init__. Ensure that
we have datetime64ns, coercing if necessary.
Parameters
@@ -2247,12 +2158,14 @@ def _astype(self, dtype, **kwargs):
# delegate
return super()._astype(dtype=dtype, **kwargs)
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
if self.is_datetimetz:
# require exact match, since non-nano does not exist
- return is_dtype_equal(tipo, self.dtype)
+ return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(
+ element, self.dtype
+ )
# GH#27419 if we get a non-nano datetime64 object
return is_datetime64_dtype(tipo)
@@ -2262,14 +2175,9 @@ def _can_hold_element(self, element):
if self.is_datetimetz:
return tz_compare(element.tzinfo, self.dtype.tz)
return element.tzinfo is None
- elif is_integer(element):
- return element == tslibs.iNaT
return is_valid_nat_for_dtype(element, self.dtype)
- def _coerce_values(self, values):
- return values.view("i8")
-
def _try_coerce_args(self, other):
"""
Coerce other to dtype 'i8'. NaN and NaT convert to
@@ -2286,16 +2194,15 @@ def _try_coerce_args(self, other):
base-type other
"""
if is_valid_nat_for_dtype(other, self.dtype):
- other = tslibs.iNaT
- elif is_integer(other) and other == tslibs.iNaT:
- pass
+ other = np.datetime64("NaT", "ns")
elif isinstance(other, (datetime, np.datetime64, date)):
- other = self._box_func(other)
- if getattr(other, "tz") is not None:
+ other = Timestamp(other)
+ if other.tz is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
- other = other.asm8.view("i8")
+ other = other.asm8
elif hasattr(other, "dtype") and is_datetime64_dtype(other):
- other = other.astype("i8", copy=False).view("i8")
+ # TODO: can we get here with non-nano?
+ pass
else:
# coercion issues
# let higher levels handle
@@ -2303,20 +2210,6 @@ def _try_coerce_args(self, other):
return other
- def _try_coerce_result(self, result):
- """ reverse of try_coerce_args """
- if isinstance(result, np.ndarray):
- if result.dtype.kind in ["i", "f"]:
- result = result.astype("M8[ns]")
-
- elif isinstance(result, (np.integer, np.float, np.datetime64)):
- result = self._box_func(result)
- return result
-
- @property
- def _box_func(self):
- return tslibs.Timestamp
-
def to_native_types(
self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs
):
@@ -2372,6 +2265,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
is_extension = True
_can_hold_element = DatetimeBlock._can_hold_element
+ fill_value = np.datetime64("NaT", "ns")
@property
def _holder(self):
@@ -2427,7 +2321,7 @@ def get_values(self, dtype=None):
"""
values = self.values
if is_object_dtype(dtype):
- values = values._box_values(values._data)
+ values = values.astype(object)
values = np.asarray(values)
@@ -2453,85 +2347,20 @@ def _slice(self, slicer):
return self.values[loc]
return self.values[slicer]
- def _coerce_values(self, values):
- # asi8 is a view, needs copy
- return _block_shape(values.view("i8"), ndim=self.ndim)
-
def _try_coerce_args(self, other):
- """
- localize and return i8 for the values
-
- Parameters
- ----------
- other : ndarray-like or scalar
-
- Returns
- -------
- base-type other
- """
-
- if isinstance(other, ABCSeries):
- other = self._holder(other)
-
- if isinstance(other, bool):
- raise TypeError
- elif is_datetime64_dtype(other):
- # add the tz back
- other = self._holder(other, dtype=self.dtype)
-
- elif is_valid_nat_for_dtype(other, self.dtype):
- other = tslibs.iNaT
- elif is_integer(other) and other == tslibs.iNaT:
- pass
- elif isinstance(other, self._holder):
- if other.tz != self.values.tz:
- raise ValueError("incompatible or non tz-aware value")
- other = _block_shape(other.asi8, ndim=self.ndim)
- elif isinstance(other, (np.datetime64, datetime, date)):
- other = tslibs.Timestamp(other)
- tz = getattr(other, "tz", None)
-
- # test we can have an equal time zone
- if tz is None or str(tz) != str(self.values.tz):
- raise ValueError("incompatible or non tz-aware value")
- other = other.value
- else:
- raise TypeError(other)
-
+ # DatetimeArray handles this for us
return other
- def _try_coerce_result(self, result):
- """ reverse of try_coerce_args """
- if isinstance(result, np.ndarray):
- if result.dtype.kind in ["i", "f"]:
- result = result.astype("M8[ns]")
-
- elif isinstance(result, (np.integer, np.float, np.datetime64)):
- result = self._box_func(result)
-
- if isinstance(result, np.ndarray):
- # allow passing of > 1dim if its trivial
-
- if result.ndim > 1:
- result = result.reshape(np.prod(result.shape))
- # GH#24096 new values invalidates a frequency
- result = self._holder._simple_new(
- result, freq=None, dtype=self.values.dtype
- )
-
- return result
-
- @property
- def _box_func(self):
- return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz)
-
- def diff(self, n, axis=0):
- """1st discrete difference
+ def diff(self, n: int, axis: int = 0) -> List["Block"]:
+ """
+ 1st discrete difference.
Parameters
----------
- n : int, number of periods to diff
- axis : int, axis to diff upon. default 0
+ n : int
+ Number of periods to diff.
+ axis : int, default 0
+ Axis to diff upon.
Returns
-------
@@ -2559,7 +2388,7 @@ def concat_same_type(self, to_concat, placement=None):
# Instead of placing the condition here, it could also go into the
# is_uniform_join_units check, but I'm not sure what is better.
if len({x.dtype for x in to_concat}) > 1:
- values = _concat._concat_datetime([x.values for x in to_concat])
+ values = concat_datetime([x.values for x in to_concat])
placement = placement or slice(0, len(values), 1)
if self.ndim > 1:
@@ -2570,38 +2399,55 @@ def concat_same_type(self, to_concat, placement=None):
def fillna(self, value, limit=None, inplace=False, downcast=None):
# We support filling a DatetimeTZ with a `value` whose timezone
# is different by coercing to object.
- try:
+ if self._can_hold_element(value):
return super().fillna(value, limit, inplace, downcast)
- except (ValueError, TypeError):
- # different timezones, or a non-tz
- return self.astype(object).fillna(
- value, limit=limit, inplace=inplace, downcast=downcast
- )
+
+ # different timezones, or a non-tz
+ return self.astype(object).fillna(
+ value, limit=limit, inplace=inplace, downcast=downcast
+ )
def setitem(self, indexer, value):
# https://github.com/pandas-dev/pandas/issues/24020
# Need a dedicated setitem until #24020 (type promotion in setitem
# for extension arrays) is designed and implemented.
- try:
+ if self._can_hold_element(value) or (
+ isinstance(indexer, np.ndarray) and indexer.size == 0
+ ):
return super().setitem(indexer, value)
- except (ValueError, TypeError):
- newb = make_block(
- self.values.astype(object), placement=self.mgr_locs, klass=ObjectBlock
- )
- return newb.setitem(indexer, value)
- def equals(self, other):
+ obj_vals = self.values.astype(object)
+ newb = make_block(
+ obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim
+ )
+ return newb.setitem(indexer, value)
+
+ def equals(self, other) -> bool:
# override for significant performance improvement
if self.dtype != other.dtype or self.shape != other.shape:
return False
return (self.values.view("i8") == other.values.view("i8")).all()
+ def quantile(self, qs, interpolation="linear", axis=0):
+ naive = self.values.view("M8[ns]")
+
+ # kludge for 2D block with 1D values
+ naive = naive.reshape(self.shape)
+
+ blk = self.make_block(naive)
+ res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)
+
+ # ravel is kludge for 2D block with 1D values, assumes column-like
+ aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
+ return self.make_block_same_class(aware, ndim=res_blk.ndim)
+
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
+ fill_value = np.timedelta64("NaT", "ns")
def __init__(self, values, placement, ndim=None):
if values.dtype != _TD_DTYPE:
@@ -2615,22 +2461,14 @@ def __init__(self, values, placement, ndim=None):
def _holder(self):
return TimedeltaArray
- @property
- def _box_func(self):
- return lambda x: Timedelta(x, unit="ns")
-
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
- # TODO: remove the np.int64 support once coerce_values and
- # _try_coerce_args both coerce to m8[ns] and not i8.
- return issubclass(tipo.type, (np.timedelta64, np.int64))
+ return issubclass(tipo.type, np.timedelta64)
elif element is NaT:
return True
elif isinstance(element, (timedelta, np.timedelta64)):
return True
- elif is_integer(element):
- return element == tslibs.iNaT
return is_valid_nat_for_dtype(element, self.dtype)
def fillna(self, value, **kwargs):
@@ -2650,13 +2488,10 @@ def fillna(self, value, **kwargs):
value = Timedelta(value, unit="s")
return super().fillna(value, **kwargs)
- def _coerce_values(self, values):
- return values.view("i8")
-
def _try_coerce_args(self, other):
"""
- Coerce values and other to int64, with null values converted to
- iNaT. values is always ndarray-like, other may not be
+        Coerce values and other to timedelta64[ns], with null values
+        converted to timedelta64("NaT", "ns").
Parameters
----------
@@ -2668,13 +2503,12 @@ def _try_coerce_args(self, other):
"""
if is_valid_nat_for_dtype(other, self.dtype):
- other = tslibs.iNaT
- elif is_integer(other) and other == tslibs.iNaT:
- pass
+ other = np.timedelta64("NaT", "ns")
elif isinstance(other, (timedelta, np.timedelta64)):
- other = Timedelta(other).value
+ other = Timedelta(other).to_timedelta64()
elif hasattr(other, "dtype") and is_timedelta64_dtype(other):
- other = other.astype("i8", copy=False).view("i8")
+ # TODO: can we get here with non-nano dtype?
+ pass
else:
# coercion issues
# let higher levels handle
@@ -2682,19 +2516,6 @@ def _try_coerce_args(self, other):
return other
- def _try_coerce_result(self, result):
- """ reverse of try_coerce_args / try_operate """
- if isinstance(result, np.ndarray):
- mask = isna(result)
- if result.dtype.kind in ["i", "f"]:
- result = result.astype("m8[ns]")
- result[mask] = tslibs.iNaT
-
- elif isinstance(result, (np.integer, np.float)):
- result = self._box_func(result)
-
- return result
-
def should_store(self, value):
return issubclass(
value.dtype.type, np.timedelta64
@@ -2733,7 +2554,7 @@ class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.bool_)
@@ -2779,37 +2600,31 @@ def is_bool(self):
"""
return lib.is_bool_array(self.values.ravel())
- # TODO: Refactor when convert_objects is removed since there will be 1 path
- def convert(self, *args, **kwargs):
+ def convert(
+ self,
+ copy: bool = True,
+ datetime: bool = True,
+ numeric: bool = True,
+ timedelta: bool = True,
+ coerce: bool = False,
+ ):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
- if args:
- raise NotImplementedError
- by_item = kwargs.get("by_item", True)
-
- new_inputs = ["coerce", "datetime", "numeric", "timedelta"]
- new_style = False
- for kw in new_inputs:
- new_style |= kw in kwargs
-
- if new_style:
- fn = soft_convert_objects
- fn_inputs = new_inputs
- else:
- fn = maybe_convert_objects
- fn_inputs = ["convert_dates", "convert_numeric", "convert_timedeltas"]
- fn_inputs += ["copy"]
-
- fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs}
-
# operate column-by-column
- def f(m, v, i):
- shape = v.shape
- values = fn(v.ravel(), **fn_kwargs)
+ def f(mask, val, idx):
+ shape = val.shape
+ values = soft_convert_objects(
+ val.ravel(),
+ datetime=datetime,
+ numeric=numeric,
+ timedelta=timedelta,
+ coerce=coerce,
+ copy=copy,
+ )
if isinstance(values, np.ndarray):
# TODO: allow EA once reshape is supported
values = values.reshape(shape)
@@ -2817,7 +2632,7 @@ def f(m, v, i):
values = _block_shape(values, ndim=self.ndim)
return values
- if by_item and not self._is_single_block:
+ if self.ndim == 2:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
@@ -2825,7 +2640,7 @@ def f(m, v, i):
return blocks
- def _maybe_downcast(self, blocks, downcast=None):
+ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
if downcast is not None:
return blocks
@@ -2833,7 +2648,7 @@ def _maybe_downcast(self, blocks, downcast=None):
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])
- def _can_hold_element(self, element):
+ def _can_hold_element(self, element: Any) -> bool:
return True
def _try_coerce_args(self, other):
@@ -2989,9 +2804,9 @@ def _replace_single(
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
- try:
+ if is_re(to_replace):
pattern = to_replace.pattern
- except AttributeError:
+ else:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
@@ -3012,18 +2827,18 @@ def _replace_single(
if isna(value) or not isinstance(value, str):
def re_replacer(s):
- try:
+ if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
- except TypeError:
+ else:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
- try:
+ if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
- except TypeError:
+ else:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
@@ -3041,7 +2856,7 @@ def re_replacer(s):
# convert
block = self.make_block(new_values)
if convert:
- block = block.convert(by_item=True, numeric=False)
+ block = block.convert(numeric=False)
return block
def _replace_coerce(
@@ -3080,9 +2895,7 @@ def _replace_coerce(
mask=mask,
)
if convert:
- block = [
- b.convert(by_item=True, numeric=False, copy=True) for b in block
- ]
+ block = [b.convert(numeric=False, copy=True) for b in block]
return block
return self
@@ -3092,13 +2905,13 @@ class CategoricalBlock(ExtensionBlock):
is_categorical = True
_verify_integrity = True
_can_hold_na = True
- _concatenator = staticmethod(_concat._concat_categorical)
+ _concatenator = staticmethod(concat_categorical)
def __init__(self, values, placement, ndim=None):
- from pandas.core.arrays.categorical import _maybe_to_categorical
-
# coerce to categorical if we can
- super().__init__(_maybe_to_categorical(values), placement=placement, ndim=ndim)
+ values = extract_array(values)
+ assert isinstance(values, Categorical), type(values)
+ super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
@@ -3111,16 +2924,6 @@ def array_dtype(self):
"""
return np.object_
- def _try_coerce_result(self, result):
- """ reverse of try_coerce_args """
-
- # GH12564: CategoricalBlock is 1-dim only
- # while returned results could be any dim
- if (not is_categorical_dtype(result)) and isinstance(result, np.ndarray):
- result = _block_shape(result, ndim=self.ndim)
-
- return result
-
def to_dense(self):
# Categorical.get_values returns a DatetimeIndex for datetime
# categories, so we can't simply use `np.asarray(self.values)` like
@@ -3162,7 +2965,15 @@ def concat_same_type(self, to_concat, placement=None):
values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
)
- def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0):
+ def where(
+ self,
+ other,
+ cond,
+ align=True,
+ errors="raise",
+ try_cast: bool = False,
+ axis: int = 0,
+ ) -> List["Block"]:
# TODO(CategoricalBlock.where):
# This can all be deleted in favor of ExtensionBlock.where once
# we enforce the deprecation.
@@ -3349,14 +3160,15 @@ def _safe_reshape(arr, new_shape):
return arr
-def _putmask_smart(v, m, n):
+def _putmask_smart(v, mask, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
- m : `mask`, applies to both sides (array like)
+ mask : np.ndarray
+ Applies to both sides (array like).
n : `new values` either scalar or an array like aligned with `values`
Returns
@@ -3374,12 +3186,12 @@ def _putmask_smart(v, m, n):
# n should be the length of the mask or a scalar here
if not is_list_like(n):
- n = np.repeat(n, len(m))
+ n = np.repeat(n, len(mask))
# see if we are only masking values that if putted
# will work in the current dtype
try:
- nn = n[m]
+ nn = n[mask]
except TypeError:
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
@@ -3404,16 +3216,16 @@ def _putmask_smart(v, m, n):
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = v.copy()
- nv[m] = nn_at
+ nv[mask] = nn_at
return nv
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
- nv[m] = n[m]
+ nv[mask] = n[mask]
except (IndexError, ValueError):
- nv[m] = n
+ nv[mask] = n
return nv
# preserves dtype if possible
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 9ccd4b80869a0..121c61d8d3623 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -19,7 +19,7 @@
is_sparse,
is_timedelta64_dtype,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
@@ -211,7 +211,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
if not self.indexers:
if not self.block._can_consolidate:
- # preserve these for validation in _concat_compat
+ # preserve these for validation in concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
@@ -265,7 +265,7 @@ def concatenate_join_units(join_units, concat_axis, copy):
else:
concat_values = concat_values.copy()
else:
- concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
+ concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
@@ -380,7 +380,7 @@ def is_uniform_join_units(join_units):
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
- concatenate_join_units (which uses `_concat._concat_compat`).
+ concatenate_join_units (which uses `concat_compat`).
"""
return (
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index c437f686bd17b..3126b9d9d3e2e 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -8,18 +8,12 @@
import numpy.ma as ma
from pandas._libs import lib
-from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
import pandas.compat as compat
from pandas.compat import PY36, raise_with_traceback
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
- construct_1d_ndarray_preserving_na,
- construct_1d_object_array_from_listlike,
- infer_dtype_from_scalar,
maybe_cast_to_datetime,
- maybe_cast_to_integer_array,
- maybe_castable,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
@@ -29,13 +23,9 @@
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
- is_extension_type,
- is_float_dtype,
is_integer_dtype,
- is_iterator,
is_list_like,
is_object_dtype,
- pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
@@ -45,10 +35,10 @@
ABCSeries,
ABCTimedeltaIndex,
)
-from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
-from pandas.core.arrays import Categorical, ExtensionArray, period_array
+from pandas.core.arrays import Categorical
+from pandas.core.construction import sanitize_array
from pandas.core.index import (
Index,
_get_objs_combined_axis,
@@ -60,7 +50,6 @@
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
-from pandas.core.internals.arrays import extract_array
# ---------------------------------------------------------------------
# BlockManager Interface
@@ -224,7 +213,7 @@ def init_dict(data, index, columns, dtype=None):
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
- missing = arrays.isnull()
+ missing = arrays.isna()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
@@ -625,186 +614,3 @@ def sanitize_index(data, index, copy=False):
data = sanitize_array(data, index, copy=copy)
return data
-
-
-def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
- """
- Sanitize input data to an ndarray, copy if specified, coerce to the
- dtype if specified.
- """
- if dtype is not None:
- dtype = pandas_dtype(dtype)
-
- if isinstance(data, ma.MaskedArray):
- mask = ma.getmaskarray(data)
- if mask.any():
- data, fill_value = maybe_upcast(data, copy=True)
- data.soften_mask() # set hardmask False if it was True
- data[mask] = fill_value
- else:
- data = data.copy()
-
- # extract ndarray or ExtensionArray, ensure we have no PandasArray
- data = extract_array(data, extract_numpy=True)
-
- # GH#846
- if isinstance(data, np.ndarray):
-
- if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
- # possibility of nan -> garbage
- try:
- subarr = _try_cast(data, dtype, copy, True)
- except ValueError:
- if copy:
- subarr = data.copy()
- else:
- subarr = np.array(data, copy=False)
- else:
- # we will try to copy be-definition here
- subarr = _try_cast(data, dtype, copy, raise_cast_failure)
-
- elif isinstance(data, ExtensionArray):
- # it is already ensured above this is not a PandasArray
- subarr = data
-
- if dtype is not None:
- subarr = subarr.astype(dtype, copy=copy)
- elif copy:
- subarr = subarr.copy()
- return subarr
-
- elif isinstance(data, (list, tuple)) and len(data) > 0:
- if dtype is not None:
- try:
- subarr = _try_cast(data, dtype, copy, raise_cast_failure)
- except Exception:
- if raise_cast_failure: # pragma: no cover
- raise
- subarr = np.array(data, dtype=object, copy=copy)
- subarr = lib.maybe_convert_objects(subarr)
-
- else:
- subarr = maybe_convert_platform(data)
-
- subarr = maybe_cast_to_datetime(subarr, dtype)
-
- elif isinstance(data, range):
- # GH#16804
- arr = np.arange(data.start, data.stop, data.step, dtype="int64")
- subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
- else:
- subarr = _try_cast(data, dtype, copy, raise_cast_failure)
-
- # scalar like, GH
- if getattr(subarr, "ndim", 0) == 0:
- if isinstance(data, list): # pragma: no cover
- subarr = np.array(data, dtype=object)
- elif index is not None:
- value = data
-
- # figure out the dtype from the value (upcast if necessary)
- if dtype is None:
- dtype, value = infer_dtype_from_scalar(value)
- else:
- # need to possibly convert the value here
- value = maybe_cast_to_datetime(value, dtype)
-
- subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
-
- else:
- return subarr.item()
-
- # the result that we want
- elif subarr.ndim == 1:
- if index is not None:
-
- # a 1-element ndarray
- if len(subarr) != len(index) and len(subarr) == 1:
- subarr = construct_1d_arraylike_from_scalar(
- subarr[0], len(index), subarr.dtype
- )
-
- elif subarr.ndim > 1:
- if isinstance(data, np.ndarray):
- raise Exception("Data must be 1-dimensional")
- else:
- subarr = com.asarray_tuplesafe(data, dtype=dtype)
-
- # This is to prevent mixed-type Series getting all casted to
- # NumPy string type, e.g. NaN --> '-1#IND'.
- if issubclass(subarr.dtype.type, str):
- # GH#16605
- # If not empty convert the data to dtype
- # GH#19853: If data is a scalar, subarr has already the result
- if not lib.is_scalar(data):
- if not np.all(isna(data)):
- data = np.array(data, dtype=dtype, copy=False)
- subarr = np.array(data, dtype=object, copy=copy)
-
- if (
- not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype))
- and is_object_dtype(subarr.dtype)
- and not is_object_dtype(dtype)
- ):
- inferred = lib.infer_dtype(subarr, skipna=False)
- if inferred == "period":
- try:
- subarr = period_array(subarr)
- except IncompatibleFrequency:
- pass
-
- return subarr
-
-
-def _try_cast(arr, dtype, copy, raise_cast_failure):
- """
- Convert input to numpy ndarray and optionally cast to a given dtype.
-
- Parameters
- ----------
- arr : array-like
- dtype : np.dtype, ExtensionDtype or None
- copy : bool
- If False, don't copy the data if not needed.
- raise_cast_failure : bool
- If True, and if a dtype is specified, raise errors during casting.
- Otherwise an object array is returned.
- """
- # perf shortcut as this is the most common case
- if isinstance(arr, np.ndarray):
- if maybe_castable(arr) and not copy and dtype is None:
- return arr
-
- try:
- # GH#15832: Check if we are requesting a numeric dype and
- # that we can convert the data to the requested dtype.
- if is_integer_dtype(dtype):
- subarr = maybe_cast_to_integer_array(arr, dtype)
-
- subarr = maybe_cast_to_datetime(arr, dtype)
- # Take care in creating object arrays (but iterators are not
- # supported):
- if is_object_dtype(dtype) and (
- is_list_like(subarr)
- and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
- ):
- subarr = construct_1d_object_array_from_listlike(subarr)
- elif not is_extension_type(subarr):
- subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
- except OutOfBoundsDatetime:
- # in case of out of bound datetime64 -> always raise
- raise
- except (ValueError, TypeError):
- if is_categorical_dtype(dtype):
- # We *do* allow casting to categorical, since we know
- # that Categorical is the only array type for 'category'.
- subarr = Categorical(arr, dtype.categories, ordered=dtype._ordered)
- elif is_extension_array_dtype(dtype):
- # create an extension array from its dtype
- array_type = dtype.construct_array_type()._from_sequence
- subarr = array_type(arr, dtype=dtype, copy=copy)
- elif dtype is not None and raise_cast_failure:
- raise
- else:
- subarr = np.array(arr, dtype=object, copy=copy)
- return subarr
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 2e7280eeae0e2..1c31542daa5de 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -7,7 +7,7 @@
import numpy as np
-from pandas._libs import internals as libinternals, lib
+from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -26,7 +26,7 @@
is_scalar,
is_sparse,
)
-import pandas.core.dtypes.concat as _concat
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
@@ -532,7 +532,7 @@ def get_axe(block, qs, axes):
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
- values = _concat._concat_compat([b.values for b in blocks])
+ values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
@@ -602,9 +602,10 @@ def comp(s, regex=False):
"""
if isna(s):
return isna(values)
- if hasattr(s, "asm8"):
+ if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
+
return _compare_or_regex_search(
- maybe_convert_objects(values), getattr(s, "asm8"), regex
+ maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
@@ -908,7 +909,7 @@ def fast_xs(self, loc):
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
- result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
+ result[rl] = blk.iget((i, loc))
if is_extension_array_dtype(dtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
@@ -975,8 +976,6 @@ def iget(self, i):
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
- if values.ndim != 1:
- return values
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
@@ -1551,7 +1550,6 @@ def index(self):
def convert(self, **kwargs):
""" convert the whole block as one """
- kwargs["by_item"] = False
return self.apply("convert", **kwargs)
@property
@@ -1584,10 +1582,6 @@ def external_values(self):
def internal_values(self):
return self._block.internal_values()
- def formatting_values(self):
- """Return the internal values used by the DataFrame/SeriesFormatter"""
- return self._block.formatting_values()
-
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@@ -1648,11 +1642,11 @@ def concat(self, to_concat, new_axis):
new_block = blocks[0].concat_same_type(blocks)
else:
values = [x.values for x in blocks]
- values = _concat._concat_compat(values)
+ values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
else:
values = [x._block.values for x in to_concat]
- values = _concat._concat_compat(values)
+ values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
mgr = SingleBlockManager(new_block, new_axis)
@@ -1825,7 +1819,7 @@ def _simple_blockify(tuples, dtype):
"""
values, placement = _stack_arrays(tuples, dtype)
- # CHECK DTYPE?
+ # TODO: CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 8f0abc91f7aef..bc81fbb7e1ce0 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -119,7 +119,7 @@ def clean_interp_method(method, **kwargs):
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
- raise ValueError("You must specify the order of the spline or " "polynomial.")
+ raise ValueError("You must specify the order of the spline or polynomial.")
if method not in valid:
raise ValueError(
"method must be one of {valid}. Got '{method}' "
@@ -176,7 +176,7 @@ def interpolate_1d(
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
- msg = "Invalid limit_direction: expecting one of {valid!r}, " "got {invalid!r}."
+ msg = "Invalid limit_direction: expecting one of {valid!r}, got {invalid!r}."
raise ValueError(
msg.format(valid=valid_limit_directions, invalid=limit_direction)
)
@@ -322,7 +322,7 @@ def _interpolate_scipy_wrapper(
alt_methods["pchip"] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError(
- "Your version of Scipy does not support " "PCHIP interpolation."
+ "Your version of Scipy does not support PCHIP interpolation."
)
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
@@ -463,6 +463,7 @@ def interpolate_2d(
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
"""
+ orig_values = values
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
@@ -470,7 +471,7 @@ def interpolate_2d(
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
- raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0")
+ raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
@@ -490,6 +491,10 @@ def interpolate_2d(
if ndim == 1:
values = values[0]
+ if orig_values.dtype.kind == "M":
+ # convert float back to datetime64
+ values = values.astype(orig_values.dtype)
+
return values
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index ce14cb22a88ce..eb442e8bf3486 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -33,8 +33,6 @@
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
-import pandas.core.common as com
-
bn = import_optional_dependency("bottleneck", raise_on_missing=False, on_version="warn")
_BOTTLENECK_INSTALLED = bn is not None
_USE_BOTTLENECK = False
@@ -99,17 +97,21 @@ def f(values, axis=None, skipna=True, **kwds):
for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
- try:
- if values.size == 0 and kwds.get("min_count") is None:
- # We are empty, returning NA for our type
- # Only applies for the default `min_count` of None
- # since that affects how empty arrays are handled.
- # TODO(GH-18976) update all the nanops methods to
- # correctly handle empty inputs and remove this check.
- # It *may* just be `var`
- return _na_for_min_count(values, axis)
-
- if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
+
+ if values.size == 0 and kwds.get("min_count") is None:
+ # We are empty, returning NA for our type
+ # Only applies for the default `min_count` of None
+ # since that affects how empty arrays are handled.
+ # TODO(GH-18976) update all the nanops methods to
+ # correctly handle empty inputs and remove this check.
+ # It *may* just be `var`
+ return _na_for_min_count(values, axis)
+
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
+ if kwds.get("mask", None) is None:
+ # `mask` is not recognised by bottleneck, would raise
+ # TypeError if called
+ kwds.pop("mask", None)
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
@@ -118,18 +120,8 @@ def f(values, axis=None, skipna=True, **kwds):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
- except Exception:
- try:
- result = alt(values, axis=axis, skipna=skipna, **kwds)
- except ValueError as e:
- # we want to transform an object array
- # ValueError message to the more typical TypeError
- # e.g. this is normally a disallowed function on
- # object arrays that contain strings
-
- if is_object_dtype(values):
- raise TypeError(e)
- raise
+ else:
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
@@ -281,12 +273,12 @@ def _get_values(
mask = _maybe_get_mask(values, skipna, mask)
if is_datetime64tz_dtype(values):
- # com.values_from_object returns M8[ns] dtype instead of tz-aware,
+ # lib.values_from_object returns M8[ns] dtype instead of tz-aware,
# so this case must be handled separately from the rest
dtype = values.dtype
values = getattr(values, "_values", values)
else:
- values = com.values_from_object(values)
+ values = lib.values_from_object(values)
dtype = values.dtype
if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values):
@@ -742,7 +734,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1, mask=None):
>>> nanops.nanvar(s)
1.0
"""
- values = com.values_from_object(values)
+ values = lib.values_from_object(values)
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if is_any_int_dtype(values):
@@ -943,7 +935,7 @@ def nanskew(values, axis=None, skipna=True, mask=None):
>>> nanops.nanskew(s)
1.7320508075688787
"""
- values = com.values_from_object(values)
+ values = lib.values_from_object(values)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
@@ -1022,7 +1014,7 @@ def nankurt(values, axis=None, skipna=True, mask=None):
>>> nanops.nankurt(s)
-1.2892561983471076
"""
- values = com.values_from_object(values)
+ values = lib.values_from_object(values)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
@@ -1302,10 +1294,12 @@ def _ensure_numeric(x):
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
- except Exception:
+ except ValueError:
+ # e.g. "1+1j" or "foo"
try:
x = complex(x)
- except Exception:
+ except ValueError:
+ # e.g. "foo"
raise TypeError(
"Could not convert {value!s} to numeric".format(value=x)
)
@@ -1393,6 +1387,16 @@ def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation):
-------
quantiles : scalar or array
"""
+ if values.dtype.kind in ["m", "M"]:
+ # need to cast to integer to avoid rounding errors in numpy
+ result = nanpercentile(
+ values.view("i8"), q, axis, na_value.view("i8"), mask, ndim, interpolation
+ )
+
+ # Note: we have to do do `astype` and not view because in general we
+ # have float result at this point, not i8
+ return result.astype(values.dtype)
+
if not lib.is_scalar(mask) and mask.any():
if ndim == 1:
return _nanpercentile_1d(
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 230abd6b301a6..16d2eaa410637 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,61 +5,39 @@
"""
import datetime
import operator
-import textwrap
-from typing import Any, Callable
-import warnings
+from typing import Tuple
import numpy as np
-from pandas._libs import Timedelta, Timestamp, lib, ops as libops
-from pandas.errors import NullFrequencyError
+from pandas._libs import Timedelta, Timestamp, lib
from pandas.util._decorators import Appender
-from pandas.core.dtypes.cast import (
- construct_1d_object_array_from_listlike,
- find_common_type,
- maybe_upcast_putmask,
-)
-from pandas.core.dtypes.common import (
- ensure_object,
- is_bool_dtype,
- is_categorical_dtype,
- is_datetime64_dtype,
- is_datetime64tz_dtype,
- is_datetimelike_v_numeric,
- is_extension_array_dtype,
- is_integer_dtype,
- is_list_like,
- is_object_dtype,
- is_period_dtype,
- is_scalar,
- is_timedelta64_dtype,
- needs_i8_conversion,
-)
-from pandas.core.dtypes.generic import (
- ABCDataFrame,
- ABCDatetimeArray,
- ABCIndex,
- ABCIndexClass,
- ABCSeries,
- ABCSparseArray,
- ABCSparseSeries,
- ABCTimedeltaArray,
-)
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
+from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.missing import isna
-import pandas as pd
-from pandas._typing import ArrayLike
-import pandas.core.common as com
-
-from . import missing
-from .docstrings import (
+from pandas.core.construction import extract_array
+from pandas.core.ops.array_ops import (
+ arithmetic_op,
+ comparison_op,
+ define_na_arithmetic_op,
+ logical_op,
+)
+from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY # noqa:F401
+from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
+from pandas.core.ops.dispatch import should_series_dispatch
+from pandas.core.ops.docstrings import (
_arith_doc_FRAME,
_flex_comp_doc_FRAME,
_make_flex_doc,
_op_descriptions,
)
-from .roperator import ( # noqa:F401
+from pandas.core.ops.invalid import invalid_comparison # noqa:F401
+from pandas.core.ops.methods import ( # noqa:F401
+ add_flex_arithmetic_methods,
+ add_special_arithmetic_methods,
+)
+from pandas.core.ops.roperator import ( # noqa:F401
radd,
rand_,
rdiv,
@@ -93,7 +71,7 @@ def get_op_result_name(left, right):
name : object
Usually a string
"""
- # `left` is always a pd.Series when called from within ops
+ # `left` is always a Series when called from within ops
if isinstance(right, (ABCSeries, ABCIndexClass)):
name = _maybe_match_name(left, right)
else:
@@ -136,7 +114,7 @@ def _maybe_match_name(a, b):
return None
-def maybe_upcast_for_op(obj):
+def maybe_upcast_for_op(obj, shape: Tuple[int, ...]):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
@@ -144,6 +122,7 @@ def maybe_upcast_for_op(obj):
Parameters
----------
obj: object
+ shape : tuple[int]
Returns
-------
@@ -154,52 +133,49 @@ def maybe_upcast_for_op(obj):
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
"""
+ from pandas.core.arrays import DatetimeArray, TimedeltaArray
+
if type(obj) is datetime.timedelta:
# GH#22390 cast up to Timedelta to rely on Timedelta
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return Timedelta(obj)
- elif isinstance(obj, np.timedelta64) and not isna(obj):
+ elif isinstance(obj, np.datetime64):
+ # GH#28080 numpy casts integer-dtype to datetime64 when doing
+ # array[int] + datetime64, which we do not allow
+ if isna(obj):
+ # Avoid possible ambiguities with pd.NaT
+ obj = obj.astype("datetime64[ns]")
+ right = np.broadcast_to(obj, shape)
+ return DatetimeArray(right)
+
+ return Timestamp(obj)
+
+ elif isinstance(obj, np.timedelta64):
+ if isna(obj):
+ # wrapping timedelta64("NaT") in Timedelta returns NaT,
+ # which would incorrectly be treated as a datetime-NaT, so
+ # we broadcast and wrap in a TimedeltaArray
+ obj = obj.astype("timedelta64[ns]")
+ right = np.broadcast_to(obj, shape)
+ return TimedeltaArray(right)
+
# In particular non-nanosecond timedelta64 needs to be cast to
# nanoseconds, or else we get undesired behavior like
# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
- # The isna check is to avoid casting timedelta64("NaT"), which would
- # return NaT and incorrectly be treated as a datetime-NaT.
return Timedelta(obj)
- elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
+
+ elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj.dtype):
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
# timedelta64 when operating with timedelta64
- return pd.TimedeltaIndex(obj)
+ return TimedeltaArray._from_sequence(obj)
return obj
# -----------------------------------------------------------------------------
-def make_invalid_op(name):
- """
- Return a binary method that always raises a TypeError.
-
- Parameters
- ----------
- name : str
-
- Returns
- -------
- invalid_op : function
- """
-
- def invalid_op(self, other=None):
- raise TypeError(
- "cannot perform {name} with this index type: "
- "{typ}".format(name=name, typ=type(self).__name__)
- )
-
- invalid_op.__name__ = name
- return invalid_op
-
-
def _gen_eval_kwargs(name):
"""
Find the keyword arguments to pass to numexpr for the given operation.
@@ -232,12 +208,6 @@ def _gen_eval_kwargs(name):
# Exclude commutative operations
kwargs["reversed"] = True
- if name in ["truediv", "rtruediv"]:
- kwargs["truediv"] = True
-
- if name in ["ne"]:
- kwargs["masker"] = True
-
return kwargs
@@ -266,7 +236,7 @@ def _get_frame_op_default_axis(name):
return "columns"
-def _get_opstr(op, cls):
+def _get_opstr(op):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
@@ -274,19 +244,11 @@ def _get_opstr(op, cls):
Parameters
----------
op : binary operator
- cls : class
Returns
-------
op_str : string or None
"""
- # numexpr is available for non-sparse classes
- subtyp = getattr(cls, "_subtyp", "")
- use_numexpr = "sparse" not in subtyp
-
- if not use_numexpr:
- # if we're not using numexpr, then don't pass a str_rep
- return None
return {
operator.add: "+",
@@ -379,169 +341,10 @@ def fill_binop(left, right, fill_value):
return left, right
-def mask_cmp_op(x, y, op):
- """
- Apply the function `op` to only non-null points in x and y.
-
- Parameters
- ----------
- x : array-like
- y : array-like
- op : binary operation
-
- Returns
- -------
- result : ndarray[bool]
- """
- xrav = x.ravel()
- result = np.empty(x.size, dtype=bool)
- if isinstance(y, (np.ndarray, ABCSeries)):
- yrav = y.ravel()
- mask = notna(xrav) & notna(yrav)
- result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask])))
- else:
- mask = notna(xrav)
- result[mask] = op(np.array(list(xrav[mask])), y)
-
- if op == operator.ne: # pragma: no cover
- np.putmask(result, ~mask, True)
- else:
- np.putmask(result, ~mask, False)
- result = result.reshape(x.shape)
- return result
-
-
-def masked_arith_op(x, y, op):
- """
- If the given arithmetic operation fails, attempt it again on
- only the non-null elements of the input array(s).
-
- Parameters
- ----------
- x : np.ndarray
- y : np.ndarray, Series, Index
- op : binary operator
- """
- # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
- # the logic valid for both Series and DataFrame ops.
- xrav = x.ravel()
- assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
- if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
- dtype = find_common_type([x.dtype, y.dtype])
- result = np.empty(x.size, dtype=dtype)
-
- # PeriodIndex.ravel() returns int64 dtype, so we have
- # to work around that case. See GH#19956
- yrav = y if is_period_dtype(y) else y.ravel()
- mask = notna(xrav) & notna(yrav)
-
- if yrav.shape != mask.shape:
- # FIXME: GH#5284, GH#5035, GH#19448
- # Without specifically raising here we get mismatched
- # errors in Py3 (TypeError) vs Py2 (ValueError)
- # Note: Only = an issue in DataFrame case
- raise ValueError("Cannot broadcast operands together.")
-
- if mask.any():
- with np.errstate(all="ignore"):
- result[mask] = op(xrav[mask], com.values_from_object(yrav[mask]))
-
- else:
- assert is_scalar(y), type(y)
- assert isinstance(x, np.ndarray), type(x)
- # mask is only meaningful for x
- result = np.empty(x.size, dtype=x.dtype)
- mask = notna(xrav)
-
- # 1 ** np.nan is 1. So we have to unmask those.
- if op == pow:
- mask = np.where(x == 1, False, mask)
- elif op == rpow:
- mask = np.where(y == 1, False, mask)
-
- if mask.any():
- with np.errstate(all="ignore"):
- result[mask] = op(xrav[mask], y)
-
- result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
- result = result.reshape(x.shape) # 2D compat
- return result
-
-
-def invalid_comparison(left, right, op):
- """
- If a comparison has mismatched types and is not necessarily meaningful,
- follow python3 conventions by:
-
- - returning all-False for equality
- - returning all-True for inequality
- - raising TypeError otherwise
-
- Parameters
- ----------
- left : array-like
- right : scalar, array-like
- op : operator.{eq, ne, lt, le, gt}
-
- Raises
- ------
- TypeError : on inequality comparisons
- """
- if op is operator.eq:
- res_values = np.zeros(left.shape, dtype=bool)
- elif op is operator.ne:
- res_values = np.ones(left.shape, dtype=bool)
- else:
- raise TypeError(
- "Invalid comparison between dtype={dtype} and {typ}".format(
- dtype=left.dtype, typ=type(right).__name__
- )
- )
- return res_values
-
-
# -----------------------------------------------------------------------------
# Dispatch logic
-def should_series_dispatch(left, right, op):
- """
- Identify cases where a DataFrame operation should dispatch to its
- Series counterpart.
-
- Parameters
- ----------
- left : DataFrame
- right : DataFrame
- op : binary operator
-
- Returns
- -------
- override : bool
- """
- if left._is_mixed_type or right._is_mixed_type:
- return True
-
- if not len(left.columns) or not len(right.columns):
- # ensure obj.dtypes[0] exists for each obj
- return False
-
- ldtype = left.dtypes.iloc[0]
- rdtype = right.dtypes.iloc[0]
-
- if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
- is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
- ):
- # numpy integer dtypes as timedelta64 dtypes in this scenario
- return True
-
- if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
- # in particular case where right is an array of DateOffsets
- return True
-
- return False
-
-
def dispatch_to_series(left, right, func, str_rep=None, axis=None):
"""
Evaluate the frame operation func(left, right) by evaluating
@@ -580,8 +383,19 @@ def column_op(a, b):
# in which case we specifically want to operate row-by-row
assert right.index.equals(left.columns)
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))}
+ if right.dtype == "timedelta64[ns]":
+ # ensure we treat NaT values as the correct dtype
+ # Note: we do not do this unconditionally as it may be lossy or
+ # expensive for EA dtypes.
+ right = np.asarray(right)
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b[i]) for i in range(len(a.columns))}
+
+ else:
+
+ def column_op(a, b):
+ return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))}
elif isinstance(right, ABCSeries):
assert right.index.equals(left.index) # Handle other cases later
@@ -594,295 +408,7 @@ def column_op(a, b):
raise NotImplementedError(right)
new_data = expressions.evaluate(column_op, str_rep, left, right)
-
- result = left._constructor(new_data, index=left.index, copy=False)
- # Pin columns instead of passing to constructor for compat with
- # non-unique columns case
- result.columns = left.columns
- return result
-
-
-def dispatch_to_index_op(op, left, right, index_class):
- """
- Wrap Series left in the given index_class to delegate the operation op
- to the index implementation. DatetimeIndex and TimedeltaIndex perform
- type checking, timezone handling, overflow checks, etc.
-
- Parameters
- ----------
- op : binary operator (operator.add, operator.sub, ...)
- left : Series
- right : object
- index_class : DatetimeIndex or TimedeltaIndex
-
- Returns
- -------
- result : object, usually DatetimeIndex, TimedeltaIndex, or Series
- """
- left_idx = index_class(left)
-
- # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
- # left_idx may inherit a freq from a cached DatetimeIndex.
- # See discussion in GH#19147.
- if getattr(left_idx, "freq", None) is not None:
- left_idx = left_idx._shallow_copy(freq=None)
- try:
- result = op(left_idx, right)
- except NullFrequencyError:
- # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
- # on add/sub of integers (or int-like). We re-raise as a TypeError.
- raise TypeError(
- "incompatible type for a datetime/timedelta "
- "operation [{name}]".format(name=op.__name__)
- )
- return result
-
-
-def dispatch_to_extension_op(op, left, right):
- """
- Assume that left or right is a Series backed by an ExtensionArray,
- apply the operator defined by op.
- """
-
- # The op calls will raise TypeError if the op is not defined
- # on the ExtensionArray
-
- # unbox Series and Index to arrays
- if isinstance(left, (ABCSeries, ABCIndexClass)):
- new_left = left._values
- else:
- new_left = left
-
- if isinstance(right, (ABCSeries, ABCIndexClass)):
- new_right = right._values
- else:
- new_right = right
-
- res_values = op(new_left, new_right)
- res_name = get_op_result_name(left, right)
-
- if op.__name__ in ["divmod", "rdivmod"]:
- return _construct_divmod_result(left, res_values, left.index, res_name)
-
- return _construct_result(left, res_values, left.index, res_name)
-
-
-# -----------------------------------------------------------------------------
-# Functions that add arithmetic methods to objects, given arithmetic factory
-# methods
-
-
-def _get_method_wrappers(cls):
- """
- Find the appropriate operation-wrappers to use when defining flex/special
- arithmetic, boolean, and comparison operations with the given class.
-
- Parameters
- ----------
- cls : class
-
- Returns
- -------
- arith_flex : function or None
- comp_flex : function or None
- arith_special : function
- comp_special : function
- bool_special : function
-
- Notes
- -----
- None is only returned for SparseArray
- """
- if issubclass(cls, ABCSparseSeries):
- # Be sure to catch this before ABCSeries and ABCSparseArray,
- # as they will both come see SparseSeries as a subclass
- arith_flex = _flex_method_SERIES
- comp_flex = _flex_method_SERIES
- arith_special = _arith_method_SPARSE_SERIES
- comp_special = _arith_method_SPARSE_SERIES
- bool_special = _bool_method_SERIES
- # TODO: I don't think the functions defined by bool_method are tested
- elif issubclass(cls, ABCSeries):
- # Just Series; SparseSeries is caught above
- arith_flex = _flex_method_SERIES
- comp_flex = _flex_method_SERIES
- arith_special = _arith_method_SERIES
- comp_special = _comp_method_SERIES
- bool_special = _bool_method_SERIES
- elif issubclass(cls, ABCDataFrame):
- # Same for DataFrame and SparseDataFrame
- arith_flex = _arith_method_FRAME
- comp_flex = _flex_comp_method_FRAME
- arith_special = _arith_method_FRAME
- comp_special = _comp_method_FRAME
- bool_special = _arith_method_FRAME
- return arith_flex, comp_flex, arith_special, comp_special, bool_special
-
-
-def _create_methods(cls, arith_method, comp_method, bool_method, special):
- # creates actual methods based upon arithmetic, comp and bool method
- # constructors.
-
- have_divmod = issubclass(cls, ABCSeries)
- # divmod is available for Series and SparseSeries
-
- # yapf: disable
- new_methods = dict(
- add=arith_method(cls, operator.add, special),
- radd=arith_method(cls, radd, special),
- sub=arith_method(cls, operator.sub, special),
- mul=arith_method(cls, operator.mul, special),
- truediv=arith_method(cls, operator.truediv, special),
- floordiv=arith_method(cls, operator.floordiv, special),
- # Causes a floating point exception in the tests when numexpr enabled,
- # so for now no speedup
- mod=arith_method(cls, operator.mod, special),
- pow=arith_method(cls, operator.pow, special),
- # not entirely sure why this is necessary, but previously was included
- # so it's here to maintain compatibility
- rmul=arith_method(cls, rmul, special),
- rsub=arith_method(cls, rsub, special),
- rtruediv=arith_method(cls, rtruediv, special),
- rfloordiv=arith_method(cls, rfloordiv, special),
- rpow=arith_method(cls, rpow, special),
- rmod=arith_method(cls, rmod, special))
- # yapf: enable
- new_methods["div"] = new_methods["truediv"]
- new_methods["rdiv"] = new_methods["rtruediv"]
- if have_divmod:
- # divmod doesn't have an op that is supported by numexpr
- new_methods["divmod"] = arith_method(cls, divmod, special)
- new_methods["rdivmod"] = arith_method(cls, rdivmod, special)
-
- new_methods.update(
- dict(
- eq=comp_method(cls, operator.eq, special),
- ne=comp_method(cls, operator.ne, special),
- lt=comp_method(cls, operator.lt, special),
- gt=comp_method(cls, operator.gt, special),
- le=comp_method(cls, operator.le, special),
- ge=comp_method(cls, operator.ge, special),
- )
- )
-
- if bool_method:
- new_methods.update(
- dict(
- and_=bool_method(cls, operator.and_, special),
- or_=bool_method(cls, operator.or_, special),
- # For some reason ``^`` wasn't used in original.
- xor=bool_method(cls, operator.xor, special),
- rand_=bool_method(cls, rand_, special),
- ror_=bool_method(cls, ror_, special),
- rxor=bool_method(cls, rxor, special),
- )
- )
-
- if special:
- dunderize = lambda x: "__{name}__".format(name=x.strip("_"))
- else:
- dunderize = lambda x: x
- new_methods = {dunderize(k): v for k, v in new_methods.items()}
- return new_methods
-
-
-def add_methods(cls, new_methods):
- for name, method in new_methods.items():
- # For most methods, if we find that the class already has a method
- # of the same name, it is OK to over-write it. The exception is
- # inplace methods (__iadd__, __isub__, ...) for SparseArray, which
- # retain the np.ndarray versions.
- force = not (issubclass(cls, ABCSparseArray) and name.startswith("__i"))
- if force or name not in cls.__dict__:
- setattr(cls, name, method)
-
-
-# ----------------------------------------------------------------------
-# Arithmetic
-def add_special_arithmetic_methods(cls):
- """
- Adds the full suite of special arithmetic methods (``__add__``,
- ``__sub__``, etc.) to the class.
-
- Parameters
- ----------
- cls : class
- special methods will be defined and pinned to this class
- """
- _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
- new_methods = _create_methods(
- cls, arith_method, comp_method, bool_method, special=True
- )
- # inplace operators (I feel like these should get passed an `inplace=True`
- # or just be removed
-
- def _wrap_inplace_method(method):
- """
- return an inplace wrapper for this method
- """
-
- def f(self, other):
- result = method(self, other)
-
- # this makes sure that we are aligned like the input
- # we are updating inplace so we want to ignore is_copy
- self._update_inplace(
- result.reindex_like(self, copy=False)._data, verify_is_copy=False
- )
-
- return self
-
- f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
- return f
-
- new_methods.update(
- dict(
- __iadd__=_wrap_inplace_method(new_methods["__add__"]),
- __isub__=_wrap_inplace_method(new_methods["__sub__"]),
- __imul__=_wrap_inplace_method(new_methods["__mul__"]),
- __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
- __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
- __imod__=_wrap_inplace_method(new_methods["__mod__"]),
- __ipow__=_wrap_inplace_method(new_methods["__pow__"]),
- )
- )
-
- new_methods.update(
- dict(
- __iand__=_wrap_inplace_method(new_methods["__and__"]),
- __ior__=_wrap_inplace_method(new_methods["__or__"]),
- __ixor__=_wrap_inplace_method(new_methods["__xor__"]),
- )
- )
-
- add_methods(cls, new_methods=new_methods)
-
-
-def add_flex_arithmetic_methods(cls):
- """
- Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
- to the class.
-
- Parameters
- ----------
- cls : class
- flex methods will be defined and pinned to this class
- """
- flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls)
- new_methods = _create_methods(
- cls, flex_arith_method, flex_comp_method, bool_method=None, special=False
- )
- new_methods.update(
- dict(
- multiply=new_methods["mul"],
- subtract=new_methods["sub"],
- divide=new_methods["div"],
- )
- )
- # opt out of bool flex methods for now
- assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))
-
- add_methods(cls, new_methods=new_methods)
+ return new_data
# -----------------------------------------------------------------------------
@@ -918,6 +444,9 @@ def _construct_result(left, result, index, name, dtype=None):
"""
out = left._constructor(result, index=index, dtype=dtype)
out = out.__finalize__(left)
+
+ # Set the result's name after __finalize__ is called because __finalize__
+ # would set it back to self.name
out.name = name
return out
@@ -936,307 +465,55 @@ def _arith_method_SERIES(cls, op, special):
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
- str_rep = _get_opstr(op, cls)
+ str_rep = _get_opstr(op)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
construct_result = (
_construct_divmod_result if op in [divmod, rdivmod] else _construct_result
)
- def na_op(x, y):
- """
- Return the result of evaluating op on the passed in values.
-
- If native types are not compatible, try coersion to object dtype.
-
- Parameters
- ----------
- x : array-like
- y : array-like or scalar
-
- Returns
- -------
- array-like
-
- Raises
- ------
- TypeError : invalid operation
- """
- import pandas.core.computation.expressions as expressions
-
- try:
- result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
- except TypeError:
- result = masked_arith_op(x, y, op)
-
- return missing.dispatch_fill_zeros(op, x, y, result)
-
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
- right = maybe_upcast_for_op(right)
- if is_categorical_dtype(left):
- raise TypeError(
- "{typ} cannot perform the operation "
- "{op}".format(typ=type(left).__name__, op=str_rep)
- )
-
- elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
- # Give dispatch_to_index_op a chance for tests like
- # test_dt64_series_add_intlike, which the index dispatching handles
- # specifically.
- result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
- return construct_result(
- left, result, index=left.index, name=res_name, dtype=result.dtype
- )
-
- elif is_extension_array_dtype(left) or (
- is_extension_array_dtype(right) and not is_scalar(right)
- ):
- # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
- return dispatch_to_extension_op(op, left, right)
-
- elif is_timedelta64_dtype(left):
- result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
- return construct_result(left, result, index=left.index, name=res_name)
-
- elif is_timedelta64_dtype(right):
- # We should only get here with non-scalar or timedelta64('NaT')
- # values for right
- # Note: we cannot use dispatch_to_index_op because
- # that may incorrectly raise TypeError when we
- # should get NullFrequencyError
- orig_right = right
- if is_scalar(right):
- # broadcast and wrap in a TimedeltaIndex
- assert np.isnat(right)
- right = np.broadcast_to(right, left.shape)
- right = pd.TimedeltaIndex(right)
-
- assert isinstance(right, (pd.TimedeltaIndex, ABCTimedeltaArray, ABCSeries))
- try:
- result = op(left._values, right)
- except NullFrequencyError:
- if orig_right is not right:
- # i.e. scalar timedelta64('NaT')
- # We get a NullFrequencyError because we broadcast to
- # TimedeltaIndex, but this should be TypeError.
- raise TypeError(
- "incompatible type for a datetime/timedelta "
- "operation [{name}]".format(name=op.__name__)
- )
- raise
-
- # We do not pass dtype to ensure that the Series constructor
- # does inference in the case where `result` has object-dtype.
- return construct_result(left, result, index=left.index, name=res_name)
-
- elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)):
- result = op(left._values, right)
- return construct_result(left, result, index=left.index, name=res_name)
-
- lvalues = left.values
- rvalues = right
- if isinstance(rvalues, (ABCSeries, ABCIndexClass)):
- rvalues = rvalues._values
+ lvalues = extract_array(left, extract_numpy=True)
+ result = arithmetic_op(lvalues, right, op, str_rep, eval_kwargs)
- with np.errstate(all="ignore"):
- result = na_op(lvalues, rvalues)
- return construct_result(
- left, result, index=left.index, name=res_name, dtype=None
- )
+ # We do not pass dtype to ensure that the Series constructor
+ # does inference in the case where `result` has object-dtype.
+ return construct_result(left, result, index=left.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
-def _comp_method_OBJECT_ARRAY(op, x, y):
- if isinstance(y, list):
- y = construct_1d_object_array_from_listlike(y)
- if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
- if not is_object_dtype(y.dtype):
- y = y.astype(np.object_)
-
- if isinstance(y, (ABCSeries, ABCIndex)):
- y = y.values
-
- result = libops.vec_compare(x, y, op)
- else:
- result = libops.scalar_compare(x, y, op)
- return result
-
-
def _comp_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
- masker = _gen_eval_kwargs(op_name).get("masker", False)
-
- def na_op(x, y):
- # TODO:
- # should have guarantess on what x, y can be type-wise
- # Extension Dtypes are not called here
-
- # Checking that cases that were once handled here are no longer
- # reachable.
- assert not (is_categorical_dtype(y) and not is_scalar(y))
-
- if is_object_dtype(x.dtype):
- result = _comp_method_OBJECT_ARRAY(op, x, y)
- elif is_datetimelike_v_numeric(x, y):
- return invalid_comparison(x, y, op)
-
- else:
-
- # we want to compare like types
- # we only want to convert to integer like if
- # we are not NotImplemented, otherwise
- # we would allow datetime64 (but viewed as i8) against
- # integer comparisons
-
- # we have a datetime/timedelta and may need to convert
- assert not needs_i8_conversion(x)
- mask = None
- if not is_scalar(y) and needs_i8_conversion(y):
- mask = isna(x) | isna(y)
- y = y.view("i8")
- x = x.view("i8")
-
- method = getattr(x, op_name, None)
- if method is not None:
- with np.errstate(all="ignore"):
- result = method(y)
- if result is NotImplemented:
- return invalid_comparison(x, y, op)
- else:
- result = op(x, y)
-
- if mask is not None and mask.any():
- result[mask] = masker
-
- return result
-
- def wrapper(self, other, axis=None):
- # Validate the axis parameter
- if axis is not None:
- self._get_axis_number(axis)
+ def wrapper(self, other):
res_name = get_op_result_name(self, other)
- if isinstance(other, list):
- # TODO: same for tuples?
- other = np.asarray(other)
-
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
- elif isinstance(other, ABCSeries) and not self._indexed_same(other):
- raise ValueError("Can only compare identically-labeled " "Series objects")
-
- elif is_categorical_dtype(self):
- # Dispatch to Categorical implementation; pd.CategoricalIndex
- # behavior is non-canonical GH#19513
- res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
- return self._constructor(res_values, index=self.index, name=res_name)
-
- elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
- # Dispatch to DatetimeIndex to ensure identical
- # Series/Index behavior
- if isinstance(other, datetime.date) and not isinstance(
- other, datetime.datetime
- ):
- # https://github.com/pandas-dev/pandas/issues/21152
- # Compatibility for difference between Series comparison w/
- # datetime and date
- msg = (
- "Comparing Series of datetimes with 'datetime.date'. "
- "Currently, the 'datetime.date' is coerced to a "
- "datetime. In the future pandas will not coerce, "
- "and {future}. "
- "To retain the current behavior, "
- "convert the 'datetime.date' to a datetime with "
- "'pd.Timestamp'."
- )
-
- if op in {operator.lt, operator.le, operator.gt, operator.ge}:
- future = "a TypeError will be raised"
- else:
- future = (
- "'the values will not compare equal to the " "'datetime.date'"
- )
- msg = "\n".join(textwrap.wrap(msg.format(future=future)))
- warnings.warn(msg, FutureWarning, stacklevel=2)
- other = Timestamp(other)
+ if isinstance(other, ABCSeries) and not self._indexed_same(other):
+ raise ValueError("Can only compare identically-labeled Series objects")
- res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex)
+ lvalues = extract_array(self, extract_numpy=True)
+ rvalues = extract_array(other, extract_numpy=True)
- return self._constructor(res_values, index=self.index, name=res_name)
+ res_values = comparison_op(lvalues, rvalues, op)
- elif is_timedelta64_dtype(self):
- res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex)
- return self._constructor(res_values, index=self.index, name=res_name)
-
- elif is_extension_array_dtype(self) or (
- is_extension_array_dtype(other) and not is_scalar(other)
- ):
- # Note: the `not is_scalar(other)` condition rules out
- # e.g. other == "category"
- return dispatch_to_extension_op(op, self, other)
-
- elif isinstance(other, ABCSeries):
- # By this point we have checked that self._indexed_same(other)
- res_values = na_op(self.values, other.values)
- # rename is needed in case res_name is None and res_values.name
- # is not.
- return self._constructor(
- res_values, index=self.index, name=res_name
- ).rename(res_name)
-
- elif isinstance(other, (np.ndarray, ABCIndexClass)):
- # do not check length of zerodim array
- # as it will broadcast
- if other.ndim != 0 and len(self) != len(other):
- raise ValueError("Lengths must match to compare")
-
- res_values = na_op(self.values, np.asarray(other))
- result = self._constructor(res_values, index=self.index)
- # rename is needed in case res_name is None and self.name
- # is not.
- return result.__finalize__(self).rename(res_name)
-
- elif is_scalar(other) and isna(other):
- # numpy does not like comparisons vs None
- if op is operator.ne:
- res_values = np.ones(len(self), dtype=bool)
- else:
- res_values = np.zeros(len(self), dtype=bool)
- return self._constructor(
- res_values, index=self.index, name=res_name, dtype="bool"
- )
-
- else:
- values = self.to_numpy()
-
- with np.errstate(all="ignore"):
- res = na_op(values, other)
- if is_scalar(res):
- raise TypeError(
- "Could not compare {typ} type with Series".format(typ=type(other))
- )
-
- # always return a full value series here
- res_values = com.values_from_object(res)
- return self._constructor(
- res_values, index=self.index, name=res_name, dtype="bool"
- )
+ return _construct_result(self, res_values, index=self.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
@@ -1249,46 +526,7 @@ def _bool_method_SERIES(cls, op, special):
"""
op_name = _get_op_name(op, special)
- def na_op(x, y):
- try:
- result = op(x, y)
- except TypeError:
- assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
- if isinstance(y, np.ndarray):
- # bool-bool dtype operations should be OK, should not get here
- assert not (is_bool_dtype(x) and is_bool_dtype(y))
- x = ensure_object(x)
- y = ensure_object(y)
- result = libops.vec_binop(x, y, op)
- else:
- # let null fall thru
- assert lib.is_scalar(y)
- if not isna(y):
- y = bool(y)
- try:
- result = libops.scalar_binop(x, y, op)
- except (
- TypeError,
- ValueError,
- AttributeError,
- OverflowError,
- NotImplementedError,
- ):
- raise TypeError(
- "cannot compare a dtyped [{dtype}] array "
- "with a scalar of type [{typ}]".format(
- dtype=x.dtype, typ=type(y).__name__
- )
- )
-
- return result
-
- fill_int = lambda x: x.fillna(0)
- fill_bool = lambda x: x.fillna(False).astype(bool)
-
def wrapper(self, other):
- is_self_int_dtype = is_integer_dtype(self.dtype)
-
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
@@ -1296,32 +534,11 @@ def wrapper(self, other):
# Defer to DataFrame implementation; fail early
return NotImplemented
- elif isinstance(other, (ABCSeries, ABCIndexClass)):
- is_other_int_dtype = is_integer_dtype(other.dtype)
- other = fill_int(other) if is_other_int_dtype else fill_bool(other)
+ lvalues = extract_array(self, extract_numpy=True)
+ rvalues = extract_array(other, extract_numpy=True)
- ovalues = other.values
- finalizer = lambda x: x
-
- else:
- # scalars, list, tuple, np.array
- is_other_int_dtype = is_integer_dtype(np.asarray(other))
- if is_list_like(other) and not isinstance(other, np.ndarray):
- # TODO: Can we do this before the is_integer_dtype check?
- # could the is_integer_dtype check be checking the wrong
- # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
- other = construct_1d_object_array_from_listlike(other)
-
- ovalues = other
- finalizer = lambda x: x.__finalize__(self)
-
- # For int vs int `^`, `|`, `&` are bitwise operators and return
- # integer dtypes. Otherwise these are boolean ops
- filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
- res_values = na_op(self.values, ovalues)
- unfilled = self._constructor(res_values, index=self.index, name=res_name)
- filled = filler(unfilled)
- return finalizer(filled)
+ res_values = logical_op(lvalues, rvalues, op)
+ return _construct_result(self, res_values, index=self.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
@@ -1386,27 +603,16 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N
return self._combine_match_index(other, func, level=level)
else:
return self._combine_match_columns(other, func, level=level)
- else:
- if not len(other):
- return self * np.nan
-
- if not len(self):
- # Ambiguous case, use _series so works with DataFrame
- return self._constructor(
- data=self._series, index=self.index, columns=self.columns
- )
- # default axis is columns
- return self._combine_match_columns(other, func, level=level)
+ # default axis is columns
+ return self._combine_match_columns(other, func, level=level)
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
- msg = (
- "Unable to coerce to Series, length must be {req_len}: " "given {given_len}"
- )
+ msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
if axis is not None and left._get_axis_name(axis) == "index":
if len(left.index) != len(right):
raise ValueError(
@@ -1461,20 +667,12 @@ def to_series(right):
def _arith_method_FRAME(cls, op, special):
- str_rep = _get_opstr(op, cls)
+ str_rep = _get_opstr(op)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
default_axis = _get_frame_op_default_axis(op_name)
- def na_op(x, y):
- import pandas.core.computation.expressions as expressions
-
- try:
- result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
- except TypeError:
- result = masked_arith_op(x, y, op)
-
- return missing.dispatch_fill_zeros(op, x, y, result)
+ na_op = define_na_arithmetic_op(op, str_rep, eval_kwargs)
if op_name in _op_descriptions:
# i.e. include "add" but not "__add__"
@@ -1499,11 +697,12 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
self, other, pass_op, fill_value=fill_value, axis=axis, level=level
)
else:
+ # in this case we always have `np.ndim(other) == 0`
if fill_value is not None:
self = self.fillna(fill_value)
- assert np.ndim(other) == 0
- return self._combine_const(other, op)
+ new_data = dispatch_to_series(self, other, op)
+ return self._construct_result(new_data)
f.__name__ = op_name
@@ -1511,18 +710,10 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
def _flex_comp_method_FRAME(cls, op, special):
- str_rep = _get_opstr(op, cls)
+ str_rep = _get_opstr(op)
op_name = _get_op_name(op, special)
default_axis = _get_frame_op_default_axis(op_name)
- def na_op(x, y):
- try:
- with np.errstate(invalid="ignore"):
- result = op(x, y)
- except TypeError:
- result = mask_cmp_op(x, y, op)
- return result
-
doc = _flex_comp_doc_FRAME.format(
op_name=op_name, desc=_op_descriptions[op_name]["desc"]
)
@@ -1536,15 +727,17 @@ def f(self, other, axis=default_axis, level=None):
# Another DataFrame
if not self._indexed_same(other):
self, other = self.align(other, "outer", level=level, copy=False)
- return dispatch_to_series(self, other, na_op, str_rep)
+ new_data = dispatch_to_series(self, other, op, str_rep)
+ return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
- self, other, na_op, fill_value=None, axis=axis, level=level
+ self, other, op, fill_value=None, axis=axis, level=level
)
else:
- assert np.ndim(other) == 0, other
- return self._combine_const(other, na_op)
+ # in this case we always have `np.ndim(other) == 0`
+ new_data = dispatch_to_series(self, other, op)
+ return self._construct_result(new_data)
f.__name__ = op_name
@@ -1552,7 +745,7 @@ def f(self, other, axis=default_axis, level=None):
def _comp_method_FRAME(cls, func, special):
- str_rep = _get_opstr(func, cls)
+ str_rep = _get_opstr(func)
op_name = _get_op_name(func, special)
@Appender("Wrapper for comparison method {name}".format(name=op_name))
@@ -1564,9 +757,10 @@ def f(self, other):
# Another DataFrame
if not self._indexed_same(other):
raise ValueError(
- "Can only compare identically-labeled " "DataFrame objects"
+ "Can only compare identically-labeled DataFrame objects"
)
- return dispatch_to_series(self, other, func, str_rep)
+ new_data = dispatch_to_series(self, other, func, str_rep)
+ return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
@@ -1576,170 +770,9 @@ def f(self, other):
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
- res = self._combine_const(other, func)
- return res.fillna(True).astype(bool)
+ new_data = dispatch_to_series(self, other, func)
+ return self._construct_result(new_data)
f.__name__ = op_name
return f
-
-
-# -----------------------------------------------------------------------------
-# Sparse
-
-
-def _cast_sparse_series_op(left, right, opname):
- """
- For SparseSeries operation, coerce to float64 if the result is expected
- to have NaN or inf values
-
- Parameters
- ----------
- left : SparseArray
- right : SparseArray
- opname : str
-
- Returns
- -------
- left : SparseArray
- right : SparseArray
- """
- from pandas.core.sparse.api import SparseDtype
-
- opname = opname.strip("_")
-
- # TODO: This should be moved to the array?
- if is_integer_dtype(left) and is_integer_dtype(right):
- # series coerces to float64 if result should have NaN/inf
- if opname in ("floordiv", "mod") and (right.to_dense() == 0).any():
- left = left.astype(SparseDtype(np.float64, left.fill_value))
- right = right.astype(SparseDtype(np.float64, right.fill_value))
- elif opname in ("rfloordiv", "rmod") and (left.to_dense() == 0).any():
- left = left.astype(SparseDtype(np.float64, left.fill_value))
- right = right.astype(SparseDtype(np.float64, right.fill_value))
-
- return left, right
-
-
-def _arith_method_SPARSE_SERIES(cls, op, special):
- """
- Wrapper function for Series arithmetic operations, to avoid
- code duplication.
- """
- op_name = _get_op_name(op, special)
-
- def wrapper(self, other):
- if isinstance(other, ABCDataFrame):
- return NotImplemented
- elif isinstance(other, ABCSeries):
- if not isinstance(other, ABCSparseSeries):
- other = other.to_sparse(fill_value=self.fill_value)
- return _sparse_series_op(self, other, op, op_name)
- elif is_scalar(other):
- with np.errstate(all="ignore"):
- new_values = op(self.values, other)
- return self._constructor(new_values, index=self.index, name=self.name)
- else: # pragma: no cover
- raise TypeError(
- "operation with {other} not supported".format(other=type(other))
- )
-
- wrapper.__name__ = op_name
- return wrapper
-
-
-def _sparse_series_op(left, right, op, name):
- left, right = left.align(right, join="outer", copy=False)
- new_index = left.index
- new_name = get_op_result_name(left, right)
-
- from pandas.core.arrays.sparse import _sparse_array_op
-
- lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name)
- result = _sparse_array_op(lvalues, rvalues, op, name)
- return left._constructor(result, index=new_index, name=new_name)
-
-
-def maybe_dispatch_ufunc_to_dunder_op(
- self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any
-):
- """
- Dispatch a ufunc to the equivalent dunder method.
-
- Parameters
- ----------
- self : ArrayLike
- The array whose dunder method we dispatch to
- ufunc : Callable
- A NumPy ufunc
- method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
- inputs : ArrayLike
- The input arrays.
- kwargs : Any
- The additional keyword arguments, e.g. ``out``.
-
- Returns
- -------
- result : Any
- The result of applying the ufunc
- """
- # special has the ufuncs we dispatch to the dunder op on
- special = {
- "add",
- "sub",
- "mul",
- "pow",
- "mod",
- "floordiv",
- "truediv",
- "divmod",
- "eq",
- "ne",
- "lt",
- "gt",
- "le",
- "ge",
- "remainder",
- "matmul",
- }
- aliases = {
- "subtract": "sub",
- "multiply": "mul",
- "floor_divide": "floordiv",
- "true_divide": "truediv",
- "power": "pow",
- "remainder": "mod",
- "divide": "div",
- "equal": "eq",
- "not_equal": "ne",
- "less": "lt",
- "less_equal": "le",
- "greater": "gt",
- "greater_equal": "ge",
- }
-
- # For op(., Array) -> Array.__r{op}__
- flipped = {
- "lt": "__gt__",
- "le": "__ge__",
- "gt": "__lt__",
- "ge": "__le__",
- "eq": "__eq__",
- "ne": "__ne__",
- }
-
- op_name = ufunc.__name__
- op_name = aliases.get(op_name, op_name)
-
- def not_implemented(*args, **kwargs):
- return NotImplemented
-
- if method == "__call__" and op_name in special and kwargs.get("out") is None:
- if isinstance(inputs[0], type(self)):
- name = "__{}__".format(op_name)
- return getattr(self, name, not_implemented)(inputs[1])
- else:
- name = flipped.get(op_name, "__r{}__".format(op_name))
- return getattr(self, name, not_implemented)(inputs[0])
- else:
- return NotImplemented
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
new file mode 100644
index 0000000000000..55b4b1a899f65
--- /dev/null
+++ b/pandas/core/ops/array_ops.py
@@ -0,0 +1,380 @@
+"""
+Functions for arithmetic and comparison operations on NumPy arrays and
+ExtensionArrays.
+"""
+import operator
+from typing import Any, Dict, Union
+
+import numpy as np
+
+from pandas._libs import Timestamp, lib, ops as libops
+
+from pandas.core.dtypes.cast import (
+ construct_1d_object_array_from_listlike,
+ find_common_type,
+ maybe_upcast_putmask,
+)
+from pandas.core.dtypes.common import (
+ ensure_object,
+ is_bool_dtype,
+ is_integer_dtype,
+ is_list_like,
+ is_object_dtype,
+ is_scalar,
+)
+from pandas.core.dtypes.generic import (
+ ABCDatetimeArray,
+ ABCDatetimeIndex,
+ ABCExtensionArray,
+ ABCIndex,
+ ABCIndexClass,
+ ABCSeries,
+ ABCTimedeltaArray,
+ ABCTimedeltaIndex,
+)
+from pandas.core.dtypes.missing import isna, notna
+
+from pandas.core.construction import extract_array
+from pandas.core.ops import missing
+from pandas.core.ops.dispatch import dispatch_to_extension_op, should_extension_dispatch
+from pandas.core.ops.invalid import invalid_comparison
+from pandas.core.ops.roperator import rpow
+
+
+def comp_method_OBJECT_ARRAY(op, x, y):
+ if isinstance(y, list):
+ y = construct_1d_object_array_from_listlike(y)
+
+ # TODO: Should the checks below be ABCIndexClass?
+ if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
+ # TODO: should this be ABCIndexClass??
+ if not is_object_dtype(y.dtype):
+ y = y.astype(np.object_)
+
+ if isinstance(y, (ABCSeries, ABCIndex)):
+ y = y.values
+
+ result = libops.vec_compare(x, y, op)
+ else:
+ result = libops.scalar_compare(x, y, op)
+ return result
+
+
+def masked_arith_op(x, y, op):
+ """
+ If the given arithmetic operation fails, attempt it again on
+ only the non-null elements of the input array(s).
+
+ Parameters
+ ----------
+ x : np.ndarray
+ y : np.ndarray, Series, Index
+ op : binary operator
+ """
+ # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
+ # the logic valid for both Series and DataFrame ops.
+ xrav = x.ravel()
+ assert isinstance(x, np.ndarray), type(x)
+ if isinstance(y, np.ndarray):
+ dtype = find_common_type([x.dtype, y.dtype])
+ result = np.empty(x.size, dtype=dtype)
+
+ # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
+ # we would get int64 dtype, see GH#19956
+ yrav = y.ravel()
+ mask = notna(xrav) & notna(yrav)
+
+ if yrav.shape != mask.shape:
+ # FIXME: GH#5284, GH#5035, GH#19448
+ # Without specifically raising here we get mismatched
+ # errors in Py3 (TypeError) vs Py2 (ValueError)
+ # Note: Only = an issue in DataFrame case
+ raise ValueError("Cannot broadcast operands together.")
+
+ if mask.any():
+ with np.errstate(all="ignore"):
+ result[mask] = op(xrav[mask], yrav[mask])
+
+ else:
+ if not is_scalar(y):
+ raise TypeError(type(y))
+
+ # mask is only meaningful for x
+ result = np.empty(x.size, dtype=x.dtype)
+ mask = notna(xrav)
+
+ # 1 ** np.nan is 1. So we have to unmask those.
+ if op is pow:
+ mask = np.where(x == 1, False, mask)
+ elif op is rpow:
+ mask = np.where(y == 1, False, mask)
+
+ if mask.any():
+ with np.errstate(all="ignore"):
+ result[mask] = op(xrav[mask], y)
+
+ result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
+ result = result.reshape(x.shape) # 2D compat
+ return result
+
+
+def define_na_arithmetic_op(op, str_rep, eval_kwargs):
+ def na_op(x, y):
+ return na_arithmetic_op(x, y, op, str_rep, eval_kwargs)
+
+ return na_op
+
+
+def na_arithmetic_op(left, right, op, str_rep, eval_kwargs):
+ """
+ Return the result of evaluating op on the passed in values.
+
+ If native types are not compatible, try coersion to object dtype.
+
+ Parameters
+ ----------
+ left : np.ndarray
+ right : np.ndarray or scalar
+ str_rep : str or None
+ eval_kwargs : kwargs to pass to expressions
+
+ Returns
+ -------
+ array-like
+
+ Raises
+ ------
+ TypeError : invalid operation
+ """
+ import pandas.core.computation.expressions as expressions
+
+ try:
+ result = expressions.evaluate(op, str_rep, left, right, **eval_kwargs)
+ except TypeError:
+ result = masked_arith_op(left, right, op)
+
+ return missing.dispatch_fill_zeros(op, left, right, result)
+
+
+def arithmetic_op(
+ left: Union[np.ndarray, ABCExtensionArray],
+ right: Any,
+ op,
+ str_rep: str,
+ eval_kwargs: Dict[str, str],
+):
+ """
+ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
+
+ Parameters
+ ----------
+ left : np.ndarray or ExtensionArray
+ right : object
+ Cannot be a DataFrame or Index. Series is *not* excluded.
+ op : {operator.add, operator.sub, ...}
+ Or one of the reversed variants from roperator.
+
+ Returns
+ -------
+ ndarrray or ExtensionArray
+ Or a 2-tuple of these in the case of divmod or rdivmod.
+ """
+
+ from pandas.core.ops import maybe_upcast_for_op
+
+ keep_null_freq = isinstance(
+ right,
+ (
+ ABCDatetimeIndex,
+ ABCDatetimeArray,
+ ABCTimedeltaIndex,
+ ABCTimedeltaArray,
+ Timestamp,
+ ),
+ )
+
+ # NB: We assume that extract_array has already been called on `left`, but
+ # cannot make the same assumption about `right`. This is because we need
+ # to define `keep_null_freq` before calling extract_array on it.
+ lvalues = left
+ rvalues = extract_array(right, extract_numpy=True)
+
+ rvalues = maybe_upcast_for_op(rvalues, lvalues.shape)
+
+ if should_extension_dispatch(left, rvalues) or isinstance(
+ rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp)
+ ):
+ # TimedeltaArray, DatetimeArray, and Timestamp are included here
+ # because they have `freq` attribute which is handled correctly
+ # by dispatch_to_extension_op.
+ res_values = dispatch_to_extension_op(op, lvalues, rvalues, keep_null_freq)
+
+ else:
+ with np.errstate(all="ignore"):
+ res_values = na_arithmetic_op(lvalues, rvalues, op, str_rep, eval_kwargs)
+
+ return res_values
+
+
+def comparison_op(
+ left: Union[np.ndarray, ABCExtensionArray], right: Any, op
+) -> Union[np.ndarray, ABCExtensionArray]:
+ """
+ Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`.
+
+ Parameters
+ ----------
+ left : np.ndarray or ExtensionArray
+ right : object
+ Cannot be a DataFrame, Series, or Index.
+ op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}
+
+ Returns
+ -------
+ ndarrray or ExtensionArray
+ """
+
+ # NB: We assume extract_array has already been called on left and right
+ lvalues = left
+ rvalues = right
+
+ rvalues = lib.item_from_zerodim(rvalues)
+ if isinstance(rvalues, list):
+ # TODO: same for tuples?
+ rvalues = np.asarray(rvalues)
+
+ if isinstance(rvalues, (np.ndarray, ABCExtensionArray, ABCIndexClass)):
+ # TODO: make this treatment consistent across ops and classes.
+ # We are not catching all listlikes here (e.g. frozenset, tuple)
+ # The ambiguous case is object-dtype. See GH#27803
+ if len(lvalues) != len(rvalues):
+ raise ValueError("Lengths must match to compare")
+
+ if should_extension_dispatch(lvalues, rvalues):
+ res_values = dispatch_to_extension_op(op, lvalues, rvalues)
+
+ elif is_scalar(rvalues) and isna(rvalues):
+ # numpy does not like comparisons vs None
+ if op is operator.ne:
+ res_values = np.ones(len(lvalues), dtype=bool)
+ else:
+ res_values = np.zeros(len(lvalues), dtype=bool)
+
+ elif is_object_dtype(lvalues.dtype):
+ res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
+
+ else:
+ op_name = "__{op}__".format(op=op.__name__)
+ method = getattr(lvalues, op_name)
+ with np.errstate(all="ignore"):
+ res_values = method(rvalues)
+
+ if res_values is NotImplemented:
+ res_values = invalid_comparison(lvalues, rvalues, op)
+ if is_scalar(res_values):
+ raise TypeError(
+ "Could not compare {typ} type with Series".format(typ=type(rvalues))
+ )
+
+ return res_values
+
+
+def na_logical_op(x, y, op):
+ try:
+ result = op(x, y)
+ except TypeError:
+ if isinstance(y, np.ndarray):
+ # bool-bool dtype operations should be OK, should not get here
+ assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype))
+ x = ensure_object(x)
+ y = ensure_object(y)
+ result = libops.vec_binop(x, y, op)
+ else:
+ # let null fall thru
+ assert lib.is_scalar(y)
+ if not isna(y):
+ y = bool(y)
+ try:
+ result = libops.scalar_binop(x, y, op)
+ except (
+ TypeError,
+ ValueError,
+ AttributeError,
+ OverflowError,
+ NotImplementedError,
+ ):
+ raise TypeError(
+ "cannot compare a dtyped [{dtype}] array "
+ "with a scalar of type [{typ}]".format(
+ dtype=x.dtype, typ=type(y).__name__
+ )
+ )
+
+ return result
+
+
+def logical_op(
+ left: Union[np.ndarray, ABCExtensionArray], right: Any, op
+) -> Union[np.ndarray, ABCExtensionArray]:
+ """
+ Evaluate a logical operation `|`, `&`, or `^`.
+
+ Parameters
+ ----------
+ left : np.ndarray or ExtensionArray
+ right : object
+ Cannot be a DataFrame, Series, or Index.
+ op : {operator.and_, operator.or_, operator.xor}
+ Or one of the reversed variants from roperator.
+
+ Returns
+ -------
+ ndarrray or ExtensionArray
+ """
+
+ fill_int = lambda x: x
+
+ def fill_bool(x, left=None):
+ # if `left` is specifically not-boolean, we do not cast to bool
+ if x.dtype.kind in ["c", "f", "O"]:
+ # dtypes that can hold NA
+ mask = isna(x)
+ if mask.any():
+ x = x.astype(object)
+ x[mask] = False
+
+ if left is None or is_bool_dtype(left.dtype):
+ x = x.astype(bool)
+ return x
+
+ is_self_int_dtype = is_integer_dtype(left.dtype)
+
+ right = lib.item_from_zerodim(right)
+ if is_list_like(right) and not hasattr(right, "dtype"):
+ # e.g. list, tuple
+ right = construct_1d_object_array_from_listlike(right)
+
+ # NB: We assume extract_array has already been called on left and right
+ lvalues = left
+ rvalues = right
+
+ if should_extension_dispatch(lvalues, rvalues):
+ res_values = dispatch_to_extension_op(op, lvalues, rvalues)
+
+ else:
+ if isinstance(rvalues, np.ndarray):
+ is_other_int_dtype = is_integer_dtype(rvalues.dtype)
+ rvalues = rvalues if is_other_int_dtype else fill_bool(rvalues, lvalues)
+
+ else:
+ # i.e. scalar
+ is_other_int_dtype = lib.is_integer(rvalues)
+
+ # For int vs int `^`, `|`, `&` are bitwise operators and return
+ # integer dtypes. Otherwise these are boolean ops
+ filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
+
+ res_values = na_logical_op(lvalues, rvalues, op)
+ res_values = filler(res_values) # type: ignore
+
+ return res_values
diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py
new file mode 100644
index 0000000000000..9835d57ee7366
--- /dev/null
+++ b/pandas/core/ops/dispatch.py
@@ -0,0 +1,223 @@
+"""
+Functions for defining unary operations.
+"""
+from typing import Any, Callable, Union
+
+import numpy as np
+
+from pandas.errors import NullFrequencyError
+
+from pandas.core.dtypes.common import (
+ is_datetime64_dtype,
+ is_extension_array_dtype,
+ is_integer_dtype,
+ is_object_dtype,
+ is_scalar,
+ is_timedelta64_dtype,
+)
+from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
+
+from pandas._typing import ArrayLike
+from pandas.core.construction import array
+
+
+def should_extension_dispatch(left: ABCSeries, right: Any) -> bool:
+ """
+ Identify cases where Series operation should use dispatch_to_extension_op.
+
+ Parameters
+ ----------
+ left : Series
+ right : object
+
+ Returns
+ -------
+ bool
+ """
+ if (
+ is_extension_array_dtype(left.dtype)
+ or is_datetime64_dtype(left.dtype)
+ or is_timedelta64_dtype(left.dtype)
+ ):
+ return True
+
+ if not is_scalar(right) and is_extension_array_dtype(right):
+ # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
+ return True
+
+ return False
+
+
+def should_series_dispatch(left, right, op):
+ """
+ Identify cases where a DataFrame operation should dispatch to its
+ Series counterpart.
+
+ Parameters
+ ----------
+ left : DataFrame
+ right : DataFrame
+ op : binary operator
+
+ Returns
+ -------
+ override : bool
+ """
+ if left._is_mixed_type or right._is_mixed_type:
+ return True
+
+ if not len(left.columns) or not len(right.columns):
+ # ensure obj.dtypes[0] exists for each obj
+ return False
+
+ ldtype = left.dtypes.iloc[0]
+ rdtype = right.dtypes.iloc[0]
+
+ if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
+ is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
+ ):
+ # numpy integer dtypes as timedelta64 dtypes in this scenario
+ return True
+
+ if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
+ # in particular case where right is an array of DateOffsets
+ return True
+
+ return False
+
+
+def dispatch_to_extension_op(
+ op,
+ left: Union[ABCExtensionArray, np.ndarray],
+ right: Any,
+ keep_null_freq: bool = False,
+):
+ """
+ Assume that left or right is a Series backed by an ExtensionArray,
+ apply the operator defined by op.
+
+ Parameters
+ ----------
+ op : binary operator
+ left : ExtensionArray or np.ndarray
+ right : object
+ keep_null_freq : bool, default False
+ Whether to re-raise a NullFrequencyError unchanged, as opposed to
+ catching and raising TypeError.
+
+ Returns
+ -------
+ ExtensionArray or np.ndarray
+ 2-tuple of these if op is divmod or rdivmod
+ """
+ # NB: left and right should already be unboxed, so neither should be
+ # a Series or Index.
+
+ if left.dtype.kind in "mM" and isinstance(left, np.ndarray):
+ # We need to cast datetime64 and timedelta64 ndarrays to
+ # DatetimeArray/TimedeltaArray. But we avoid wrapping others in
+ # PandasArray as that behaves poorly with e.g. IntegerArray.
+ left = array(left)
+
+ # The op calls will raise TypeError if the op is not defined
+ # on the ExtensionArray
+
+ try:
+ res_values = op(left, right)
+ except NullFrequencyError:
+ # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
+ # on add/sub of integers (or int-like). We re-raise as a TypeError.
+ if keep_null_freq:
+ # TODO: remove keep_null_freq after Timestamp+int deprecation
+ # GH#22535 is enforced
+ raise
+ raise TypeError(
+ "incompatible type for a datetime/timedelta "
+ "operation [{name}]".format(name=op.__name__)
+ )
+ return res_values
+
+
+def maybe_dispatch_ufunc_to_dunder_op(
+ self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any
+):
+ """
+ Dispatch a ufunc to the equivalent dunder method.
+
+ Parameters
+ ----------
+ self : ArrayLike
+ The array whose dunder method we dispatch to
+ ufunc : Callable
+ A NumPy ufunc
+ method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
+ inputs : ArrayLike
+ The input arrays.
+ kwargs : Any
+ The additional keyword arguments, e.g. ``out``.
+
+ Returns
+ -------
+ result : Any
+ The result of applying the ufunc
+ """
+ # special has the ufuncs we dispatch to the dunder op on
+ special = {
+ "add",
+ "sub",
+ "mul",
+ "pow",
+ "mod",
+ "floordiv",
+ "truediv",
+ "divmod",
+ "eq",
+ "ne",
+ "lt",
+ "gt",
+ "le",
+ "ge",
+ "remainder",
+ "matmul",
+ }
+ aliases = {
+ "subtract": "sub",
+ "multiply": "mul",
+ "floor_divide": "floordiv",
+ "true_divide": "truediv",
+ "power": "pow",
+ "remainder": "mod",
+ "divide": "div",
+ "equal": "eq",
+ "not_equal": "ne",
+ "less": "lt",
+ "less_equal": "le",
+ "greater": "gt",
+ "greater_equal": "ge",
+ }
+
+ # For op(., Array) -> Array.__r{op}__
+ flipped = {
+ "lt": "__gt__",
+ "le": "__ge__",
+ "gt": "__lt__",
+ "ge": "__le__",
+ "eq": "__eq__",
+ "ne": "__ne__",
+ }
+
+ op_name = ufunc.__name__
+ op_name = aliases.get(op_name, op_name)
+
+ def not_implemented(*args, **kwargs):
+ return NotImplemented
+
+ if method == "__call__" and op_name in special and kwargs.get("out") is None:
+ if isinstance(inputs[0], type(self)):
+ name = "__{}__".format(op_name)
+ return getattr(self, name, not_implemented)(inputs[1])
+ else:
+ name = flipped.get(op_name, "__r{}__".format(op_name))
+ return getattr(self, name, not_implemented)(inputs[0])
+ else:
+ return NotImplemented
diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py
new file mode 100644
index 0000000000000..013ff7689b221
--- /dev/null
+++ b/pandas/core/ops/invalid.py
@@ -0,0 +1,61 @@
+"""
+Templates for invalid operations.
+"""
+import operator
+
+import numpy as np
+
+
+def invalid_comparison(left, right, op):
+ """
+ If a comparison has mismatched types and is not necessarily meaningful,
+ follow python3 conventions by:
+
+ - returning all-False for equality
+ - returning all-True for inequality
+ - raising TypeError otherwise
+
+ Parameters
+ ----------
+ left : array-like
+ right : scalar, array-like
+ op : operator.{eq, ne, lt, le, gt}
+
+ Raises
+ ------
+ TypeError : on inequality comparisons
+ """
+ if op is operator.eq:
+ res_values = np.zeros(left.shape, dtype=bool)
+ elif op is operator.ne:
+ res_values = np.ones(left.shape, dtype=bool)
+ else:
+ raise TypeError(
+ "Invalid comparison between dtype={dtype} and {typ}".format(
+ dtype=left.dtype, typ=type(right).__name__
+ )
+ )
+ return res_values
+
+
+def make_invalid_op(name: str):
+ """
+ Return a binary method that always raises a TypeError.
+
+ Parameters
+ ----------
+ name : str
+
+ Returns
+ -------
+ invalid_op : function
+ """
+
+ def invalid_op(self, other=None):
+ raise TypeError(
+ "cannot perform {name} with this index type: "
+ "{typ}".format(name=name, typ=type(self).__name__)
+ )
+
+ invalid_op.__name__ = name
+ return invalid_op
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
new file mode 100644
index 0000000000000..477c847fb01e6
--- /dev/null
+++ b/pandas/core/ops/methods.py
@@ -0,0 +1,233 @@
+"""
+Functions to generate methods and pin them to the appropriate classes.
+"""
+import operator
+
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCSparseArray
+
+from pandas.core.ops.roperator import (
+ radd,
+ rand_,
+ rdivmod,
+ rfloordiv,
+ rmod,
+ rmul,
+ ror_,
+ rpow,
+ rsub,
+ rtruediv,
+ rxor,
+)
+
+
+def _get_method_wrappers(cls):
+ """
+ Find the appropriate operation-wrappers to use when defining flex/special
+ arithmetic, boolean, and comparison operations with the given class.
+
+ Parameters
+ ----------
+ cls : class
+
+ Returns
+ -------
+ arith_flex : function or None
+ comp_flex : function or None
+ arith_special : function
+ comp_special : function
+ bool_special : function
+
+ Notes
+ -----
+ None is only returned for SparseArray
+ """
+ # TODO: make these non-runtime imports once the relevant functions
+ # are no longer in __init__
+ from pandas.core.ops import (
+ _arith_method_FRAME,
+ _arith_method_SERIES,
+ _bool_method_SERIES,
+ _comp_method_FRAME,
+ _comp_method_SERIES,
+ _flex_comp_method_FRAME,
+ _flex_method_SERIES,
+ )
+
+ if issubclass(cls, ABCSeries):
+ # Just Series
+ arith_flex = _flex_method_SERIES
+ comp_flex = _flex_method_SERIES
+ arith_special = _arith_method_SERIES
+ comp_special = _comp_method_SERIES
+ bool_special = _bool_method_SERIES
+ elif issubclass(cls, ABCDataFrame):
+ arith_flex = _arith_method_FRAME
+ comp_flex = _flex_comp_method_FRAME
+ arith_special = _arith_method_FRAME
+ comp_special = _comp_method_FRAME
+ bool_special = _arith_method_FRAME
+ return arith_flex, comp_flex, arith_special, comp_special, bool_special
+
+
+def add_special_arithmetic_methods(cls):
+ """
+ Adds the full suite of special arithmetic methods (``__add__``,
+ ``__sub__``, etc.) to the class.
+
+ Parameters
+ ----------
+ cls : class
+ special methods will be defined and pinned to this class
+ """
+ _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
+ new_methods = _create_methods(
+ cls, arith_method, comp_method, bool_method, special=True
+ )
+ # inplace operators (I feel like these should get passed an `inplace=True`
+ # or just be removed
+
+ def _wrap_inplace_method(method):
+ """
+ return an inplace wrapper for this method
+ """
+
+ def f(self, other):
+ result = method(self, other)
+
+ # this makes sure that we are aligned like the input
+ # we are updating inplace so we want to ignore is_copy
+ self._update_inplace(
+ result.reindex_like(self, copy=False)._data, verify_is_copy=False
+ )
+
+ return self
+
+ f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
+ return f
+
+ new_methods.update(
+ dict(
+ __iadd__=_wrap_inplace_method(new_methods["__add__"]),
+ __isub__=_wrap_inplace_method(new_methods["__sub__"]),
+ __imul__=_wrap_inplace_method(new_methods["__mul__"]),
+ __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
+ __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
+ __imod__=_wrap_inplace_method(new_methods["__mod__"]),
+ __ipow__=_wrap_inplace_method(new_methods["__pow__"]),
+ )
+ )
+
+ new_methods.update(
+ dict(
+ __iand__=_wrap_inplace_method(new_methods["__and__"]),
+ __ior__=_wrap_inplace_method(new_methods["__or__"]),
+ __ixor__=_wrap_inplace_method(new_methods["__xor__"]),
+ )
+ )
+
+ _add_methods(cls, new_methods=new_methods)
+
+
+def add_flex_arithmetic_methods(cls):
+ """
+ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
+ to the class.
+
+ Parameters
+ ----------
+ cls : class
+ flex methods will be defined and pinned to this class
+ """
+ flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls)
+ new_methods = _create_methods(
+ cls, flex_arith_method, flex_comp_method, bool_method=None, special=False
+ )
+ new_methods.update(
+ dict(
+ multiply=new_methods["mul"],
+ subtract=new_methods["sub"],
+ divide=new_methods["div"],
+ )
+ )
+ # opt out of bool flex methods for now
+ assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))
+
+ _add_methods(cls, new_methods=new_methods)
+
+
+def _create_methods(cls, arith_method, comp_method, bool_method, special):
+ # creates actual methods based upon arithmetic, comp and bool method
+ # constructors.
+
+ have_divmod = issubclass(cls, ABCSeries)
+ # divmod is available for Series
+
+ # yapf: disable
+ new_methods = dict(
+ add=arith_method(cls, operator.add, special),
+ radd=arith_method(cls, radd, special),
+ sub=arith_method(cls, operator.sub, special),
+ mul=arith_method(cls, operator.mul, special),
+ truediv=arith_method(cls, operator.truediv, special),
+ floordiv=arith_method(cls, operator.floordiv, special),
+ # Causes a floating point exception in the tests when numexpr enabled,
+ # so for now no speedup
+ mod=arith_method(cls, operator.mod, special),
+ pow=arith_method(cls, operator.pow, special),
+ # not entirely sure why this is necessary, but previously was included
+ # so it's here to maintain compatibility
+ rmul=arith_method(cls, rmul, special),
+ rsub=arith_method(cls, rsub, special),
+ rtruediv=arith_method(cls, rtruediv, special),
+ rfloordiv=arith_method(cls, rfloordiv, special),
+ rpow=arith_method(cls, rpow, special),
+ rmod=arith_method(cls, rmod, special))
+ # yapf: enable
+ new_methods["div"] = new_methods["truediv"]
+ new_methods["rdiv"] = new_methods["rtruediv"]
+ if have_divmod:
+ # divmod doesn't have an op that is supported by numexpr
+ new_methods["divmod"] = arith_method(cls, divmod, special)
+ new_methods["rdivmod"] = arith_method(cls, rdivmod, special)
+
+ new_methods.update(
+ dict(
+ eq=comp_method(cls, operator.eq, special),
+ ne=comp_method(cls, operator.ne, special),
+ lt=comp_method(cls, operator.lt, special),
+ gt=comp_method(cls, operator.gt, special),
+ le=comp_method(cls, operator.le, special),
+ ge=comp_method(cls, operator.ge, special),
+ )
+ )
+
+ if bool_method:
+ new_methods.update(
+ dict(
+ and_=bool_method(cls, operator.and_, special),
+ or_=bool_method(cls, operator.or_, special),
+ # For some reason ``^`` wasn't used in original.
+ xor=bool_method(cls, operator.xor, special),
+ rand_=bool_method(cls, rand_, special),
+ ror_=bool_method(cls, ror_, special),
+ rxor=bool_method(cls, rxor, special),
+ )
+ )
+
+ if special:
+ dunderize = lambda x: "__{name}__".format(name=x.strip("_"))
+ else:
+ dunderize = lambda x: x
+ new_methods = {dunderize(k): v for k, v in new_methods.items()}
+ return new_methods
+
+
+def _add_methods(cls, new_methods):
+ for name, method in new_methods.items():
+ # For most methods, if we find that the class already has a method
+ # of the same name, it is OK to over-write it. The exception is
+ # inplace methods (__iadd__, __isub__, ...) for SparseArray, which
+ # retain the np.ndarray versions.
+ force = not (issubclass(cls, ABCSparseArray) and name.startswith("__i"))
+ if force or name not in cls.__dict__:
+ setattr(cls, name, method)
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 01bc345a40b83..4fe69f64bd0ae 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -30,22 +30,19 @@
from .roperator import rdivmod, rfloordiv, rmod
-def fill_zeros(result, x, y, name, fill):
+def fill_zeros(result, x, y):
"""
If this is a reversed op, then flip x,y
If we have an integer value (or array in y)
- and we have 0's, fill them with the fill,
+ and we have 0's, fill them with np.nan,
return the result.
Mask the nan's from x.
"""
- if fill is None or is_float_dtype(result):
+ if is_float_dtype(result.dtype):
return result
- if name.startswith(("r", "__r")):
- x, y = y, x
-
is_variable_type = hasattr(y, "dtype") or hasattr(y, "type")
is_scalar_type = is_scalar(y)
@@ -55,7 +52,7 @@ def fill_zeros(result, x, y, name, fill):
if is_scalar_type:
y = np.array(y)
- if is_integer_dtype(y):
+ if is_integer_dtype(y.dtype):
if (y == 0).any():
@@ -66,19 +63,7 @@ def fill_zeros(result, x, y, name, fill):
shape = result.shape
result = result.astype("float64", copy=False).ravel()
- np.putmask(result, mask, fill)
-
- # if we have a fill of inf, then sign it correctly
- # (GH#6178 and GH#9308)
- if np.isinf(fill):
- signs = y if name.startswith(("r", "__r")) else x
- signs = np.sign(signs.astype("float", copy=False))
- negative_inf_mask = (signs.ravel() < 0) & mask
- np.putmask(result, negative_inf_mask, -fill)
-
- if "floordiv" in name: # (GH#9308)
- nan_mask = ((y == 0) & (x == 0)).ravel()
- np.putmask(result, nan_mask, np.nan)
+ np.putmask(result, mask, np.nan)
result = result.reshape(shape)
@@ -172,12 +157,12 @@ def dispatch_fill_zeros(op, left, right, result):
if op is divmod:
result = (
mask_zero_div_zero(left, right, result[0]),
- fill_zeros(result[1], left, right, "__mod__", np.nan),
+ fill_zeros(result[1], left, right),
)
elif op is rdivmod:
result = (
mask_zero_div_zero(right, left, result[0]),
- fill_zeros(result[1], left, right, "__rmod__", np.nan),
+ fill_zeros(result[1], right, left),
)
elif op is operator.floordiv:
# Note: no need to do this for truediv; in py3 numpy behaves the way
@@ -188,7 +173,7 @@ def dispatch_fill_zeros(op, left, right, result):
# we want.
result = mask_zero_div_zero(right, left, result)
elif op is operator.mod:
- result = fill_zeros(result, left, right, "__mod__", np.nan)
+ result = fill_zeros(result, left, right)
elif op is rmod:
- result = fill_zeros(result, left, right, "__rmod__", np.nan)
+ result = fill_zeros(result, right, left)
return result
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index b4a3e6ed71bf4..545bc21dd6d1b 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -7,7 +7,7 @@
import numpy as np
from pandas._libs import lib
-from pandas._libs.tslibs import NaT, Timestamp
+from pandas._libs.tslibs import NaT, Period, Timestamp
from pandas._libs.tslibs.frequencies import is_subperiod, is_superperiod
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat.numpy import function as nv
@@ -16,7 +16,6 @@
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-import pandas as pd
import pandas.core.algorithms as algos
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
@@ -25,7 +24,7 @@
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
-from pandas.core.indexes.period import PeriodIndex
+from pandas.core.indexes.period import PeriodIndex, period_range
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.tseries.frequencies import to_offset
@@ -138,7 +137,7 @@ def _typ(self):
"""
Masquerade for compat as a Series or a DataFrame.
"""
- if isinstance(self._selected_obj, pd.Series):
+ if isinstance(self._selected_obj, ABCSeries):
return "series"
return "dataframe"
@@ -424,7 +423,7 @@ def pad(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -515,7 +514,7 @@ def backfill(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
Limit of how many values to fill.
Returns
@@ -629,7 +628,7 @@ def fillna(self, method, limit=None):
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
- limit : integer, optional
+ limit : int, optional
Limit of how many consecutive missing values to fill.
Returns
@@ -781,8 +780,6 @@ def interpolate(
):
"""
Interpolate values according to different methods.
-
- .. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(
@@ -826,7 +823,7 @@ def std(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
Degrees of freedom.
Returns
@@ -843,7 +840,7 @@ def var(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -860,7 +857,9 @@ def size(self):
# a copy of 0-len objects. GH14962
result = self._downsample("size")
if not len(self.ax) and isinstance(self._selected_obj, ABCDataFrame):
- result = pd.Series([], index=result.index, dtype="int64")
+ from pandas import Series
+
+ result = Series([], index=result.index, dtype="int64")
return result
def quantile(self, q=0.5, **kwargs):
@@ -1047,7 +1046,7 @@ def _downsample(self, how, **kwargs):
**kwargs : kw args passed to how function
"""
self._set_binner()
- how = self._is_cython_func(how) or how
+ how = self._get_cython_func(how) or how
ax = self.ax
obj = self._selected_obj
@@ -1141,8 +1140,6 @@ def _wrap_result(self, result):
class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
Provides a resample of a groupby implementation
-
- .. versionadded:: 0.18.1
"""
@property
@@ -1197,7 +1194,7 @@ def _downsample(self, how, **kwargs):
if self.kind == "timestamp":
return super()._downsample(how, **kwargs)
- how = self._is_cython_func(how) or how
+ how = self._get_cython_func(how) or how
ax = self.ax
if is_subperiod(ax.freq, self.freq):
@@ -1259,8 +1256,6 @@ def _upsample(self, method, limit=None, fill_value=None):
class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
Provides a resample of a groupby implementation.
-
- .. versionadded:: 0.18.1
"""
@property
@@ -1289,8 +1284,6 @@ def _adjust_binner_for_upsample(self, binner):
class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
Provides a resample of a groupby implementation.
-
- .. versionadded:: 0.18.1
"""
@property
@@ -1563,9 +1556,7 @@ def _get_time_period_bins(self, ax):
binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)
return binner, [], labels
- labels = binner = pd.period_range(
- start=ax[0], end=ax[-1], freq=freq, name=ax.name
- )
+ labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
if ax.tzinfo:
@@ -1608,11 +1599,11 @@ def _get_period_bins(self, ax):
)
# Get offset for bin edge (not label edge) adjustment
- start_offset = pd.Period(start, self.freq) - pd.Period(p_start, self.freq)
+ start_offset = Period(start, self.freq) - Period(p_start, self.freq)
bin_shift = start_offset.n % freq_mult
start = p_start
- labels = binner = pd.period_range(
+ labels = binner = period_range(
start=start, end=end, freq=self.freq, name=ax.name
)
@@ -1639,15 +1630,14 @@ def _get_period_bins(self, ax):
def _take_new_index(obj, indexer, new_index, axis=0):
- from pandas.core.api import Series, DataFrame
- if isinstance(obj, Series):
+ if isinstance(obj, ABCSeries):
new_values = algos.take_1d(obj.values, indexer)
- return Series(new_values, index=new_index, name=obj.name)
- elif isinstance(obj, DataFrame):
+ return obj._constructor(new_values, index=new_index, name=obj.name)
+ elif isinstance(obj, ABCDataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
- return DataFrame(
+ return obj._constructor(
obj._data.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
)
else:
@@ -1732,7 +1722,7 @@ def _get_period_range_edges(first, last, offset, closed="left", base=0):
-------
A tuple of length 2, containing the adjusted pd.Period objects.
"""
- if not all(isinstance(obj, pd.Period) for obj in [first, last]):
+ if not all(isinstance(obj, Period) for obj in [first, last]):
raise TypeError("'first' and 'last' must be instances of type Period")
# GH 23882
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 5a476dceca1f3..60bab817705e3 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -6,8 +6,6 @@
import numpy as np
-import pandas.core.dtypes.concat as _concat
-
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core import common as com
from pandas.core.arrays.categorical import (
@@ -290,7 +288,7 @@ def __init__(
self.intersect = True
else: # pragma: no cover
raise ValueError(
- "Only can inner (intersect) or outer (union) " "join the other axis"
+ "Only can inner (intersect) or outer (union) join the other axis"
)
if isinstance(objs, dict):
@@ -304,7 +302,7 @@ def __init__(
raise ValueError("No objects to concatenate")
if keys is None:
- objs = list(com._not_none(*objs))
+ objs = list(com.not_none(*objs))
else:
# #1649
clean_keys = []
@@ -439,13 +437,13 @@ def get_result(self):
mgr = self.objs[0]._data.concat(
[x._data for x in self.objs], self.new_axes
)
- cons = _concat._get_series_result_type(mgr, self.objs)
+ cons = _get_series_result_type(mgr, self.objs)
return cons(mgr, name=name).__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
- cons = _concat._get_series_result_type(data)
+ cons = _get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
@@ -475,7 +473,7 @@ def get_result(self):
if not self.copy:
new_data._consolidate_inplace()
- cons = _concat._get_frame_result_type(new_data, self.objs)
+ cons = _get_frame_result_type(new_data, self.objs)
return cons._from_axes(new_data, self.new_axes).__finalize__(
self, method="concat"
)
@@ -708,3 +706,28 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
+
+
+def _get_series_result_type(result, objs=None):
+ """
+ return appropriate class of Series concat
+ input is either dict or array-like
+ """
+ # TODO: See if we can just inline with _constructor_expanddim
+ # now that sparse is removed.
+ from pandas import DataFrame
+
+ # concat Series with axis 1
+ if isinstance(result, dict):
+ return DataFrame
+
+ # otherwise it is a SingleBlockManager (axis = 0)
+ return objs[0]._constructor
+
+
+def _get_frame_result_type(result, objs):
+ """
+ return appropriate class of DataFrame-like concat
+ """
+ # TODO: just inline this as _constructor.
+ return objs[0]
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 9a69942a70e01..6f2e264f1a4d0 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -5,6 +5,7 @@
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_extension_type, is_list_like
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import notna
@@ -39,7 +40,7 @@ def melt(
id_vars = [id_vars]
elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
raise ValueError(
- "id_vars must be a list of tuples when columns" " are a MultiIndex"
+ "id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
# Check that `id_vars` are in frame
@@ -61,7 +62,7 @@ def melt(
value_vars, list
):
raise ValueError(
- "value_vars must be a list of tuples when" " columns are a MultiIndex"
+ "value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
@@ -171,9 +172,7 @@ def lreshape(data, groups, dropna=True, label=None):
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
- import pandas.core.dtypes.concat as _concat
-
- mdata[target] = _concat._concat_compat(to_concat)
+ mdata[target] = concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index c1a07c129f7cd..62a30073a53fd 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -3,6 +3,7 @@
"""
import copy
+import datetime
from functools import partial
import string
import warnings
@@ -22,7 +23,6 @@
is_bool,
is_bool_dtype,
is_categorical_dtype,
- is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetimelike,
is_dtype_equal,
@@ -179,7 +179,7 @@ def merge_ordered(
"""
Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
- examples)
+ examples).
Parameters
----------
@@ -217,8 +217,6 @@ def merge_ordered(
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
- .. versionadded:: 0.19.0
-
Returns
-------
merged : DataFrame
@@ -328,8 +326,6 @@ def merge_asof(
Optionally match on equivalent keys with 'by' before searching with 'on'.
- .. versionadded:: 0.19.0
-
Parameters
----------
left : DataFrame
@@ -345,26 +341,14 @@ def merge_asof(
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
-
- .. versionadded:: 0.19.2
-
right_index : boolean
Use the index of the right DataFrame as the join key.
-
- .. versionadded:: 0.19.2
-
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
-
- .. versionadded:: 0.19.2
-
right_by : column name
Field names to match on in the right DataFrame.
-
- .. versionadded:: 0.19.2
-
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
@@ -1292,8 +1276,6 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how="inner", **kwargs)
indexers into the left_keys, right_keys
"""
- from functools import partial
-
assert len(left_keys) == len(
right_keys
), "left_key and right_keys must be the same length"
@@ -1574,7 +1556,7 @@ def _validate_specification(self):
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
- raise MergeError("Can only pass by OR left_by " "and right_by")
+ raise MergeError("Can only pass by OR left_by and right_by")
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError("missing left_by")
@@ -1638,7 +1620,7 @@ def _get_merge_keys(self):
)
raise MergeError(msg)
- # validate tolerance; must be a Timedelta if we have a DTI
+ # validate tolerance; datetime.timedelta or Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
@@ -1653,8 +1635,8 @@ def _get_merge_keys(self):
)
)
- if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
- if not isinstance(self.tolerance, Timedelta):
+ if is_datetimelike(lt):
+ if not isinstance(self.tolerance, datetime.timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
@@ -1724,6 +1706,7 @@ def flip(xs):
left_values = left_values.view("i8")
right_values = right_values.view("i8")
if tolerance is not None:
+ tolerance = Timedelta(tolerance)
tolerance = tolerance.value
# a "by" parameter requires special handling
@@ -1767,7 +1750,6 @@ def flip(xs):
def _get_multiindex_indexer(join_keys, index, sort):
- from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
@@ -1977,7 +1959,7 @@ def _should_fill(lname, rname):
def _any(x):
- return x is not None and com._any_not_none(*x)
+ return x is not None and com.any_not_none(*x)
def validate_operand(obj):
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 23bf89b2bc1ac..d653dd87308cf 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -127,8 +127,6 @@ def pivot_table(
table = agged.unstack(to_unstack)
if not dropna:
- from pandas import MultiIndex
-
if table.index.nlevels > 1:
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
@@ -480,8 +478,6 @@ def crosstab(
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
- .. versionadded:: 0.18.1
-
Returns
-------
DataFrame
@@ -615,13 +611,21 @@ def _normalize(table, normalize, margins, margins_name="All"):
table = table.fillna(0)
elif margins is True:
-
- column_margin = table.loc[:, margins_name].drop(margins_name)
- index_margin = table.loc[margins_name, :].drop(margins_name)
- table = table.drop(margins_name, axis=1).drop(margins_name)
- # to keep index and columns names
- table_index_names = table.index.names
- table_columns_names = table.columns.names
+ # keep index and column of pivoted table
+ table_index = table.index
+ table_columns = table.columns
+
+ # check if margin name is in (for MI cases) or equal to last
+ # index/column and save the column and index margin
+ if (margins_name not in table.iloc[-1, :].name) | (
+ margins_name != table.iloc[:, -1].name
+ ):
+ raise ValueError("{} not in pivoted DataFrame".format(margins_name))
+ column_margin = table.iloc[:-1, -1]
+ index_margin = table.iloc[-1, :-1]
+
+ # keep the core table
+ table = table.iloc[:-1, :-1]
# Normalize core
table = _normalize(table, normalize=normalize, margins=False)
@@ -631,11 +635,13 @@ def _normalize(table, normalize, margins, margins_name="All"):
column_margin = column_margin / column_margin.sum()
table = concat([table, column_margin], axis=1)
table = table.fillna(0)
+ table.columns = table_columns
elif normalize == "index":
index_margin = index_margin / index_margin.sum()
table = table.append(index_margin)
table = table.fillna(0)
+ table.index = table_index
elif normalize == "all" or normalize is True:
column_margin = column_margin / column_margin.sum()
@@ -645,13 +651,12 @@ def _normalize(table, normalize, margins, margins_name="All"):
table = table.append(index_margin)
table = table.fillna(0)
+ table.index = table_index
+ table.columns = table_columns
else:
raise ValueError("Not a valid normalize argument")
- table.index.names = table_index_names
- table.columns.names = table_columns_names
-
else:
raise ValueError("Not a valid margins argument")
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 540a06caec220..e654685d24d9d 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -3,8 +3,8 @@
import numpy as np
-import pandas._libs.algos as _algos
-import pandas._libs.reshape as _reshape
+import pandas._libs.algos as libalgos
+import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas.core.dtypes.cast import maybe_promote
@@ -12,6 +12,7 @@
ensure_platform_int,
is_bool_dtype,
is_extension_array_dtype,
+ is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
@@ -22,9 +23,9 @@
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import _factorize_from_iterable
+from pandas.core.construction import extract_array
from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
-from pandas.core.internals.arrays import extract_array
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
@@ -56,7 +57,7 @@ class _Unstacker:
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
- response. If None, DataFrame or SparseDataFrame will be used.
+ response. If None, DataFrame will be used.
Examples
--------
@@ -133,9 +134,7 @@ def __init__(
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
- raise ValueError(
- "Unstacked DataFrame is too big, " "causing int32 overflow"
- )
+ raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_sorted_values_labels()
self._make_selectors()
@@ -151,7 +150,7 @@ def _make_sorted_values_labels(self):
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
- indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
+ indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
@@ -176,7 +175,7 @@ def _make_selectors(self):
mask.put(selector, True)
if mask.sum() < len(self.index):
- raise ValueError("Index contains duplicate entries, " "cannot reshape")
+ raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
@@ -240,7 +239,7 @@ def get_new_values(self):
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
- f = getattr(_reshape, "unstack_{name}".format(name=name))
+ f = getattr(libreshape, "unstack_{name}".format(name=name))
f(
sorted_values,
mask.view("u1"),
@@ -402,6 +401,10 @@ def unstack(obj, level, fill_value=None):
else:
level = level[0]
+ # Prioritize integer interpretation (GH #21677):
+ if not is_integer(level) and not level == "__placeholder__":
+ level = obj.index._get_level_number(level)
+
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
@@ -722,8 +725,9 @@ def _convert_level_number(level_num, columns):
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
- new_levels = [this.index]
- new_codes = [np.arange(N).repeat(levsize)]
+ old_codes, old_levels = _factorize_from_iterable(this.index)
+ new_levels = [old_levels]
+ new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
@@ -781,9 +785,6 @@ def get_dummies(
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
-
- .. versionadded:: 0.18.0
-
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
@@ -855,7 +856,6 @@ def get_dummies(
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
- from itertools import cycle
dtypes_to_encode = ["object", "category"]
@@ -884,7 +884,7 @@ def check_len(item, name):
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
- prefix = cycle([prefix])
+ prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
@@ -893,7 +893,7 @@ def check_len(item, name):
# validate separators
if isinstance(prefix_sep, str):
- prefix_sep = cycle([prefix_sep])
+ prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 0446f53345671..ab354a21a33df 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -5,6 +5,7 @@
import numpy as np
+from pandas._libs import Timedelta, Timestamp
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
@@ -26,8 +27,6 @@
Interval,
IntervalIndex,
Series,
- Timedelta,
- Timestamp,
to_datetime,
to_timedelta,
)
@@ -230,7 +229,7 @@ def cut(
if np.isinf(mn) or np.isinf(mx):
# GH 24314
raise ValueError(
- "cannot specify integer `bins` when input data " "contains infinity"
+ "cannot specify integer `bins` when input data contains infinity"
)
elif mn == mx: # adjust end points before binning
mn -= 0.001 * abs(mn) if mn != 0 else 0.001
@@ -374,8 +373,7 @@ def _bins_to_cuts(
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
- result = algos.take_nd(bins, ids)
- result = Categorical(result, categories=bins, ordered=True)
+ result = Categorical.from_codes(ids, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
@@ -406,7 +404,7 @@ def _bins_to_cuts(
else:
if len(labels) != len(bins) - 1:
raise ValueError(
- "Bin labels must be one fewer than " "the number of bin edges"
+ "Bin labels must be one fewer than the number of bin edges"
)
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 59ea8c6bd6c5d..c87e371354f63 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -12,7 +12,7 @@
from pandas._config import get_option
-from pandas._libs import iNaT, index as libindex, lib, reshape, tslibs
+from pandas._libs import index as libindex, lib, reshape, tslibs
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, deprecate
@@ -44,10 +44,8 @@
ABCDatetimeIndex,
ABCSeries,
ABCSparseArray,
- ABCSparseSeries,
)
from pandas.core.dtypes.missing import (
- is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
@@ -56,11 +54,12 @@
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
-from pandas.core.accessor import CachedAccessor
-from pandas.core.arrays import ExtensionArray, SparseArray
+from pandas.core.accessor import CachedAccessor, DirNamesMixin
+from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
import pandas.core.common as com
+from pandas.core.construction import extract_array, sanitize_array
from pandas.core.index import (
Float64Index,
Index,
@@ -76,7 +75,6 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
-from pandas.core.internals.construction import sanitize_array
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
@@ -113,7 +111,7 @@ def remove_na(arr):
"""
warnings.warn(
- "remove_na is deprecated and is a private " "function. Do not use.",
+ "remove_na is deprecated and is a private function. Do not use.",
FutureWarning,
stacklevel=2,
)
@@ -128,7 +126,7 @@ def _coerce_method(converter):
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
- raise TypeError("cannot convert the series to " "{0}".format(str(converter)))
+ raise TypeError("cannot convert the series to {0}".format(str(converter)))
wrapper.__name__ = "__{name}__".format(name=converter.__name__)
return wrapper
@@ -157,7 +155,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
- .. versionchanged :: 0.23.0
+ .. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
@@ -178,8 +176,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ["name"]
_accessors = {"dt", "cat", "str", "sparse"}
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = generic.NDFrame._deprecations | frozenset(
- ["asobject", "reshape", "get_value", "set_value", "valid", "tolist"]
+ _deprecations = (
+ generic.NDFrame._deprecations
+ | DirNamesMixin._deprecations
+ | frozenset(["asobject", "reshape", "valid", "tolist", "ftype", "real", "imag"])
)
# Override cache_readonly bc Series is mutable
@@ -227,7 +227,7 @@ def __init__(
if isinstance(data, MultiIndex):
raise NotImplementedError(
- "initializing a Series from a " "MultiIndex is not supported"
+ "initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
if name is None:
@@ -247,7 +247,7 @@ def __init__(
elif isinstance(data, np.ndarray):
pass
- elif isinstance(data, (ABCSeries, ABCSparseSeries)):
+ elif isinstance(data, ABCSeries):
if name is None:
name = data.name
if index is None:
@@ -276,7 +276,7 @@ def __init__(
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(
- "{0!r} type is unordered" "".format(data.__class__.__name__)
+ "{0!r} type is unordered".format(data.__class__.__name__)
)
elif isinstance(data, ABCSparseArray):
# handle sparse passed here (and force conversion)
@@ -371,7 +371,7 @@ def from_array(
"""
Construct Series from array.
- .. deprecated :: 0.23.0
+ .. deprecated:: 0.23.0
Use pd.Series(..) constructor instead.
Returns
@@ -386,10 +386,6 @@ def from_array(
FutureWarning,
stacklevel=2,
)
- if isinstance(arr, ABCSparseArray):
- from pandas.core.sparse.series import SparseSeries
-
- cls = SparseSeries
return cls(
arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath
)
@@ -563,13 +559,6 @@ def _values(self):
"""
return self._data.internal_values()
- def _formatting_values(self):
- """
- Return the values that can be formatted (used by SeriesFormatter
- and DataFrameFormatter).
- """
- return self._data.formatting_values()
-
def get_values(self):
"""
Same as values (but handles sparseness conversions); is a view.
@@ -598,14 +587,14 @@ def asobject(self):
"""
Return object Series which contains boxed values.
- .. deprecated :: 0.23.0
+ .. deprecated:: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn(
- "'asobject' is deprecated. Use 'astype(object)'" " instead",
+ "'asobject' is deprecated. Use 'astype(object)' instead",
FutureWarning,
stacklevel=2,
)
@@ -683,8 +672,8 @@ def nonzero(self):
3 4
dtype: int64
- >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
# same return although index of s is different
+ >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
@@ -711,7 +700,7 @@ def put(self, *args, **kwargs):
numpy.ndarray.put
"""
warnings.warn(
- "`put` has been deprecated and will be removed in a" "future version.",
+ "`put` has been deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
@@ -801,8 +790,6 @@ def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
):
# TODO: handle DataFrame
- from pandas.core.internals.construction import extract_array
-
cls = type(self)
# for binary ops, use our custom dunder methods
@@ -955,10 +942,12 @@ def real(self):
"""
Return the real value of vector.
- .. deprecated 0.25.0
+ .. deprecated:: 0.25.0
"""
warnings.warn(
- "`real` has be deprecated and will be removed in a " "future verison",
+ "`real` is deprecated and will be removed in a future version. "
+ "To eliminate this warning for a Series `ser`, use "
+ "`np.real(ser.to_numpy())` or `ser.to_numpy().real`.",
FutureWarning,
stacklevel=2,
)
@@ -973,10 +962,12 @@ def imag(self):
"""
Return imag value of vector.
- .. deprecated 0.25.0
+ .. deprecated:: 0.25.0
"""
warnings.warn(
- "`imag` has be deprecated and will be removed in a " "future verison",
+ "`imag` is deprecated and will be removed in a future version. "
+ "To eliminate this warning for a Series `ser`, use "
+ "`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.",
FutureWarning,
stacklevel=2,
)
@@ -1030,6 +1021,36 @@ def axes(self):
"""
return [self.index]
+ # ----------------------------------------------------------------------
+ # Indexing Methods
+
+ @Appender(generic.NDFrame.take.__doc__)
+ def take(self, indices, axis=0, is_copy=False, **kwargs):
+ nv.validate_take(tuple(), kwargs)
+
+ indices = ensure_platform_int(indices)
+ new_index = self.index.take(indices)
+
+ if is_categorical_dtype(self):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ # TODO: remove when the default Categorical.take behavior changes
+ indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
+ kwargs = {"allow_fill": False}
+ else:
+ kwargs = {}
+ new_values = self._values.take(indices, **kwargs)
+
+ result = self._constructor(
+ new_values, index=new_index, fastpath=True
+ ).__finalize__(self)
+
+ # Maybe set copy if we didn't actually change the index.
+ if is_copy:
+ if not result._get_axis(axis).equals(self._get_axis(axis)):
+ result._set_is_copy(self)
+
+ return result
+
def _ixs(self, i: int, axis: int = 0):
"""
Return the i-th value or values in the Series by location.
@@ -1050,11 +1071,7 @@ def _ixs(self, i: int, axis: int = 0):
else:
return values[i]
- @property
- def _is_mixed_type(self):
- return False
-
- def _slice(self, slobj, axis=0, kind=None):
+ def _slice(self, slobj: slice, axis: int = 0, kind=None):
slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
return self._get_values(slobj)
@@ -1094,9 +1111,6 @@ def __getitem__(self, key):
return self.__getitem__(new_key)
raise
- except Exception:
- raise
-
if is_iterator(key):
key = list(key)
@@ -1108,8 +1122,7 @@ def __getitem__(self, key):
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
- indexer = self.index._convert_slice_indexer(key, kind="getitem")
- return self._get_values(indexer)
+ return self._slice(key)
elif isinstance(key, ABCDataFrame):
raise TypeError(
"Indexing a Series with DataFrame is not "
@@ -1118,14 +1131,15 @@ def _get_with(self, key):
elif isinstance(key, tuple):
try:
return self._get_values_tuple(key)
- except Exception:
+ except ValueError:
+ # if we don't have a MultiIndex, we may still be able to handle
+ # a 1-tuple. see test_1tuple_without_multiindex
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
- # pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
@@ -1142,23 +1156,22 @@ def _get_with(self, key):
elif key_type == "boolean":
return self._get_values(key)
- try:
- # handle the dup indexing case (GH 4246)
- if isinstance(key, (list, tuple)):
- return self.loc[key]
-
- return self.reindex(key)
- except Exception:
- # [slice(0, 5, None)] will break if you convert to ndarray,
- # e.g. as requested by np.median
- # hack
- if isinstance(key[0], slice):
+ if isinstance(key, (list, tuple)):
+ # TODO: de-dup with tuple case handled above?
+ # handle the dup indexing case GH#4246
+ if len(key) == 1 and isinstance(key[0], slice):
+ # [slice(0, 5, None)] will break if you convert to ndarray,
+ # e.g. as requested by np.median
+ # FIXME: hack
return self._get_values(key)
- raise
+
+ return self.loc[key]
+
+ return self.reindex(key)
def _get_values_tuple(self, key):
# mpl hackaround
- if com._any_none(*key):
+ if com.any_none(*key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
@@ -1175,52 +1188,52 @@ def _get_values(self, indexer):
return self._constructor(
self._data.get_slice(indexer), fastpath=True
).__finalize__(self)
- except Exception:
+ except ValueError:
+ # mpl compat if we look up e.g. ser[:, np.newaxis];
+ # see tests.series.timeseries.test_mpl_compat_hack
return self._values[indexer]
- def __setitem__(self, key, value):
- key = com.apply_if_callable(key, self)
+ def _get_value(self, label, takeable: bool = False):
+ """
+ Quickly retrieve single value at passed index label.
- def setitem(key, value):
- try:
- self._set_with_engine(key, value)
- return
- except com.SettingWithCopyError:
- raise
- except (KeyError, ValueError):
- values = self._values
- if is_integer(key) and not self.index.inferred_type == "integer":
+ Parameters
+ ----------
+ label : object
+ takeable : interpret the index as indexers, default False
- values[key] = value
- return
- elif key is Ellipsis:
- self[:] = value
- return
- elif com.is_bool_indexer(key):
- pass
- elif is_timedelta64_dtype(self.dtype):
- # reassign a null value to iNaT
- if is_valid_nat_for_dtype(value, self.dtype):
- # exclude np.datetime64("NaT")
- value = iNaT
-
- try:
- self.index._engine.set_value(self._values, key, value)
- return
- except (TypeError, ValueError):
- # ValueError appears in only some builds in CI
- pass
+ Returns
+ -------
+ scalar value
+ """
+ if takeable:
+ return com.maybe_box_datetimelike(self._values[label])
+ return self.index.get_value(self._values, label)
+
+ def __setitem__(self, key, value):
+ key = com.apply_if_callable(key, self)
+ cacher_needs_updating = self._check_is_chained_assignment_possible()
+ try:
+ self._set_with_engine(key, value)
+ except com.SettingWithCopyError:
+ raise
+ except (KeyError, ValueError):
+ values = self._values
+ if is_integer(key) and not self.index.inferred_type == "integer":
+ values[key] = value
+ elif key is Ellipsis:
+ self[:] = value
+ else:
self.loc[key] = value
- return
- except TypeError as e:
- if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
- raise ValueError("Can only tuple-index with a MultiIndex")
+ except TypeError as e:
+ if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
+ raise ValueError("Can only tuple-index with a MultiIndex")
- # python 3 type errors should be raised
- if _is_unorderable_exception(e):
- raise IndexError(key)
+ # python 3 type errors should be raised
+ if _is_unorderable_exception(e):
+ raise IndexError(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
@@ -1232,9 +1245,6 @@ def setitem(key, value):
self._set_with(key, value)
- # do the setitem
- cacher_needs_updating = self._check_is_chained_assignment_possible()
- setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
@@ -1256,6 +1266,14 @@ def _set_with(self, key, value):
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
+
+ elif is_scalar(key) and not is_integer(key) and key not in self.index:
+ # GH#12862 adding an new key to the Series
+ # Note: have to exclude integers because that is ambiguously
+ # position-based
+ self.loc[key] = value
+ return
+
else:
if isinstance(key, tuple):
try:
@@ -1263,23 +1281,12 @@ def _set_with(self, key, value):
except Exception:
pass
- if is_scalar(key) and not is_integer(key) and key not in self.index:
- # GH#12862 adding an new key to the Series
- # Note: have to exclude integers because that is ambiguously
- # position-based
- self.loc[key] = value
- return
-
if is_scalar(key):
key = [key]
- elif not isinstance(key, (list, Series, np.ndarray)):
- try:
- key = list(key)
- except Exception:
- key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
+ key = key._values
else:
key_type = lib.infer_dtype(key, skipna=False)
@@ -1294,10 +1301,7 @@ def _set_with(self, key, value):
self._set_labels(key, value)
def _set_labels(self, key, value):
- if isinstance(key, Index):
- key = key.values
- else:
- key = com.asarray_tuplesafe(key)
+ key = com.asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
@@ -1310,6 +1314,46 @@ def _set_values(self, key, value):
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
+ def _set_value(self, label, value, takeable: bool = False):
+ """
+ Quickly set single value at passed label.
+
+ If label is not contained, a new object is created with the label
+ placed at the end of the result index.
+
+ Parameters
+ ----------
+ label : object
+ Partial indexing with MultiIndex not allowed
+ value : object
+ Scalar value
+ takeable : interpret the index as indexers, default False
+
+ Returns
+ -------
+ Series
+ If label is contained, will be reference to calling Series,
+ otherwise a new object.
+ """
+ try:
+ if takeable:
+ self._values[label] = value
+ else:
+ self.index._engine.set_value(self._values, label, value)
+ except (KeyError, TypeError):
+
+ # set using a non-recursive method
+ self.loc[label] = value
+
+ return self
+
+ # ----------------------------------------------------------------------
+ # Unsorted
+
+ @property
+ def _is_mixed_type(self):
+ return False
+
def repeat(self, repeats, axis=None):
"""
Repeat elements of a Series.
@@ -1367,86 +1411,6 @@ def repeat(self, repeats, axis=None):
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index).__finalize__(self)
- def get_value(self, label, takeable=False):
- """
- Quickly retrieve single value at passed index label.
-
- .. deprecated:: 0.21.0
- Please use .at[] or .iat[] accessors.
-
- Parameters
- ----------
- label : object
- takeable : interpret the index as indexers, default False
-
- Returns
- -------
- scalar value
- """
- warnings.warn(
- "get_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._get_value(label, takeable=takeable)
-
- def _get_value(self, label, takeable=False):
- if takeable is True:
- return com.maybe_box_datetimelike(self._values[label])
- return self.index.get_value(self._values, label)
-
- _get_value.__doc__ = get_value.__doc__
-
- def set_value(self, label, value, takeable=False):
- """
- Quickly set single value at passed label.
-
- .. deprecated:: 0.21.0
- Please use .at[] or .iat[] accessors.
-
- If label is not contained, a new object is created with the label
- placed at the end of the result index.
-
- Parameters
- ----------
- label : object
- Partial indexing with MultiIndex not allowed
- value : object
- Scalar value
- takeable : interpret the index as indexers, default False
-
- Returns
- -------
- Series
- If label is contained, will be reference to calling Series,
- otherwise a new object.
- """
- warnings.warn(
- "set_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._set_value(label, value, takeable=takeable)
-
- def _set_value(self, label, value, takeable=False):
- try:
- if takeable:
- self._values[label] = value
- else:
- self.index._engine.set_value(self._values, label, value)
- except (KeyError, TypeError):
-
- # set using a non-recursive method
- self.loc[label] = value
-
- return self
-
- _set_value.__doc__ = set_value.__doc__
-
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
@@ -1575,7 +1539,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
).__finalize__(self)
elif inplace:
raise TypeError(
- "Cannot reset_index inplace on a Series " "to create a DataFrame"
+ "Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
df = self.to_frame(name)
@@ -1710,7 +1674,8 @@ def items(self):
See Also
--------
- DataFrame.items : Equivalent to Series.items for DataFrame.
+ DataFrame.items : Iterate over (column name, Series) pairs.
+ DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
@@ -1808,38 +1773,6 @@ def to_frame(self, name=None):
return df
- def to_sparse(self, kind="block", fill_value=None):
- """
- Convert Series to SparseSeries.
-
- .. deprecated:: 0.25.0
-
- Parameters
- ----------
- kind : {'block', 'integer'}, default 'block'
- fill_value : float, defaults to NaN (missing)
- Value to use for filling NaN values.
-
- Returns
- -------
- SparseSeries
- Sparse representation of the Series.
- """
-
- warnings.warn(
- "Series.to_sparse is deprecated and will be removed " "in a future version",
- FutureWarning,
- stacklevel=2,
- )
- from pandas.core.sparse.series import SparseSeries
-
- values = SparseArray(self, kind=kind, fill_value=fill_value)
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="SparseSeries")
- return SparseSeries(values, index=self.index, name=self.name).__finalize__(
- self
- )
-
def _set_name(self, name, inplace=False):
"""
Set the Series name.
@@ -2350,8 +2283,6 @@ def quantile(self, q=0.5, interpolation="linear"):
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
- .. versionadded:: 0.18.0
-
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
@@ -2642,9 +2573,7 @@ def dot(self, other):
>>> s.dot(arr)
array([24, 14])
"""
- from pandas.core.frame import DataFrame
-
- if isinstance(other, (Series, DataFrame)):
+ if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
@@ -2661,7 +2590,7 @@ def dot(self, other):
"Dot product shape mismatch, %s vs %s" % (lvals.shape, rvals.shape)
)
- if isinstance(other, DataFrame):
+ if isinstance(other, ABCDataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns
).__finalize__(self)
@@ -2702,9 +2631,6 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
-
- .. versionadded:: 0.19.0
-
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
@@ -2768,7 +2694,8 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
- to_concat = [self] + to_append
+ to_concat = [self]
+ to_concat.extend(to_append)
else:
to_concat = [self, to_append]
return concat(
@@ -3595,22 +3522,21 @@ def nsmallest(self, n=5, keep="first"):
def swaplevel(self, i=-2, j=-1, copy=True):
"""
- Swap levels i and j in a MultiIndex.
+ Swap levels i and j in a :class:`MultiIndex`.
+
+ Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
+ copy : bool, default True
+ Whether to copy underlying data.
Returns
-------
Series
Series with levels swapped in MultiIndex.
-
- .. versionchanged:: 0.18.1
-
- The indexes ``i`` and ``j`` are now optional, and default to
- the two innermost levels of the index.
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
@@ -3656,7 +3582,7 @@ def explode(self) -> "Series":
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
- DataFrame.melt : Unpivot a DataFrame from wide format to long format
+ DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
@@ -3707,8 +3633,6 @@ def unstack(self, level=-1, fill_value=None):
fill_value : scalar value, default None
Value to use when replacing NaN values.
- .. versionadded:: 0.18.0
-
Returns
-------
DataFrame
@@ -4077,7 +4001,7 @@ def _reduce(
elif isinstance(delegate, np.ndarray):
if numeric_only:
raise NotImplementedError(
- "Series.{0} does not implement " "numeric_only.".format(name)
+ "Series.{0} does not implement numeric_only.".format(name)
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
@@ -4203,12 +4127,10 @@ def rename(self, index=None, **kwargs):
"""
kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace")
- non_mapping = is_scalar(index) or (
- is_list_like(index) and not is_dict_like(index)
- )
- if non_mapping:
+ if callable(index) or is_dict_like(index):
+ return super().rename(index=index, **kwargs)
+ else:
return self._set_name(index, inplace=kwargs.get("inplace"))
- return super().rename(index=index, **kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.reindex.__doc__)
@@ -4422,33 +4344,6 @@ def memory_usage(self, index=True, deep=False):
v += self.index.memory_usage(deep=deep)
return v
- @Appender(generic.NDFrame.take.__doc__)
- def take(self, indices, axis=0, is_copy=False, **kwargs):
- nv.validate_take(tuple(), kwargs)
-
- indices = ensure_platform_int(indices)
- new_index = self.index.take(indices)
-
- if is_categorical_dtype(self):
- # https://github.com/pandas-dev/pandas/issues/20664
- # TODO: remove when the default Categorical.take behavior changes
- indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
- kwargs = {"allow_fill": False}
- else:
- kwargs = {}
- new_values = self._values.take(indices, **kwargs)
-
- result = self._constructor(
- new_values, index=new_index, fastpath=True
- ).__finalize__(self)
-
- # Maybe set copy if we didn't actually change the index.
- if is_copy:
- if not result._get_axis(axis).equals(self._get_axis(axis)):
- result._set_is_copy(self)
-
- return result
-
def isin(self, values):
"""
Check whether `values` are contained in Series.
@@ -4463,10 +4358,6 @@ def isin(self, values):
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
- .. versionadded:: 0.18.1
-
- Support for values as a set.
-
Returns
-------
Series
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 523c4dc5e867b..e6edad656d430 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -15,6 +15,7 @@
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
+from pandas.core.construction import extract_array
_INT64_MAX = np.iinfo(np.int64).max
@@ -202,19 +203,19 @@ def lexsort_indexer(keys, orders=None, na_position="last"):
# we are already a Categorical
if is_categorical_dtype(key):
- c = key
+ cat = key
# create the Categorical
else:
- c = Categorical(key, ordered=True)
+ cat = Categorical(key, ordered=True)
if na_position not in ["last", "first"]:
raise ValueError("invalid na_position: {!r}".format(na_position))
- n = len(c.categories)
- codes = c.codes.copy()
+ n = len(cat.categories)
+ codes = cat.codes.copy()
- mask = c.codes == -1
+ mask = cat.codes == -1
if order: # ascending
if na_position == "last":
codes = np.where(mask, n, codes)
@@ -240,8 +241,6 @@ def nargsort(items, kind="quicksort", ascending=True, na_position="last"):
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
- from pandas.core.internals.arrays import extract_array
-
items = extract_array(items)
mask = np.asarray(isna(items))
@@ -272,7 +271,6 @@ def nargsort(items, kind="quicksort", ascending=True, na_position="last"):
class _KeyMapper:
-
"""
Ease my suffering. Map compressed group id -> key tuple
"""
@@ -399,8 +397,6 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False, verify=T
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
- .. versionadded:: 0.19.0
-
Parameters
----------
values : list-like
@@ -439,7 +435,7 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False, verify=T
"""
if not is_list_like(values):
raise TypeError(
- "Only list-like objects are allowed to be passed to" "safe_sort as values"
+ "Only list-like objects are allowed to be passed to safe_sort as values"
)
if not isinstance(values, np.ndarray) and not is_extension_array_dtype(values):
diff --git a/pandas/core/sparse/api.py b/pandas/core/sparse/api.py
index 6a00fa570b2ac..e7bf94cdc08ea 100644
--- a/pandas/core/sparse/api.py
+++ b/pandas/core/sparse/api.py
@@ -1,4 +1,3 @@
-# flake8: noqa
from pandas.core.arrays.sparse import SparseArray, SparseDtype
-from pandas.core.sparse.frame import SparseDataFrame
-from pandas.core.sparse.series import SparseSeries
+
+__all__ = ["SparseArray", "SparseDtype"]
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
deleted file mode 100644
index 54998eb66e69d..0000000000000
--- a/pandas/core/sparse/frame.py
+++ /dev/null
@@ -1,1089 +0,0 @@
-"""
-Data structures for sparse float data. Life is made simpler by dealing only
-with float64 data
-"""
-import warnings
-
-import numpy as np
-
-from pandas._libs.lib import is_scalar, item_from_zerodim
-from pandas._libs.sparse import BlockIndex, get_blocks
-from pandas.compat.numpy import function as nv
-from pandas.util._decorators import Appender
-
-from pandas.core.dtypes.cast import maybe_upcast
-from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
-from pandas.core.dtypes.missing import isna, notna
-
-import pandas.core.algorithms as algos
-from pandas.core.arrays.sparse import SparseArray, SparseFrameAccessor
-import pandas.core.common as com
-from pandas.core.frame import DataFrame
-import pandas.core.generic as generic
-from pandas.core.index import Index, MultiIndex, ensure_index
-from pandas.core.internals import BlockManager, create_block_manager_from_arrays
-from pandas.core.internals.construction import extract_index, prep_ndarray
-import pandas.core.ops as ops
-from pandas.core.series import Series
-from pandas.core.sparse.series import SparseSeries
-
-_shared_doc_kwargs = dict(klass="SparseDataFrame")
-depr_msg = """\
-SparseDataFrame is deprecated and will be removed in a future version.
-Use a regular DataFrame whose columns are SparseArrays instead.
-
-See http://pandas.pydata.org/pandas-docs/stable/\
-user_guide/sparse.html#migrating for more.
-"""
-
-
-class SparseDataFrame(DataFrame):
- """
- DataFrame containing sparse floating point data in the form of SparseSeries
- objects
-
- .. deprecated:: 0.25.0
-
- Use a DataFrame with sparse values instead.
-
- Parameters
- ----------
- data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
- .. versionchanged :: 0.23.0
- If data is a dict, argument order is maintained for Python 3.6
- and later.
-
- index : array-like, optional
- column : array-like, optional
- default_kind : {'block', 'integer'}, default 'block'
- Default sparse kind for converting Series to SparseSeries. Will not
- override SparseSeries passed into constructor
- default_fill_value : float
- Default fill_value for converting Series to SparseSeries
- (default: nan). Will not override SparseSeries passed in.
- """
-
- _subtyp = "sparse_frame"
-
- def __init__(
- self,
- data=None,
- index=None,
- columns=None,
- default_kind=None,
- default_fill_value=None,
- dtype=None,
- copy=False,
- ):
- if not is_scalar(default_fill_value):
- raise ValueError("'default_fill_value' must be a scalar")
-
- warnings.warn(depr_msg, FutureWarning, stacklevel=2)
- # pick up the defaults from the Sparse structures
- if isinstance(data, SparseDataFrame):
- if index is None:
- index = data.index
- if columns is None:
- columns = data.columns
- if default_fill_value is None:
- default_fill_value = data.default_fill_value
- if default_kind is None:
- default_kind = data.default_kind
- elif isinstance(data, (SparseSeries, SparseArray)):
- if index is None:
- index = data.index
- if default_fill_value is None:
- default_fill_value = data.fill_value
- if columns is None and hasattr(data, "name"):
- columns = [data.name]
- if columns is None:
- raise Exception("cannot pass a series w/o a name or columns")
- data = {columns[0]: data}
-
- if default_fill_value is None:
- default_fill_value = np.nan
- if default_kind is None:
- default_kind = "block"
-
- self._default_kind = default_kind
- self._default_fill_value = default_fill_value
-
- if is_scipy_sparse(data):
- mgr = self._init_spmatrix(
- data, index, columns, dtype=dtype, fill_value=default_fill_value
- )
- elif isinstance(data, dict):
- mgr = self._init_dict(data, index, columns, dtype=dtype)
- elif isinstance(data, (np.ndarray, list)):
- mgr = self._init_matrix(data, index, columns, dtype=dtype)
- elif isinstance(data, SparseDataFrame):
- mgr = self._init_mgr(
- data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy
- )
- elif isinstance(data, DataFrame):
- mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
- elif isinstance(data, Series):
- mgr = self._init_dict(
- data.to_frame(), data.index, columns=None, dtype=dtype
- )
- elif isinstance(data, BlockManager):
- mgr = self._init_mgr(
- data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
- )
- elif data is None:
- data = DataFrame()
-
- if index is None:
- index = Index([])
- else:
- index = ensure_index(index)
-
- if columns is None:
- columns = Index([])
- else:
- for c in columns:
- data[c] = SparseArray(
- self._default_fill_value,
- index=index,
- kind=self._default_kind,
- fill_value=self._default_fill_value,
- )
- mgr = to_manager(data, columns, index)
- if dtype is not None:
- mgr = mgr.astype(dtype)
- else:
- msg = (
- 'SparseDataFrame called with unknown type "{data_type}" '
- "for data argument"
- )
- raise TypeError(msg.format(data_type=type(data).__name__))
-
- generic.NDFrame.__init__(self, mgr)
-
- @property
- def _constructor(self):
- return SparseDataFrame
-
- _constructor_sliced = SparseSeries
-
- def _init_dict(self, data, index, columns, dtype=None):
- # pre-filter out columns if we passed it
- if columns is not None:
- columns = ensure_index(columns)
- data = {k: v for k, v in data.items() if k in columns}
- else:
- keys = com.dict_keys_to_ordered_list(data)
- columns = Index(keys)
-
- if index is None:
- index = extract_index(list(data.values()))
-
- def sp_maker(x):
- return SparseArray(
- x,
- kind=self._default_kind,
- fill_value=self._default_fill_value,
- copy=True,
- dtype=dtype,
- )
-
- sdict = {}
- for k, v in data.items():
- if isinstance(v, Series):
- # Force alignment, no copy necessary
- if not v.index.equals(index):
- v = v.reindex(index)
-
- if not isinstance(v, SparseSeries):
- v = sp_maker(v.values)
- elif isinstance(v, SparseArray):
- v = v.copy()
- else:
- if isinstance(v, dict):
- v = [v.get(i, np.nan) for i in index]
-
- v = sp_maker(v)
-
- if index is not None and len(v) != len(index):
- msg = "Length of passed values is {}, index implies {}"
- raise ValueError(msg.format(len(v), len(index)))
- sdict[k] = v
-
- if len(columns.difference(sdict)):
- # TODO: figure out how to handle this case, all nan's?
- # add in any other columns we want to have (completeness)
- nan_arr = np.empty(len(index), dtype="float64")
- nan_arr.fill(np.nan)
- nan_arr = SparseArray(
- nan_arr,
- kind=self._default_kind,
- fill_value=self._default_fill_value,
- copy=False,
- )
- sdict.update((c, nan_arr) for c in columns if c not in sdict)
-
- return to_manager(sdict, columns, index)
-
- def _init_matrix(self, data, index, columns, dtype=None):
- """
- Init self from ndarray or list of lists.
- """
- data = prep_ndarray(data, copy=False)
- index, columns = SparseFrameAccessor._prep_index(data, index, columns)
- data = {idx: data[:, i] for i, idx in enumerate(columns)}
- return self._init_dict(data, index, columns, dtype)
-
- def _init_spmatrix(self, data, index, columns, dtype=None, fill_value=None):
- """
- Init self from scipy.sparse matrix.
- """
- index, columns = SparseFrameAccessor._prep_index(data, index, columns)
- data = data.tocoo()
- N = len(index)
-
- # Construct a dict of SparseSeries
- sdict = {}
- values = Series(data.data, index=data.row, copy=False)
- for col, rowvals in values.groupby(data.col):
- # get_blocks expects int32 row indices in sorted order
- rowvals = rowvals.sort_index()
- rows = rowvals.index.values.astype(np.int32)
- blocs, blens = get_blocks(rows)
-
- sdict[columns[col]] = SparseSeries(
- rowvals.values,
- index=index,
- fill_value=fill_value,
- sparse_index=BlockIndex(N, blocs, blens),
- )
-
- # Add any columns that were empty and thus not grouped on above
- sdict.update(
- {
- column: SparseSeries(
- index=index,
- fill_value=fill_value,
- sparse_index=BlockIndex(N, [], []),
- )
- for column in columns
- if column not in sdict
- }
- )
-
- return self._init_dict(sdict, index, columns, dtype)
-
- @Appender(SparseFrameAccessor.to_coo.__doc__)
- def to_coo(self):
- return SparseFrameAccessor(self).to_coo()
-
- def __repr__(self):
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "Sparse")
- return super().__repr__()
-
- def __getstate__(self):
- # pickling
- return dict(
- _typ=self._typ,
- _subtyp=self._subtyp,
- _data=self._data,
- _default_fill_value=self._default_fill_value,
- _default_kind=self._default_kind,
- )
-
- def _unpickle_sparse_frame_compat(self, state):
- """
- Original pickle format
- """
- series, cols, idx, fv, kind = state
-
- if not isinstance(cols, Index): # pragma: no cover
- from pandas.io.pickle import _unpickle_array
-
- columns = _unpickle_array(cols)
- else:
- columns = cols
-
- if not isinstance(idx, Index): # pragma: no cover
- from pandas.io.pickle import _unpickle_array
-
- index = _unpickle_array(idx)
- else:
- index = idx
-
- series_dict = DataFrame()
- for col, (sp_index, sp_values) in series.items():
- series_dict[col] = SparseSeries(
- sp_values, sparse_index=sp_index, fill_value=fv
- )
-
- self._data = to_manager(series_dict, columns, index)
- self._default_fill_value = fv
- self._default_kind = kind
-
- @Appender(SparseFrameAccessor.to_dense.__doc__)
- def to_dense(self):
- return SparseFrameAccessor(self).to_dense()
-
- def _apply_columns(self, func):
- """
- Get new SparseDataFrame applying func to each columns
- """
-
- new_data = {col: func(series) for col, series in self.items()}
-
- return self._constructor(
- data=new_data,
- index=self.index,
- columns=self.columns,
- default_fill_value=self.default_fill_value,
- ).__finalize__(self)
-
- def astype(self, dtype):
- return self._apply_columns(lambda x: x.astype(dtype))
-
- def copy(self, deep=True):
- """
- Make a copy of this SparseDataFrame
- """
- result = super().copy(deep=deep)
- result._default_fill_value = self._default_fill_value
- result._default_kind = self._default_kind
- return result
-
- @property
- def default_fill_value(self):
- return self._default_fill_value
-
- @property
- def default_kind(self):
- return self._default_kind
-
- @property
- def density(self):
- """
- Ratio of non-sparse points to total (dense) data points
- represented in the frame
- """
- tot_nonsparse = sum(ser.sp_index.npoints for _, ser in self.items())
- tot = len(self.index) * len(self.columns)
- return tot_nonsparse / float(tot)
-
- def fillna(
- self, value=None, method=None, axis=0, inplace=False, limit=None, downcast=None
- ):
- new_self = super().fillna(
- value=value,
- method=method,
- axis=axis,
- inplace=inplace,
- limit=limit,
- downcast=downcast,
- )
- if not inplace:
- self = new_self
-
- # set the fill value if we are filling as a scalar with nothing special
- # going on
- if value is not None and value == value and method is None and limit is None:
- self._default_fill_value = value
-
- if not inplace:
- return self
-
- # ----------------------------------------------------------------------
- # Support different internal representation of SparseDataFrame
-
- def _sanitize_column(self, key, value, **kwargs):
- """
- Creates a new SparseArray from the input value.
-
- Parameters
- ----------
- key : object
- value : scalar, Series, or array-like
- kwargs : dict
-
- Returns
- -------
- sanitized_column : SparseArray
-
- """
-
- def sp_maker(x, index=None):
- return SparseArray(
- x,
- index=index,
- fill_value=self._default_fill_value,
- kind=self._default_kind,
- )
-
- if isinstance(value, SparseSeries):
- clean = value.reindex(self.index).as_sparse_array(
- fill_value=self._default_fill_value, kind=self._default_kind
- )
-
- elif isinstance(value, SparseArray):
- if len(value) != len(self.index):
- raise ValueError("Length of values does not match " "length of index")
- clean = value
-
- elif hasattr(value, "__iter__"):
- if isinstance(value, Series):
- clean = value.reindex(self.index)
- if not isinstance(value, SparseSeries):
- clean = sp_maker(clean)
- else:
- if len(value) != len(self.index):
- raise ValueError(
- "Length of values does not match " "length of index"
- )
- clean = sp_maker(value)
-
- # Scalar
- else:
- clean = sp_maker(value, self.index)
-
- # always return a SparseArray!
- return clean
-
- def get_value(self, index, col, takeable=False):
- """
- Quickly retrieve single value at passed column and index
-
- .. deprecated:: 0.21.0
-
- Please use .at[] or .iat[] accessors.
-
- Parameters
- ----------
- index : row label
- col : column label
- takeable : interpret the index/col as indexers, default False
-
- Returns
- -------
- value : scalar value
- """
- warnings.warn(
- "get_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._get_value(index, col, takeable=takeable)
-
- def _get_value(self, index, col, takeable=False):
- if takeable is True:
- series = self._iget_item_cache(col)
- else:
- series = self._get_item_cache(col)
-
- return series._get_value(index, takeable=takeable)
-
- _get_value.__doc__ = get_value.__doc__
-
- def set_value(self, index, col, value, takeable=False):
- """
- Put single value at passed column and index
-
- .. deprecated:: 0.21.0
-
- Please use .at[] or .iat[] accessors.
-
- Parameters
- ----------
- index : row label
- col : column label
- value : scalar value
- takeable : interpret the index/col as indexers, default False
-
- Notes
- -----
- This method *always* returns a new object. It is currently not
- particularly efficient (and potentially very expensive) but is provided
- for API compatibility with DataFrame
-
- Returns
- -------
- frame : DataFrame
- """
- warnings.warn(
- "set_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._set_value(index, col, value, takeable=takeable)
-
- def _set_value(self, index, col, value, takeable=False):
- dense = self.to_dense()._set_value(index, col, value, takeable=takeable)
- return dense.to_sparse(
- kind=self._default_kind, fill_value=self._default_fill_value
- )
-
- _set_value.__doc__ = set_value.__doc__
-
- def _slice(self, slobj, axis=0, kind=None):
- if axis == 0:
- new_index = self.index[slobj]
- new_columns = self.columns
- else:
- new_index = self.index
- new_columns = self.columns[slobj]
-
- return self.reindex(index=new_index, columns=new_columns)
-
- def xs(self, key, axis=0, copy=False):
- """
- Returns a row (cross-section) from the SparseDataFrame as a Series
- object.
-
- Parameters
- ----------
- key : some index contained in the index
-
- Returns
- -------
- xs : Series
- """
- if axis == 1:
- data = self[key]
- return data
-
- i = self.index.get_loc(key)
- data = self.take([i])._internal_get_values()[0]
- return Series(data, index=self.columns)
-
- # ----------------------------------------------------------------------
- # Arithmetic-related methods
-
- def _combine_frame(self, other, func, fill_value=None, level=None):
- if level is not None:
- raise NotImplementedError("'level' argument is not supported")
-
- this, other = self.align(other, join="outer", level=level, copy=False)
- new_index, new_columns = this.index, this.columns
-
- if self.empty and other.empty:
- return self._constructor(index=new_index).__finalize__(self)
-
- new_data = {}
- if fill_value is not None:
- # TODO: be a bit more intelligent here
- for col in new_columns:
- if col in this and col in other:
- dleft = this[col].to_dense()
- dright = other[col].to_dense()
- result = dleft._binop(dright, func, fill_value=fill_value)
- result = result.to_sparse(fill_value=this[col].fill_value)
- new_data[col] = result
- else:
-
- for col in new_columns:
- if col in this and col in other:
- new_data[col] = func(this[col], other[col])
-
- new_fill_value = self._get_op_result_fill_value(other, func)
-
- return self._constructor(
- data=new_data,
- index=new_index,
- columns=new_columns,
- default_fill_value=new_fill_value,
- ).__finalize__(self)
-
- def _combine_match_index(self, other, func, level=None):
- new_data = {}
-
- if level is not None:
- raise NotImplementedError("'level' argument is not supported")
-
- this, other = self.align(other, join="outer", axis=0, level=level, copy=False)
-
- for col, series in this.items():
- new_data[col] = func(series.values, other.values)
-
- fill_value = self._get_op_result_fill_value(other, func)
-
- return self._constructor(
- new_data,
- index=this.index,
- columns=self.columns,
- default_fill_value=fill_value,
- ).__finalize__(self)
-
- def _combine_match_columns(self, other, func, level=None):
- # patched version of DataFrame._combine_match_columns to account for
- # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
- # where 3.0 is numpy.float64 and series is a SparseSeries. Still
- # possible for this to happen, which is bothersome
-
- if level is not None:
- raise NotImplementedError("'level' argument is not supported")
-
- left, right = self.align(other, join="outer", axis=1, level=level, copy=False)
- assert left.columns.equals(right.index)
-
- new_data = {}
-
- for col in left.columns:
- new_data[col] = func(left[col], float(right[col]))
-
- return self._constructor(
- new_data,
- index=left.index,
- columns=left.columns,
- default_fill_value=self.default_fill_value,
- ).__finalize__(self)
-
- def _combine_const(self, other, func):
- return self._apply_columns(lambda x: func(x, other))
-
- def _get_op_result_fill_value(self, other, func):
- own_default = self.default_fill_value
-
- if isinstance(other, DataFrame):
- # i.e. called from _combine_frame
-
- other_default = getattr(other, "default_fill_value", np.nan)
-
- # if the fill values are the same use them? or use a valid one
- if own_default == other_default:
- # TOOD: won't this evaluate as False if both are np.nan?
- fill_value = own_default
- elif np.isnan(own_default) and not np.isnan(other_default):
- fill_value = other_default
- elif not np.isnan(own_default) and np.isnan(other_default):
- fill_value = own_default
- else:
- fill_value = None
-
- elif isinstance(other, SparseSeries):
- # i.e. called from _combine_match_index
-
- # fill_value is a function of our operator
- if isna(other.fill_value) or isna(own_default):
- fill_value = np.nan
- else:
- fill_value = func(np.float64(own_default), np.float64(other.fill_value))
- fill_value = item_from_zerodim(fill_value)
- else:
- raise NotImplementedError(type(other))
-
- return fill_value
-
- def _reindex_index(
- self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False
- ):
- if level is not None:
- raise TypeError("Reindex by level not supported for sparse")
-
- if self.index.equals(index):
- if copy:
- return self.copy()
- else:
- return self
-
- if len(self.index) == 0:
- return self._constructor(index=index, columns=self.columns).__finalize__(
- self
- )
-
- indexer = self.index.get_indexer(index, method, limit=limit)
- indexer = ensure_platform_int(indexer)
- mask = indexer == -1
- need_mask = mask.any()
-
- new_series = {}
- for col, series in self.items():
- if mask.all():
- continue
-
- values = series.values
- # .take returns SparseArray
- new = values.take(indexer)
- if need_mask:
- new = new.to_dense()
- # convert integer to float if necessary. need to do a lot
- # more than that, handle boolean etc also
- new, fill_value = maybe_upcast(new, fill_value=fill_value)
- np.putmask(new, mask, fill_value)
-
- new_series[col] = new
-
- return self._constructor(
- new_series,
- index=index,
- columns=self.columns,
- default_fill_value=self._default_fill_value,
- ).__finalize__(self)
-
- def _reindex_columns(
- self, columns, method, copy, level, fill_value=None, limit=None, takeable=False
- ):
- if level is not None:
- raise TypeError("Reindex by level not supported for sparse")
-
- if notna(fill_value):
- raise NotImplementedError("'fill_value' argument is not supported")
-
- if limit:
- raise NotImplementedError("'limit' argument is not supported")
-
- if method is not None:
- raise NotImplementedError("'method' argument is not supported")
-
- # TODO: fill value handling
- sdict = {k: v for k, v in self.items() if k in columns}
- return self._constructor(
- sdict,
- index=self.index,
- columns=columns,
- default_fill_value=self._default_fill_value,
- ).__finalize__(self)
-
- def _reindex_with_indexers(
- self,
- reindexers,
- method=None,
- fill_value=None,
- limit=None,
- copy=False,
- allow_dups=False,
- ):
-
- if method is not None or limit is not None:
- raise NotImplementedError(
- "cannot reindex with a method or limit " "with sparse"
- )
-
- if fill_value is None:
- fill_value = np.nan
-
- reindexers = {self._get_axis_number(a): val for (a, val) in reindexers.items()}
-
- index, row_indexer = reindexers.get(0, (None, None))
- columns, col_indexer = reindexers.get(1, (None, None))
-
- if columns is None:
- columns = self.columns
-
- new_arrays = {}
- for col in columns:
- if col not in self:
- continue
- if row_indexer is not None:
- new_arrays[col] = algos.take_1d(
- self[col]._internal_get_values(), row_indexer, fill_value=fill_value
- )
- else:
- new_arrays[col] = self[col]
-
- return self._constructor(new_arrays, index=index, columns=columns).__finalize__(
- self
- )
-
- def _join_compat(
- self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
- ):
- if on is not None:
- raise NotImplementedError(
- "'on' keyword parameter is not yet " "implemented"
- )
- return self._join_index(other, how, lsuffix, rsuffix)
-
- def _join_index(self, other, how, lsuffix, rsuffix):
- if isinstance(other, Series):
- if other.name is None:
- raise ValueError("Other Series must have a name")
-
- other = SparseDataFrame(
- {other.name: other}, default_fill_value=self._default_fill_value
- )
-
- join_index = self.index.join(other.index, how=how)
-
- this = self.reindex(join_index)
- other = other.reindex(join_index)
-
- this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
-
- from pandas import concat
-
- return concat([this, other], axis=1, verify_integrity=True)
-
- def _maybe_rename_join(self, other, lsuffix, rsuffix):
- to_rename = self.columns.intersection(other.columns)
- if len(to_rename) > 0:
- if not lsuffix and not rsuffix:
- raise ValueError(
- "columns overlap but no suffix specified: "
- "{to_rename}".format(to_rename=to_rename)
- )
-
- def lrenamer(x):
- if x in to_rename:
- return "{x}{lsuffix}".format(x=x, lsuffix=lsuffix)
- return x
-
- def rrenamer(x):
- if x in to_rename:
- return "{x}{rsuffix}".format(x=x, rsuffix=rsuffix)
- return x
-
- this = self.rename(columns=lrenamer)
- other = other.rename(columns=rrenamer)
- else:
- this = self
-
- return this, other
-
- def transpose(self, *args, **kwargs):
- """
- Returns a DataFrame with the rows/columns switched.
- """
- nv.validate_transpose(args, kwargs)
- return self._constructor(
- self.values.T,
- index=self.columns,
- columns=self.index,
- default_fill_value=self._default_fill_value,
- default_kind=self._default_kind,
- ).__finalize__(self)
-
- T = property(transpose)
-
- @Appender(DataFrame.count.__doc__)
- def count(self, axis=0, **kwds):
- if axis is None:
- axis = self._stat_axis_number
-
- return self.apply(lambda x: x.count(), axis=axis)
-
- def cumsum(self, axis=0, *args, **kwargs):
- """
- Return SparseDataFrame of cumulative sums over requested axis.
-
- Parameters
- ----------
- axis : {0, 1}
- 0 for row-wise, 1 for column-wise
-
- Returns
- -------
- y : SparseDataFrame
- """
- nv.validate_cumsum(args, kwargs)
-
- if axis is None:
- axis = self._stat_axis_number
-
- return self.apply(lambda x: x.cumsum(), axis=axis)
-
- @Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isna(self):
- return self._apply_columns(lambda x: x.isna())
-
- isnull = isna
-
- @Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notna(self):
- return self._apply_columns(lambda x: x.notna())
-
- notnull = notna
-
- def apply(self, func, axis=0, broadcast=None, reduce=None, result_type=None):
- """
- Analogous to DataFrame.apply, for SparseDataFrame
-
- Parameters
- ----------
- func : function
- Function to apply to each column
- axis : {0, 1, 'index', 'columns'}
- broadcast : bool, default False
- For aggregation functions, return object of same size with values
- propagated
-
- .. deprecated:: 0.23.0
- This argument will be removed in a future version, replaced
- by result_type='broadcast'.
-
- reduce : boolean or None, default None
- Try to apply reduction procedures. If the DataFrame is empty,
- apply will use reduce to determine whether the result should be a
- Series or a DataFrame. If reduce is None (the default), apply's
- return value will be guessed by calling func an empty Series (note:
- while guessing, exceptions raised by func will be ignored). If
- reduce is True a Series will always be returned, and if False a
- DataFrame will always be returned.
-
- .. deprecated:: 0.23.0
- This argument will be removed in a future version, replaced
- by result_type='reduce'.
-
- result_type : {'expand', 'reduce', 'broadcast, None}
- These only act when axis=1 {columns}:
-
- * 'expand' : list-like results will be turned into columns.
- * 'reduce' : return a Series if possible rather than expanding
- list-like results. This is the opposite to 'expand'.
- * 'broadcast' : results will be broadcast to the original shape
- of the frame, the original index & columns will be retained.
-
- The default behaviour (None) depends on the return value of the
- applied function: list-like results will be returned as a Series
- of those. However if the apply function returns a Series these
- are expanded to columns.
-
- .. versionadded:: 0.23.0
-
- Returns
- -------
- applied : Series or SparseDataFrame
- """
- if not len(self.columns):
- return self
- axis = self._get_axis_number(axis)
-
- if isinstance(func, np.ufunc):
- new_series = {}
- for k, v in self.items():
- applied = func(v)
- applied.fill_value = func(v.fill_value)
- new_series[k] = applied
- return self._constructor(
- new_series,
- index=self.index,
- columns=self.columns,
- default_fill_value=self._default_fill_value,
- default_kind=self._default_kind,
- ).__finalize__(self)
-
- from pandas.core.apply import frame_apply
-
- op = frame_apply(
- self,
- func=func,
- axis=axis,
- reduce=reduce,
- broadcast=broadcast,
- result_type=result_type,
- )
- return op.get_result()
-
- def applymap(self, func):
- """
- Apply a function to a DataFrame that is intended to operate
- elementwise, i.e. like doing map(func, series) for each series in the
- DataFrame
-
- Parameters
- ----------
- func : function
- Python function, returns a single value from a single value
-
- Returns
- -------
- applied : DataFrame
- """
- return self.apply(lambda x: [func(y) for y in x])
-
-
-def to_manager(sdf, columns, index):
- """ create and return the block manager from a dataframe of series,
- columns, index
- """
-
- # from BlockManager perspective
- axes = [ensure_index(columns), ensure_index(index)]
-
- return create_block_manager_from_arrays([sdf[c] for c in columns], columns, axes)
-
-
-def stack_sparse_frame(frame):
- """
- Only makes sense when fill_value is NaN
- """
- lengths = [s.sp_index.npoints for _, s in frame.items()]
- nobs = sum(lengths)
-
- # this is pretty fast
- minor_codes = np.repeat(np.arange(len(frame.columns)), lengths)
-
- inds_to_concat = []
- vals_to_concat = []
- # TODO: Figure out whether this can be reached.
- # I think this currently can't be reached because you can't build a
- # SparseDataFrame with a non-np.NaN fill value (fails earlier).
- for _, series in frame.items():
- if not np.isnan(series.fill_value):
- raise TypeError("This routine assumes NaN fill value")
-
- int_index = series.sp_index.to_int_index()
- inds_to_concat.append(int_index.indices)
- vals_to_concat.append(series.sp_values)
-
- major_codes = np.concatenate(inds_to_concat)
- stacked_values = np.concatenate(vals_to_concat)
- index = MultiIndex(
- levels=[frame.index, frame.columns],
- codes=[major_codes, minor_codes],
- verify_integrity=False,
- )
-
- lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=["foo"])
- return lp.sort_index(level=0)
-
-
-def homogenize(series_dict):
- """
- Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
- corresponding to the locations where they all have data
-
- Parameters
- ----------
- series_dict : dict or DataFrame
-
- Notes
- -----
- Using the dumbest algorithm I could think of. Should put some more thought
- into this
-
- Returns
- -------
- homogenized : dict of SparseSeries
- """
- index = None
-
- need_reindex = False
-
- for _, series in series_dict.items():
- if not np.isnan(series.fill_value):
- raise TypeError("this method is only valid with NaN fill values")
-
- if index is None:
- index = series.sp_index
- elif not series.sp_index.equals(index):
- need_reindex = True
- index = index.intersect(series.sp_index)
-
- if need_reindex:
- output = {}
- for name, series in series_dict.items():
- if not series.sp_index.equals(index):
- series = series.sparse_reindex(index)
-
- output[name] = series
- else:
- output = series_dict
-
- return output
-
-
-# use unaccelerated ops for sparse objects
-ops.add_flex_arithmetic_methods(SparseDataFrame)
-ops.add_special_arithmetic_methods(SparseDataFrame)
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
deleted file mode 100644
index 43f2609f46bd6..0000000000000
--- a/pandas/core/sparse/series.py
+++ /dev/null
@@ -1,652 +0,0 @@
-"""
-Data structures for sparse float data. Life is made simpler by dealing only
-with float64 data
-"""
-from collections import abc
-import warnings
-
-import numpy as np
-
-import pandas._libs.index as libindex
-import pandas._libs.sparse as splib
-from pandas._libs.sparse import BlockIndex, IntIndex
-from pandas.compat.numpy import function as nv
-from pandas.util._decorators import Appender, Substitution
-
-from pandas.core.dtypes.common import is_integer, is_scalar
-from pandas.core.dtypes.generic import ABCSeries, ABCSparseSeries
-from pandas.core.dtypes.missing import isna, notna
-
-from pandas.core import generic
-from pandas.core.arrays import SparseArray
-from pandas.core.arrays.sparse import SparseAccessor
-from pandas.core.index import Index
-from pandas.core.internals import SingleBlockManager
-import pandas.core.ops as ops
-from pandas.core.series import Series
-from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series, _sparse_series_to_coo
-
-_shared_doc_kwargs = dict(
- axes="index",
- klass="SparseSeries",
- axes_single_arg="{0, 'index'}",
- optional_labels="",
- optional_axis="",
-)
-
-
-depr_msg = """\
-SparseSeries is deprecated and will be removed in a future version.
-Use a Series with sparse values instead.
-
- >>> series = pd.Series(pd.SparseArray(...))
-
-See http://pandas.pydata.org/pandas-docs/stable/\
-user_guide/sparse.html#migrating for more.
-"""
-
-
-class SparseSeries(Series):
- """Data structure for labeled, sparse floating point data
-
- .. deprecated:: 0.25.0
-
- Use a Series with sparse values instead.
-
- Parameters
- ----------
- data : {array-like, Series, SparseSeries, dict}
- .. versionchanged :: 0.23.0
- If data is a dict, argument order is maintained for Python 3.6
- and later.
-
- kind : {'block', 'integer'}
- fill_value : float
- Code for missing value. Defaults depends on dtype.
- 0 for int dtype, False for bool dtype, and NaN for other dtypes
- sparse_index : {BlockIndex, IntIndex}, optional
- Only if you have one. Mainly used internally
-
- Notes
- -----
- SparseSeries objects are immutable via the typical Python means. If you
- must change values, convert to dense, make your changes, then convert back
- to sparse
- """
-
- _subtyp = "sparse_series"
-
- def __init__(
- self,
- data=None,
- index=None,
- sparse_index=None,
- kind="block",
- fill_value=None,
- name=None,
- dtype=None,
- copy=False,
- fastpath=False,
- ):
- warnings.warn(depr_msg, FutureWarning, stacklevel=2)
- # TODO: Most of this should be refactored and shared with Series
- # 1. BlockManager -> array
- # 2. Series.index, Series.name, index, name reconciliation
- # 3. Implicit reindexing
- # 4. Implicit broadcasting
- # 5. Dict construction
- if data is None:
- data = []
- elif isinstance(data, SingleBlockManager):
- index = data.index
- data = data.blocks[0].values
- elif isinstance(data, (ABCSeries, ABCSparseSeries)):
- index = data.index if index is None else index
- dtype = data.dtype if dtype is None else dtype
- name = data.name if name is None else name
-
- if index is not None:
- data = data.reindex(index)
-
- elif isinstance(data, abc.Mapping):
- data, index = Series()._init_dict(data, index=index)
-
- elif is_scalar(data) and index is not None:
- data = np.full(len(index), fill_value=data)
-
- super().__init__(
- SparseArray(
- data,
- sparse_index=sparse_index,
- kind=kind,
- dtype=dtype,
- fill_value=fill_value,
- copy=copy,
- ),
- index=index,
- name=name,
- copy=False,
- fastpath=fastpath,
- )
-
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
- # avoid infinite recursion for other SparseSeries inputs
- inputs = tuple(x.values if isinstance(x, type(self)) else x for x in inputs)
- result = self.values.__array_ufunc__(ufunc, method, *inputs, **kwargs)
- return self._constructor(
- result,
- index=self.index,
- sparse_index=self.sp_index,
- fill_value=result.fill_value,
- copy=False,
- ).__finalize__(self)
-
- # unary ops
- # TODO: See if this can be shared
- def __pos__(self):
- result = self.values.__pos__()
- return self._constructor(
- result,
- index=self.index,
- sparse_index=self.sp_index,
- fill_value=result.fill_value,
- copy=False,
- ).__finalize__(self)
-
- def __neg__(self):
- result = self.values.__neg__()
- return self._constructor(
- result,
- index=self.index,
- sparse_index=self.sp_index,
- fill_value=result.fill_value,
- copy=False,
- ).__finalize__(self)
-
- def __invert__(self):
- result = self.values.__invert__()
- return self._constructor(
- result,
- index=self.index,
- sparse_index=self.sp_index,
- fill_value=result.fill_value,
- copy=False,
- ).__finalize__(self)
-
- @property
- def block(self):
- warnings.warn("SparseSeries.block is deprecated.", FutureWarning, stacklevel=2)
- return self._data._block
-
- @property
- def fill_value(self):
- return self.values.fill_value
-
- @fill_value.setter
- def fill_value(self, v):
- self.values.fill_value = v
-
- @property
- def sp_index(self):
- return self.values.sp_index
-
- @property
- def sp_values(self):
- return self.values.sp_values
-
- @property
- def npoints(self):
- return self.values.npoints
-
- @classmethod
- def from_array(
- cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False
- ):
- """Construct SparseSeries from array.
-
- .. deprecated:: 0.23.0
- Use the pd.SparseSeries(..) constructor instead.
- """
- warnings.warn(
- "'from_array' is deprecated and will be removed in a "
- "future version. Please use the pd.SparseSeries(..) "
- "constructor instead.",
- FutureWarning,
- stacklevel=2,
- )
- return cls(
- arr,
- index=index,
- name=name,
- copy=copy,
- fill_value=fill_value,
- fastpath=fastpath,
- )
-
- @property
- def _constructor(self):
- return SparseSeries
-
- @property
- def _constructor_expanddim(self):
- from pandas.core.sparse.api import SparseDataFrame
-
- return SparseDataFrame
-
- @property
- def kind(self):
- if isinstance(self.sp_index, BlockIndex):
- return "block"
- elif isinstance(self.sp_index, IntIndex):
- return "integer"
-
- def as_sparse_array(self, kind=None, fill_value=None, copy=False):
- """ return my self as a sparse array, do not copy by default """
-
- if fill_value is None:
- fill_value = self.fill_value
- if kind is None:
- kind = self.kind
- return SparseArray(
- self.values,
- sparse_index=self.sp_index,
- fill_value=fill_value,
- kind=kind,
- copy=copy,
- )
-
- def __repr__(self):
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "Sparse")
- series_rep = Series.__repr__(self)
- rep = "{series}\n{index!r}".format(series=series_rep, index=self.sp_index)
- return rep
-
- def _reduce(
- self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
- ):
- """ perform a reduction operation """
- return op(self.array.to_dense(), skipna=skipna, **kwds)
-
- def __getstate__(self):
- # pickling
- return dict(
- _typ=self._typ,
- _subtyp=self._subtyp,
- _data=self._data,
- fill_value=self.fill_value,
- name=self.name,
- )
-
- def _unpickle_series_compat(self, state):
-
- nd_state, own_state = state
-
- # recreate the ndarray
- data = np.empty(nd_state[1], dtype=nd_state[2])
- np.ndarray.__setstate__(data, nd_state)
-
- index, fill_value, sp_index = own_state[:3]
- name = None
- if len(own_state) > 3:
- name = own_state[3]
-
- # create a sparse array
- if not isinstance(data, SparseArray):
- data = SparseArray(
- data, sparse_index=sp_index, fill_value=fill_value, copy=False
- )
-
- # recreate
- data = SingleBlockManager(data, index, fastpath=True)
- generic.NDFrame.__init__(self, data)
-
- self._set_axis(0, index)
- self.name = name
-
- def _set_subtyp(self, is_all_dates):
- if is_all_dates:
- object.__setattr__(self, "_subtyp", "sparse_time_series")
- else:
- object.__setattr__(self, "_subtyp", "sparse_series")
-
- def _ixs(self, i, axis=0):
- """
- Return the i-th value or values in the SparseSeries by location
-
- Parameters
- ----------
- i : int, slice, or sequence of integers
-
- Returns
- -------
- value : scalar (int) or Series (slice, sequence)
- """
- label = self.index[i]
- if isinstance(label, Index):
- return self.take(i, axis=axis)
- else:
- return self._get_val_at(i)
-
- def _get_val_at(self, loc):
- """ forward to the array """
- return self.values._get_val_at(loc)
-
- def __getitem__(self, key):
- # TODO: Document difference from Series.__getitem__, deprecate,
- # and remove!
- if is_integer(key) and key not in self.index:
- return self._get_val_at(key)
- else:
- return super().__getitem__(key)
-
- def _get_values(self, indexer):
- try:
- return self._constructor(
- self._data.get_slice(indexer), fastpath=True
- ).__finalize__(self)
- except Exception:
- return self[indexer]
-
- def _set_with_engine(self, key, value):
- return self._set_value(key, value)
-
- def abs(self):
- """
- Return an object with absolute value taken. Only applicable to objects
- that are all numeric
-
- Returns
- -------
- abs: same type as caller
- """
- return self._constructor(np.abs(self.values), index=self.index).__finalize__(
- self
- )
-
- def get(self, label, default=None):
- """
- Returns value occupying requested label, default to specified
- missing value if not present. Analogous to dict.get
-
- Parameters
- ----------
- label : object
- Label value looking for
- default : object, optional
- Value to return if label not in index
-
- Returns
- -------
- y : scalar
- """
- if label in self.index:
- loc = self.index.get_loc(label)
- return self._get_val_at(loc)
- else:
- return default
-
- def get_value(self, label, takeable=False):
- """
- Retrieve single value at passed index label
-
- .. deprecated:: 0.21.0
-
- Please use .at[] or .iat[] accessors.
-
- Parameters
- ----------
- index : label
- takeable : interpret the index as indexers, default False
-
- Returns
- -------
- value : scalar value
- """
- warnings.warn(
- "get_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
-
- return self._get_value(label, takeable=takeable)
-
- def _get_value(self, label, takeable=False):
- loc = label if takeable is True else self.index.get_loc(label)
- return self._get_val_at(loc)
-
- _get_value.__doc__ = get_value.__doc__
-
- def set_value(self, label, value, takeable=False):
- """
- Quickly set single value at passed label. If label is not contained, a
- new object is created with the label placed at the end of the result
- index
-
- .. deprecated:: 0.21.0
-
- Please use .at[] or .iat[] accessors.
-
- Parameters
- ----------
- label : object
- Partial indexing with MultiIndex not allowed
- value : object
- Scalar value
- takeable : interpret the index as indexers, default False
-
- Notes
- -----
- This method *always* returns a new object. It is not particularly
- efficient but is provided for API compatibility with Series
-
- Returns
- -------
- series : SparseSeries
- """
- warnings.warn(
- "set_value is deprecated and will be removed "
- "in a future release. Please use "
- ".at[] or .iat[] accessors instead",
- FutureWarning,
- stacklevel=2,
- )
- return self._set_value(label, value, takeable=takeable)
-
- def _set_value(self, label, value, takeable=False):
- values = self.to_dense()
-
- # if the label doesn't exist, we will create a new object here
- # and possibly change the index
- new_values = values._set_value(label, value, takeable=takeable)
- if new_values is not None:
- values = new_values
- new_index = values.index
- values = SparseArray(values, fill_value=self.fill_value, kind=self.kind)
- self._data = SingleBlockManager(values, new_index)
- self._index = new_index
-
- _set_value.__doc__ = set_value.__doc__
-
- def _set_values(self, key, value):
-
- # this might be inefficient as we have to recreate the sparse array
- # rather than setting individual elements, but have to convert
- # the passed slice/boolean that's in dense space into a sparse indexer
- # not sure how to do that!
- if isinstance(key, Series):
- key = key.values
-
- values = self.values.to_dense()
- values[key] = libindex.convert_scalar(values, value)
- values = SparseArray(values, fill_value=self.fill_value, kind=self.kind)
- self._data = SingleBlockManager(values, self.index)
-
- def to_dense(self):
- """
- Convert SparseSeries to a Series.
-
- Returns
- -------
- s : Series
- """
- return Series(self.values.to_dense(), index=self.index, name=self.name)
-
- @property
- def density(self):
- return self.values.density
-
- def copy(self, deep=True):
- """
- Make a copy of the SparseSeries. Only the actual sparse values need to
- be copied
- """
- # TODO: https://github.com/pandas-dev/pandas/issues/22314
- # We skip the block manager till that is resolved.
- new_data = self.values
- if deep:
- new_data = new_data.copy()
- return self._constructor(
- new_data,
- sparse_index=self.sp_index,
- fill_value=self.fill_value,
- index=self.index.copy(),
- name=self.name,
- ).__finalize__(self)
-
- @Substitution(**_shared_doc_kwargs)
- @Appender(generic.NDFrame.reindex.__doc__)
- def reindex(self, index=None, method=None, copy=True, limit=None, **kwargs):
- # TODO: remove?
- return super().reindex(
- index=index, method=method, copy=copy, limit=limit, **kwargs
- )
-
- def sparse_reindex(self, new_index):
- """
- Conform sparse values to new SparseIndex
-
- Parameters
- ----------
- new_index : {BlockIndex, IntIndex}
-
- Returns
- -------
- reindexed : SparseSeries
- """
- if not isinstance(new_index, splib.SparseIndex):
- raise TypeError("new index must be a SparseIndex")
- values = self.values
- values = values.sp_index.to_int_index().reindex(
- values.sp_values.astype("float64"), values.fill_value, new_index
- )
- values = SparseArray(
- values, sparse_index=new_index, fill_value=self.values.fill_value
- )
- return self._constructor(values, index=self.index).__finalize__(self)
-
- def cumsum(self, axis=0, *args, **kwargs):
- """
- Cumulative sum of non-NA/null values.
-
- When performing the cumulative summation, any non-NA/null values will
- be skipped. The resulting SparseSeries will preserve the locations of
- NaN values, but the fill value will be `np.nan` regardless.
-
- Parameters
- ----------
- axis : {0}
-
- Returns
- -------
- cumsum : SparseSeries
- """
- nv.validate_cumsum(args, kwargs)
- # Validate axis
- if axis is not None:
- self._get_axis_number(axis)
-
- new_array = self.values.cumsum()
-
- return self._constructor(
- new_array, index=self.index, sparse_index=new_array.sp_index
- ).__finalize__(self)
-
- # TODO: SparseSeries.isna is Sparse, while Series.isna is dense
- @Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isna(self):
- arr = SparseArray(
- isna(self.values.sp_values),
- sparse_index=self.values.sp_index,
- fill_value=isna(self.fill_value),
- )
- return self._constructor(arr, index=self.index).__finalize__(self)
-
- isnull = isna
-
- @Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notna(self):
- arr = SparseArray(
- notna(self.values.sp_values),
- sparse_index=self.values.sp_index,
- fill_value=notna(self.fill_value),
- )
- return self._constructor(arr, index=self.index).__finalize__(self)
-
- notnull = notna
-
- def dropna(self, axis=0, inplace=False, **kwargs):
- """
- Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
- """
- # TODO: make more efficient
- # Validate axis
- self._get_axis_number(axis or 0)
- dense_valid = self.to_dense().dropna()
- if inplace:
- raise NotImplementedError(
- "Cannot perform inplace dropna" " operations on a SparseSeries"
- )
- if isna(self.fill_value):
- return dense_valid
- else:
- dense_valid = dense_valid[dense_valid != self.fill_value]
- return dense_valid.to_sparse(fill_value=self.fill_value)
-
- def combine_first(self, other):
- """
- Combine Series values, choosing the calling Series's values
- first. Result index will be the union of the two indexes
-
- Parameters
- ----------
- other : Series
-
- Returns
- -------
- y : Series
- """
- if isinstance(other, SparseSeries):
- other = other.to_dense()
-
- dense_combined = self.to_dense().combine_first(other)
- return dense_combined.to_sparse(fill_value=self.fill_value)
-
- @Appender(SparseAccessor.to_coo.__doc__)
- def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
- A, rows, columns = _sparse_series_to_coo(
- self, row_levels, column_levels, sort_labels=sort_labels
- )
- return A, rows, columns
-
- @classmethod
- @Appender(SparseAccessor.from_coo.__doc__)
- def from_coo(cls, A, dense_index=False):
- return _coo_to_sparse_series(A, dense_index=dense_index)
-
-
-# overwrite series methods with unaccelerated Sparse-specific versions
-ops.add_flex_arithmetic_methods(SparseSeries)
-ops.add_special_arithmetic_methods(SparseSeries)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 7c293ca4e50b0..25350119f9df5 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -21,7 +21,12 @@
is_scalar,
is_string_like,
)
-from pandas.core.dtypes.generic import ABCIndexClass, ABCMultiIndex, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCDataFrame,
+ ABCIndexClass,
+ ABCMultiIndex,
+ ABCSeries,
+)
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
@@ -59,6 +64,9 @@ def cat_core(list_of_columns: List, sep: str):
nd.array
The concatenation of list_of_columns with sep
"""
+ if sep == "":
+ # no need to interleave sep if it is empty
+ return np.sum(list_of_columns, axis=0)
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
return np.sum(list_with_sep, axis=0)
@@ -603,7 +611,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
- "case and flags cannot be set" " when pat is a compiled regex"
+ "case and flags cannot be set when pat is a compiled regex"
)
else:
# not a compiled regex
@@ -623,10 +631,10 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
else:
if is_compiled_re:
raise ValueError(
- "Cannot use a compiled regex as replacement " "pattern with regex=False"
+ "Cannot use a compiled regex as replacement pattern with regex=False"
)
if callable(repl):
- raise ValueError("Cannot use a callable replacement when " "regex=False")
+ raise ValueError("Cannot use a callable replacement when regex=False")
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
@@ -763,7 +771,7 @@ def _str_extract_noexpand(arr, pat, flags=0):
Index.
"""
- from pandas import DataFrame, Index
+ from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
@@ -772,7 +780,7 @@ def _str_extract_noexpand(arr, pat, flags=0):
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
- if isinstance(arr, Index):
+ if isinstance(arr, ABCIndexClass):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
@@ -837,8 +845,6 @@ def str_extract(arr, pat, flags=0, expand=True):
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
- .. versionadded:: 0.18.0
-
Returns
-------
DataFrame or Series or Index
@@ -916,8 +922,6 @@ def str_extractall(arr, pat, flags=0):
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
- .. versionadded:: 0.18.0
-
Parameters
----------
pat : str
@@ -1438,6 +1442,12 @@ def str_slice(arr, start=None, stop=None, step=None):
2 hameleon
dtype: object
+ >>> s.str.slice(start=-1)
+ 0 a
+ 1 x
+ 2 n
+ dtype: object
+
>>> s.str.slice(stop=2)
0 ko
1 fo
@@ -1948,7 +1958,7 @@ def _validate(data):
"""
if isinstance(data, ABCMultiIndex):
raise AttributeError(
- "Can only use .str accessor with Index, " "not MultiIndex"
+ "Can only use .str accessor with Index, not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
@@ -1957,11 +1967,14 @@ def _validate(data):
values = getattr(data, "values", data) # Series / Index
values = getattr(values, "categories", values) # categorical / normal
- # missing values obfuscate type inference -> skip
- inferred_dtype = lib.infer_dtype(values, skipna=True)
+ try:
+ inferred_dtype = lib.infer_dtype(values, skipna=True)
+ except ValueError:
+ # GH#27571 mostly occurs with ExtensionArray
+ inferred_dtype = None
if inferred_dtype not in allowed_types:
- raise AttributeError("Can only use .str accessor with string " "values!")
+ raise AttributeError("Can only use .str accessor with string values!")
return inferred_dtype
def __getitem__(self, key):
@@ -2003,7 +2016,7 @@ def _wrap_result(
# infer from ndim if expand is not specified
expand = result.ndim != 1
- elif expand is True and not isinstance(self._orig, Index):
+ elif expand is True and not isinstance(self._orig, ABCIndexClass):
# required when expand=True is explicitly specified
# not needed when inferred
@@ -2036,7 +2049,7 @@ def cons_row(x):
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
- if isinstance(self._orig, Index):
+ if isinstance(self._orig, ABCIndexClass):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
@@ -2062,7 +2075,7 @@ def cons_row(x):
cons = self._orig._constructor
return cons(result, name=name, index=index)
- def _get_series_list(self, others, ignore_index=False):
+ def _get_series_list(self, others):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
@@ -2070,120 +2083,56 @@ def _get_series_list(self, others, ignore_index=False):
Parameters
----------
- others : Series, Index, DataFrame, np.ndarray, list-like or list-like
- of objects that are Series, Index or np.ndarray (1-dim)
- ignore_index : boolean, default False
- Determines whether to forcefully align others with index of caller
+ others : Series, DataFrame, np.ndarray, list-like or list-like of
+ objects that are either Series, Index or np.ndarray (1-dim)
Returns
-------
- tuple : (others transformed into list of Series,
- boolean whether FutureWarning should be raised)
+ list : others transformed into list of Series
"""
-
- # Once str.cat defaults to alignment, this function can be simplified;
- # will not need `ignore_index` and the second boolean output anymore
-
- from pandas import Index, Series, DataFrame
+ from pandas import Series, DataFrame
# self._orig is either Series or Index
- idx = self._orig if isinstance(self._orig, Index) else self._orig.index
-
- err_msg = (
- "others must be Series, Index, DataFrame, np.ndarray or "
- "list-like (either containing only strings or containing "
- "only objects of type Series/Index/list-like/np.ndarray)"
- )
+ idx = self._orig if isinstance(self._orig, ABCIndexClass) else self._orig.index
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
- # Objects with an index (i.e. Series/Index/DataFrame) keep their own
- # index, *unless* ignore_index is set to True.
- if isinstance(others, Series):
- warn = not others.index.equals(idx)
- # only reconstruct Series when absolutely necessary
- los = [
- Series(others.values, index=idx) if ignore_index and warn else others
- ]
- return (los, warn)
- elif isinstance(others, Index):
- warn = not others.equals(idx)
- los = [Series(others.values, index=(idx if ignore_index else others))]
- return (los, warn)
- elif isinstance(others, DataFrame):
- warn = not others.index.equals(idx)
- if ignore_index and warn:
- # without copy, this could change "others"
- # that was passed to str.cat
- others = others.copy()
- others.index = idx
- return ([others[x] for x in others], warn)
+ # Objects with an index (i.e. Series/Index/DataFrame) keep their own.
+ if isinstance(others, ABCSeries):
+ return [others]
+ elif isinstance(others, ABCIndexClass):
+ return [Series(others.values, index=others)]
+ elif isinstance(others, ABCDataFrame):
+ return [others[x] for x in others]
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
- return ([others[x] for x in others], False)
+ return [others[x] for x in others]
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
- # either one-dimensional list-likes or scalars
- if all(is_list_like(x, allow_sets=False) for x in others):
+ # either Series/Index/np.ndarray (1-dim)...
+ if all(
+ isinstance(x, (ABCSeries, ABCIndexClass))
+ or (isinstance(x, np.ndarray) and x.ndim == 1)
+ for x in others
+ ):
los = []
- join_warn = False
- depr_warn = False
- # iterate through list and append list of series for each
- # element (which we check to be one-dimensional and non-nested)
- while others:
- nxt = others.pop(0) # nxt is guaranteed list-like by above
-
- # GH 21950 - DeprecationWarning
- # only allowing Series/Index/np.ndarray[1-dim] will greatly
- # simply this function post-deprecation.
- if not (
- isinstance(nxt, (Series, Index))
- or (isinstance(nxt, np.ndarray) and nxt.ndim == 1)
- ):
- depr_warn = True
-
- if not isinstance(nxt, (DataFrame, Series, Index, np.ndarray)):
- # safety for non-persistent list-likes (e.g. iterators)
- # do not map indexed/typed objects; info needed below
- nxt = list(nxt)
-
- # known types for which we can avoid deep inspection
- no_deep = (
- isinstance(nxt, np.ndarray) and nxt.ndim == 1
- ) or isinstance(nxt, (Series, Index))
- # nested list-likes are forbidden:
- # -> elements of nxt must not be list-like
- is_legal = (no_deep and nxt.dtype == object) or all(
- not is_list_like(x) for x in nxt
- )
-
- # DataFrame is false positive of is_legal
- # because "x in df" returns column names
- if not is_legal or isinstance(nxt, DataFrame):
- raise TypeError(err_msg)
-
- nxt, wnx = self._get_series_list(nxt, ignore_index=ignore_index)
- los = los + nxt
- join_warn = join_warn or wnx
-
- if depr_warn:
- warnings.warn(
- "list-likes other than Series, Index, or "
- "np.ndarray WITHIN another list-like are "
- "deprecated and will be removed in a future "
- "version.",
- FutureWarning,
- stacklevel=4,
- )
- return (los, join_warn)
+ while others: # iterate through list and append each element
+ los = los + self._get_series_list(others.pop(0))
+ return los
+ # ... or just strings
elif all(not is_list_like(x) for x in others):
- return ([Series(others, index=idx)], False)
- raise TypeError(err_msg)
+ return [Series(others, index=idx)]
+ raise TypeError(
+ "others must be Series, Index, DataFrame, np.ndarray "
+ "or list-like (either containing only strings or "
+ "containing only objects of type Series/Index/"
+ "np.ndarray[1-dim])"
+ )
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
- def cat(self, others=None, sep=None, na_rep=None, join=None):
+ def cat(self, others=None, sep=None, na_rep=None, join="left"):
"""
Concatenate strings in the Series/Index with given separator.
@@ -2217,16 +2166,15 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
- join : {'left', 'right', 'outer', 'inner'}, default None
+ join : {'left', 'right', 'outer', 'inner'}, default 'left'
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
- to match the length of the calling Series/Index). If None,
- alignment is disabled, but this option will be removed in a future
- version of pandas and replaced with a default of `'left'`. To
- disable alignment, use `.values` on any Series/Index/DataFrame in
- `others`.
+ to match the length of the calling Series/Index). To disable
+ alignment, use `.values` on any Series/Index/DataFrame in `others`.
.. versionadded:: 0.23.0
+ .. versionchanged:: 1.0.0
+ Changed default of `join` from None to `'left'`.
Returns
-------
@@ -2325,7 +2273,7 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
if sep is None:
sep = ""
- if isinstance(self._orig, Index):
+ if isinstance(self._orig, ABCIndexClass):
data = Series(self._orig, index=self._orig)
else: # Series
data = self._orig
@@ -2342,39 +2290,14 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
try:
# turn anything in "others" into lists of Series
- others, warn = self._get_series_list(others, ignore_index=(join is None))
+ others = self._get_series_list(others)
except ValueError: # do not catch TypeError raised by _get_series_list
- if join is None:
- raise ValueError(
- "All arrays must be same length, except "
- "those having an index if `join` is not None"
- )
- else:
- raise ValueError(
- "If `others` contains arrays or lists (or "
- "other list-likes without an index), these "
- "must all be of the same length as the "
- "calling Series/Index."
- )
-
- if join is None and warn:
- warnings.warn(
- "A future version of pandas will perform index "
- "alignment when `others` is a Series/Index/"
- "DataFrame (or a list-like containing one). To "
- "disable alignment (the behavior before v.0.23) and "
- "silence this warning, use `.values` on any Series/"
- "Index/DataFrame in `others`. To enable alignment "
- "and silence this warning, pass `join='left'|"
- "'outer'|'inner'|'right'`. The future default will "
- "be `join='left'`.",
- FutureWarning,
- stacklevel=3,
+ raise ValueError(
+ "If `others` contains arrays or lists (or other "
+ "list-likes without an index), these must all be "
+ "of the same length as the calling Series/Index."
)
- # if join is None, _get_series_list already force-aligned indexes
- join = "left" if join is None else join
-
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
@@ -2411,7 +2334,7 @@ def cat(self, others=None, sep=None, na_rep=None, join=None):
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
- if isinstance(self._orig, Index):
+ if isinstance(self._orig, ABCIndexClass):
# add dtype for case that result is all-NA
result = Index(result, dtype=object, name=self._orig.name)
else: # Series
@@ -2655,7 +2578,7 @@ def rsplit(self, pat=None, n=-1, expand=False):
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
- "also": "rpartition : Split the string at the last occurrence of " "`sep`.",
+ "also": "rpartition : Split the string at the last occurrence of `sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@@ -2671,7 +2594,7 @@ def partition(self, sep=" ", expand=True):
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
- "also": "partition : Split the string at the first occurrence of " "`sep`.",
+ "also": "partition : Split the string at the first occurrence of `sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index e9d2c3f07bfae..32dc3c1f3e8f2 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -334,6 +334,9 @@ def _convert_listlike_datetimes(
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
+ elif tz:
+ # DatetimeArray, DatetimeIndex
+ return arg.tz_localize(tz)
return arg
@@ -365,7 +368,7 @@ def _convert_listlike_datetimes(
return result
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
- "arg must be a string, datetime, list, tuple, " "1-d array, or Series"
+ "arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
# warn if passing timedelta64, raise for PeriodDtype
@@ -402,9 +405,7 @@ def _convert_listlike_datetimes(
orig_arg = ensure_object(orig_arg)
result = _attempt_YYYYMMDD(orig_arg, errors=errors)
except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
- raise ValueError(
- "cannot convert the input to " "'%Y%m%d' date format"
- )
+ raise ValueError("cannot convert the input to '%Y%m%d' date format")
# fallback
if result is None:
@@ -503,7 +504,7 @@ def _adjust_to_origin(arg, origin, unit):
try:
arg = arg - j0
except TypeError:
- raise ValueError("incompatible 'arg' type for given " "'origin'='julian'")
+ raise ValueError("incompatible 'arg' type for given 'origin'='julian'")
# preemptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
@@ -576,10 +577,7 @@ def to_datetime(
Parameters
----------
- arg : integer, float, string, datetime, list, tuple, 1-d array, Series
-
- .. versionadded:: 0.18.1
-
+ arg : int, float, str, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
@@ -587,13 +585,13 @@ def to_datetime(
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
- dayfirst : boolean, default False
+ dayfirst : bool, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
- yearfirst : boolean, default False
+ yearfirst : bool, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
@@ -606,10 +604,10 @@ def to_datetime(
.. versionadded:: 0.16.1
- utc : boolean, default None
+ utc : bool, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
- box : boolean, default True
+ box : bool, default True
- If True returns a DatetimeIndex or Index-like object
- If False returns ndarray of values.
@@ -619,22 +617,22 @@ def to_datetime(
instead to get an ndarray of values or numpy.datetime64,
respectively.
- format : string, default None
+ format : str, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
See strftime documentation for more information on choices:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
- exact : boolean, True by default
+ exact : bool, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
- unit : string, default 'ns'
+ unit : str, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
- infer_datetime_format : boolean, default False
+ infer_datetime_format : bool, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
@@ -651,7 +649,7 @@ def to_datetime(
origin.
.. versionadded:: 0.20.0
- cache : boolean, default True
+ cache : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
@@ -900,7 +898,7 @@ def coerce(values):
try:
values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz)
except (TypeError, ValueError) as e:
- raise ValueError("cannot assemble the " "datetimes: {error}".format(error=e))
+ raise ValueError("cannot assemble the datetimes: {error}".format(error=e))
for u in ["h", "m", "s", "ms", "us", "ns"]:
value = unit_rev.get(u)
@@ -1032,7 +1030,7 @@ def _convert_listlike(arg, format):
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
- "arg must be a string, datetime, list, tuple, " "1-d array, or Series"
+ "arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
arg = ensure_object(arg)
@@ -1077,7 +1075,7 @@ def _convert_listlike(arg, format):
times.append(time_object)
elif errors == "raise":
raise ValueError(
- "Cannot convert arg {arg} to " "a time".format(arg=arg)
+ "Cannot convert arg {arg} to a time".format(arg=arg)
)
elif errors == "ignore":
return arg
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index e1a976b874c25..fa33d11bda7eb 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -59,8 +59,6 @@ def to_numeric(arg, errors="raise", downcast=None):
checked satisfy that specification, no downcasting will be
performed on the data.
- .. versionadded:: 0.19.0
-
Returns
-------
ret : numeric if parsing succeeded.
@@ -139,21 +137,20 @@ def to_numeric(arg, errors="raise", downcast=None):
else:
values = arg
- try:
- if is_numeric_dtype(values):
- pass
- elif is_datetime_or_timedelta_dtype(values):
- values = values.astype(np.int64)
- else:
- values = ensure_object(values)
- coerce_numeric = errors not in ("ignore", "raise")
+ if is_numeric_dtype(values):
+ pass
+ elif is_datetime_or_timedelta_dtype(values):
+ values = values.astype(np.int64)
+ else:
+ values = ensure_object(values)
+ coerce_numeric = errors not in ("ignore", "raise")
+ try:
values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
-
- except Exception:
- if errors == "raise":
- raise
+ except (ValueError, TypeError):
+ if errors == "raise":
+ raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 2c594a3df27ea..cc31317980ca8 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -97,11 +97,11 @@ def to_timedelta(arg, unit="ns", box=True, errors="raise"):
unit = parse_timedelta_unit(unit)
if errors not in ("ignore", "raise", "coerce"):
- raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}")
+ raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'")
if unit in {"Y", "y", "M"}:
warnings.warn(
- "M and Y units are deprecated and " "will be removed in a future version.",
+ "M and Y units are deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
@@ -120,7 +120,7 @@ def to_timedelta(arg, unit="ns", box=True, errors="raise"):
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
- "arg must be a string, timedelta, list, tuple, " "1-d array, or Series"
+ "arg must be a string, timedelta, list, tuple, 1-d array, or Series"
)
# ...so it must be a scalar value. Return scalar.
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index f5ab81ad9089e..bcdbf0855cbb4 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -58,9 +58,7 @@ def hash_pandas_object(
obj, index=True, encoding="utf8", hash_key=None, categorize=True
):
"""
- Return a data hash of the Index/Series/DataFrame
-
- .. versionadded:: 0.19.2
+ Return a data hash of the Index/Series/DataFrame.
Parameters
----------
@@ -245,8 +243,6 @@ def hash_array(vals, encoding="utf8", hash_key=None, categorize=True):
"""
Given a 1d array, return an array of deterministic integers.
- .. versionadded:: 0.19.2
-
Parameters
----------
vals : ndarray, Categorical
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py
new file mode 100644
index 0000000000000..dcf58a4c0dd5b
--- /dev/null
+++ b/pandas/core/window/__init__.py
@@ -0,0 +1,3 @@
+from pandas.core.window.ewm import EWM # noqa:F401
+from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401
+from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
new file mode 100644
index 0000000000000..0f2920b3558c9
--- /dev/null
+++ b/pandas/core/window/common.py
@@ -0,0 +1,276 @@
+"""Common utility functions for rolling operations"""
+from collections import defaultdict
+import warnings
+
+import numpy as np
+
+from pandas.core.dtypes.common import is_integer
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+
+import pandas.core.common as com
+from pandas.core.generic import _shared_docs
+from pandas.core.groupby.base import GroupByMixin
+from pandas.core.index import MultiIndex
+
+_shared_docs = dict(**_shared_docs)
+_doc_template = """
+ Returns
+ -------
+ Series or DataFrame
+ Return type is determined by the caller.
+
+ See Also
+ --------
+ Series.%(name)s : Series %(name)s.
+ DataFrame.%(name)s : DataFrame %(name)s.
+"""
+
+
+class _GroupByMixin(GroupByMixin):
+ """
+ Provide the groupby facilities.
+ """
+
+ def __init__(self, obj, *args, **kwargs):
+ parent = kwargs.pop("parent", None) # noqa
+ groupby = kwargs.pop("groupby", None)
+ if groupby is None:
+ groupby, obj = obj, obj.obj
+ self._groupby = groupby
+ self._groupby.mutated = True
+ self._groupby.grouper.mutated = True
+ super().__init__(obj, *args, **kwargs)
+
+ count = GroupByMixin._dispatch("count")
+ corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
+ cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
+
+ def _apply(
+ self, func, name=None, window=None, center=None, check_minp=None, **kwargs
+ ):
+ """
+ Dispatch to apply; we are stripping all of the _apply kwargs and
+ performing the original function call on the grouped object.
+ """
+
+ def f(x, name=name, *args):
+ x = self._shallow_copy(x)
+
+ if isinstance(name, str):
+ return getattr(x, name)(*args, **kwargs)
+
+ return x.apply(name, *args, **kwargs)
+
+ return self._groupby.apply(f)
+
+
+def _flex_binary_moment(arg1, arg2, f, pairwise=False):
+
+ if not (
+ isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
+ and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
+ ):
+ raise TypeError(
+ "arguments to moment function must be of type "
+ "np.ndarray/Series/DataFrame"
+ )
+
+ if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
+ arg2, (np.ndarray, ABCSeries)
+ ):
+ X, Y = _prep_binary(arg1, arg2)
+ return f(X, Y)
+
+ elif isinstance(arg1, ABCDataFrame):
+ from pandas import DataFrame
+
+ def dataframe_from_int_dict(data, frame_template):
+ result = DataFrame(data, index=frame_template.index)
+ if len(result.columns) > 0:
+ result.columns = frame_template.columns[result.columns]
+ return result
+
+ results = {}
+ if isinstance(arg2, ABCDataFrame):
+ if pairwise is False:
+ if arg1 is arg2:
+ # special case in order to handle duplicate column names
+ for i, col in enumerate(arg1.columns):
+ results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
+ return dataframe_from_int_dict(results, arg1)
+ else:
+ if not arg1.columns.is_unique:
+ raise ValueError("'arg1' columns are not unique")
+ if not arg2.columns.is_unique:
+ raise ValueError("'arg2' columns are not unique")
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("ignore", RuntimeWarning)
+ X, Y = arg1.align(arg2, join="outer")
+ X = X + 0 * Y
+ Y = Y + 0 * X
+
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("ignore", RuntimeWarning)
+ res_columns = arg1.columns.union(arg2.columns)
+ for col in res_columns:
+ if col in X and col in Y:
+ results[col] = f(X[col], Y[col])
+ return DataFrame(results, index=X.index, columns=res_columns)
+ elif pairwise is True:
+ results = defaultdict(dict)
+ for i, k1 in enumerate(arg1.columns):
+ for j, k2 in enumerate(arg2.columns):
+ if j < i and arg2 is arg1:
+ # Symmetric case
+ results[i][j] = results[j][i]
+ else:
+ results[i][j] = f(
+ *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])
+ )
+
+ from pandas import concat
+
+ result_index = arg1.index.union(arg2.index)
+ if len(result_index):
+
+ # construct result frame
+ result = concat(
+ [
+ concat(
+ [results[i][j] for j, c in enumerate(arg2.columns)],
+ ignore_index=True,
+ )
+ for i, c in enumerate(arg1.columns)
+ ],
+ ignore_index=True,
+ axis=1,
+ )
+ result.columns = arg1.columns
+
+ # set the index and reorder
+ if arg2.columns.nlevels > 1:
+ result.index = MultiIndex.from_product(
+ arg2.columns.levels + [result_index]
+ )
+ result = result.reorder_levels([2, 0, 1]).sort_index()
+ else:
+ result.index = MultiIndex.from_product(
+ [range(len(arg2.columns)), range(len(result_index))]
+ )
+ result = result.swaplevel(1, 0).sort_index()
+ result.index = MultiIndex.from_product(
+ [result_index] + [arg2.columns]
+ )
+ else:
+
+ # empty result
+ result = DataFrame(
+ index=MultiIndex(
+ levels=[arg1.index, arg2.columns], codes=[[], []]
+ ),
+ columns=arg2.columns,
+ dtype="float64",
+ )
+
+ # reset our index names to arg1 names
+ # reset our column names to arg2 names
+ # careful not to mutate the original names
+ result.columns = result.columns.set_names(arg1.columns.names)
+ result.index = result.index.set_names(
+ result_index.names + arg2.columns.names
+ )
+
+ return result
+
+ else:
+ raise ValueError("'pairwise' is not True/False")
+ else:
+ results = {
+ i: f(*_prep_binary(arg1.iloc[:, i], arg2))
+ for i, col in enumerate(arg1.columns)
+ }
+ return dataframe_from_int_dict(results, arg1)
+
+ else:
+ return _flex_binary_moment(arg2, arg1, f)
+
+
+def _get_center_of_mass(comass, span, halflife, alpha):
+ valid_count = com.count_not_none(comass, span, halflife, alpha)
+ if valid_count > 1:
+ raise ValueError("comass, span, halflife, and alpha are mutually exclusive")
+
+ # Convert to center of mass; domain checks ensure 0 < alpha <= 1
+ if comass is not None:
+ if comass < 0:
+ raise ValueError("comass must satisfy: comass >= 0")
+ elif span is not None:
+ if span < 1:
+ raise ValueError("span must satisfy: span >= 1")
+ comass = (span - 1) / 2.0
+ elif halflife is not None:
+ if halflife <= 0:
+ raise ValueError("halflife must satisfy: halflife > 0")
+ decay = 1 - np.exp(np.log(0.5) / halflife)
+ comass = 1 / decay - 1
+ elif alpha is not None:
+ if alpha <= 0 or alpha > 1:
+ raise ValueError("alpha must satisfy: 0 < alpha <= 1")
+ comass = (1.0 - alpha) / alpha
+ else:
+ raise ValueError("Must pass one of comass, span, halflife, or alpha")
+
+ return float(comass)
+
+
+def _offset(window, center):
+ if not is_integer(window):
+ window = len(window)
+ offset = (window - 1) / 2.0 if center else 0
+ try:
+ return int(offset)
+ except TypeError:
+ return offset.astype(int)
+
+
+def _require_min_periods(p):
+ def _check_func(minp, window):
+ if minp is None:
+ return window
+ else:
+ return max(p, minp)
+
+ return _check_func
+
+
+def _use_window(minp, window):
+ if minp is None:
+ return window
+ else:
+ return minp
+
+
+def _zsqrt(x):
+ with np.errstate(all="ignore"):
+ result = np.sqrt(x)
+ mask = x < 0
+
+ if isinstance(x, ABCDataFrame):
+ if mask.values.any():
+ result[mask] = 0
+ else:
+ if mask.any():
+ result[mask] = 0
+
+ return result
+
+
+def _prep_binary(arg1, arg2):
+ if not isinstance(arg2, type(arg1)):
+ raise Exception("Input arrays must be of the same type!")
+
+ # mask out values, this also makes a common index...
+ X = arg1 + 0 * arg2
+ Y = arg2 + 0 * arg1
+
+ return X, Y
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
new file mode 100644
index 0000000000000..40e6c679ba72d
--- /dev/null
+++ b/pandas/core/window/ewm.py
@@ -0,0 +1,388 @@
+from textwrap import dedent
+
+import numpy as np
+
+import pandas._libs.window as libwindow
+from pandas.compat.numpy import function as nv
+from pandas.util._decorators import Appender, Substitution
+
+from pandas.core.dtypes.generic import ABCDataFrame
+
+from pandas.core.base import DataError
+from pandas.core.window.common import _doc_template, _get_center_of_mass, _shared_docs
+from pandas.core.window.rolling import _flex_binary_moment, _Rolling, _zsqrt
+
+_bias_template = """
+ Parameters
+ ----------
+ bias : bool, default False
+ Use a standard estimation bias correction.
+ *args, **kwargs
+ Arguments and keyword arguments to be passed into func.
+"""
+
+_pairwise_template = """
+ Parameters
+ ----------
+ other : Series, DataFrame, or ndarray, optional
+ If not supplied then will default to self and produce pairwise
+ output.
+ pairwise : bool, default None
+ If False then only matching columns between self and other will be
+ used and the output will be a DataFrame.
+ If True then all pairwise combinations will be calculated and the
+ output will be a MultiIndex DataFrame in the case of DataFrame
+ inputs. In the case of missing elements, only complete pairwise
+ observations will be used.
+ bias : bool, default False
+ Use a standard estimation bias correction.
+ **kwargs
+ Keyword arguments to be passed into func.
+"""
+
+
+class EWM(_Rolling):
+ r"""
+ Provide exponential weighted functions.
+
+ Parameters
+ ----------
+ com : float, optional
+ Specify decay in terms of center of mass,
+ :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`.
+ span : float, optional
+ Specify decay in terms of span,
+ :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`.
+ halflife : float, optional
+ Specify decay in terms of half-life,
+ :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`.
+ alpha : float, optional
+ Specify smoothing factor :math:`\alpha` directly,
+ :math:`0 < \alpha \leq 1`.
+ min_periods : int, default 0
+ Minimum number of observations in window required to have a value
+ (otherwise result is NA).
+ adjust : bool, default True
+ Divide by decaying adjustment factor in beginning periods to account
+ for imbalance in relative weightings
+ (viewing EWMA as a moving average).
+ ignore_na : bool, default False
+ Ignore missing values when calculating weights;
+ specify True to reproduce pre-0.15.0 behavior.
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ The axis to use. The value 0 identifies the rows, and 1
+ identifies the columns.
+
+ Returns
+ -------
+ DataFrame
+ A Window sub-classed for the particular operation.
+
+ See Also
+ --------
+ rolling : Provides rolling window calculations.
+ expanding : Provides expanding transformations.
+
+ Notes
+ -----
+ Exactly one of center of mass, span, half-life, and alpha must be provided.
+ Allowed values and relationship between the parameters are specified in the
+ parameter descriptions above; see the link at the end of this section for
+ a detailed explanation.
+
+ When adjust is True (default), weighted averages are calculated using
+ weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
+
+ When adjust is False, weighted averages are calculated recursively as:
+ weighted_average[0] = arg[0];
+ weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
+
+ When ignore_na is False (default), weights are based on absolute positions.
+ For example, the weights of x and y used in calculating the final weighted
+ average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
+ (1-alpha)**2 and alpha (if adjust is False).
+
+ When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
+ on relative positions. For example, the weights of x and y used in
+ calculating the final weighted average of [x, None, y] are 1-alpha and 1
+ (if adjust is True), and 1-alpha and alpha (if adjust is False).
+
+ More details can be found at
+ http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ >>> df
+ B
+ 0 0.0
+ 1 1.0
+ 2 2.0
+ 3 NaN
+ 4 4.0
+
+ >>> df.ewm(com=0.5).mean()
+ B
+ 0 0.000000
+ 1 0.750000
+ 2 1.615385
+ 3 1.615385
+ 4 3.670213
+ """
+ _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"]
+
+ def __init__(
+ self,
+ obj,
+ com=None,
+ span=None,
+ halflife=None,
+ alpha=None,
+ min_periods=0,
+ adjust=True,
+ ignore_na=False,
+ axis=0,
+ ):
+ self.obj = obj
+ self.com = _get_center_of_mass(com, span, halflife, alpha)
+ self.min_periods = min_periods
+ self.adjust = adjust
+ self.ignore_na = ignore_na
+ self.axis = axis
+ self.on = None
+
+ @property
+ def _constructor(self):
+ return EWM
+
+ _agg_see_also_doc = dedent(
+ """
+ See Also
+ --------
+ pandas.DataFrame.rolling.aggregate
+ """
+ )
+
+ _agg_examples_doc = dedent(
+ """
+ Examples
+ --------
+
+ >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
+ >>> df
+ A B C
+ 0 -2.385977 -0.102758 0.438822
+ 1 -1.004295 0.905829 -0.954544
+ 2 0.735167 -0.165272 -1.619346
+ 3 -0.702657 -1.340923 -0.706334
+ 4 -0.246845 0.211596 -0.901819
+ 5 2.463718 3.157577 -1.380906
+ 6 -1.142255 2.340594 -0.039875
+ 7 1.396598 -1.647453 1.677227
+ 8 -0.543425 1.761277 -0.220481
+ 9 -0.640505 0.289374 -1.550670
+
+ >>> df.ewm(alpha=0.5).mean()
+ A B C
+ 0 -2.385977 -0.102758 0.438822
+ 1 -1.464856 0.569633 -0.490089
+ 2 -0.207700 0.149687 -1.135379
+ 3 -0.471677 -0.645305 -0.906555
+ 4 -0.355635 -0.203033 -0.904111
+ 5 1.076417 1.503943 -1.146293
+ 6 -0.041654 1.925562 -0.588728
+ 7 0.680292 0.132049 0.548693
+ 8 0.067236 0.948257 0.163353
+ 9 -0.286980 0.618493 -0.694496
+ """
+ )
+
+ @Substitution(
+ see_also=_agg_see_also_doc,
+ examples=_agg_examples_doc,
+ versionadded="",
+ klass="Series/Dataframe",
+ axis="",
+ )
+ @Appender(_shared_docs["aggregate"])
+ def aggregate(self, func, *args, **kwargs):
+ return super().aggregate(func, *args, **kwargs)
+
+ agg = aggregate
+
+ def _apply(self, func, **kwargs):
+ """
+ Rolling statistical measure using supplied function. Designed to be
+ used with passed-in Cython array-based functions.
+
+ Parameters
+ ----------
+ func : str/callable to apply
+
+ Returns
+ -------
+ y : same type as input argument
+ """
+ blocks, obj = self._create_blocks()
+ block_list = list(blocks)
+
+ results = []
+ exclude = []
+ for i, b in enumerate(blocks):
+ try:
+ values = self._prep_values(b.values)
+
+ except (TypeError, NotImplementedError):
+ if isinstance(obj, ABCDataFrame):
+ exclude.extend(b.columns)
+ del block_list[i]
+ continue
+ else:
+ raise DataError("No numeric types to aggregate")
+
+ if values.size == 0:
+ results.append(values.copy())
+ continue
+
+ # if we have a string function name, wrap it
+ if isinstance(func, str):
+ cfunc = getattr(libwindow, func, None)
+ if cfunc is None:
+ raise ValueError(
+ "we do not support this function "
+ "in libwindow.{func}".format(func=func)
+ )
+
+ def func(arg):
+ return cfunc(
+ arg,
+ self.com,
+ int(self.adjust),
+ int(self.ignore_na),
+ int(self.min_periods),
+ )
+
+ results.append(np.apply_along_axis(func, self.axis, values))
+
+ return self._wrap_results(results, block_list, obj, exclude)
+
+ @Substitution(name="ewm")
+ @Appender(_doc_template)
+ def mean(self, *args, **kwargs):
+ """
+ Exponential weighted moving average.
+
+ Parameters
+ ----------
+ *args, **kwargs
+ Arguments and keyword arguments to be passed into func.
+ """
+ nv.validate_window_func("mean", args, kwargs)
+ return self._apply("ewma", **kwargs)
+
+ @Substitution(name="ewm")
+ @Appender(_doc_template)
+ @Appender(_bias_template)
+ def std(self, bias=False, *args, **kwargs):
+ """
+ Exponential weighted moving stddev.
+ """
+ nv.validate_window_func("std", args, kwargs)
+ return _zsqrt(self.var(bias=bias, **kwargs))
+
+ vol = std
+
+ @Substitution(name="ewm")
+ @Appender(_doc_template)
+ @Appender(_bias_template)
+ def var(self, bias=False, *args, **kwargs):
+ """
+ Exponential weighted moving variance.
+ """
+ nv.validate_window_func("var", args, kwargs)
+
+ def f(arg):
+ return libwindow.ewmcov(
+ arg,
+ arg,
+ self.com,
+ int(self.adjust),
+ int(self.ignore_na),
+ int(self.min_periods),
+ int(bias),
+ )
+
+ return self._apply(f, **kwargs)
+
+ @Substitution(name="ewm")
+ @Appender(_doc_template)
+ @Appender(_pairwise_template)
+ def cov(self, other=None, pairwise=None, bias=False, **kwargs):
+ """
+ Exponential weighted sample covariance.
+ """
+ if other is None:
+ other = self._selected_obj
+ # only default unset
+ pairwise = True if pairwise is None else pairwise
+ other = self._shallow_copy(other)
+
+ def _get_cov(X, Y):
+ X = self._shallow_copy(X)
+ Y = self._shallow_copy(Y)
+ cov = libwindow.ewmcov(
+ X._prep_values(),
+ Y._prep_values(),
+ self.com,
+ int(self.adjust),
+ int(self.ignore_na),
+ int(self.min_periods),
+ int(bias),
+ )
+ return X._wrap_result(cov)
+
+ return _flex_binary_moment(
+ self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
+ )
+
+ @Substitution(name="ewm")
+ @Appender(_doc_template)
+ @Appender(_pairwise_template)
+ def corr(self, other=None, pairwise=None, **kwargs):
+ """
+ Exponential weighted sample correlation.
+ """
+ if other is None:
+ other = self._selected_obj
+ # only default unset
+ pairwise = True if pairwise is None else pairwise
+ other = self._shallow_copy(other)
+
+ def _get_corr(X, Y):
+ X = self._shallow_copy(X)
+ Y = self._shallow_copy(Y)
+
+ def _cov(x, y):
+ return libwindow.ewmcov(
+ x,
+ y,
+ self.com,
+ int(self.adjust),
+ int(self.ignore_na),
+ int(self.min_periods),
+ 1,
+ )
+
+ x_values = X._prep_values()
+ y_values = Y._prep_values()
+ with np.errstate(all="ignore"):
+ cov = _cov(x_values, y_values)
+ x_var = _cov(x_values, x_values)
+ y_var = _cov(y_values, y_values)
+ corr = cov / _zsqrt(x_var * y_var)
+ return X._wrap_result(corr)
+
+ return _flex_binary_moment(
+ self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
+ )
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
new file mode 100644
index 0000000000000..47bd8f2ec593b
--- /dev/null
+++ b/pandas/core/window/expanding.py
@@ -0,0 +1,260 @@
+from textwrap import dedent
+
+from pandas.compat.numpy import function as nv
+from pandas.util._decorators import Appender, Substitution
+
+from pandas.core.window.common import _doc_template, _GroupByMixin, _shared_docs
+from pandas.core.window.rolling import _Rolling_and_Expanding
+
+
+class Expanding(_Rolling_and_Expanding):
+ """
+ Provide expanding transformations.
+
+ Parameters
+ ----------
+ min_periods : int, default 1
+ Minimum number of observations in window required to have a value
+ (otherwise result is NA).
+ center : bool, default False
+ Set the labels at the center of the window.
+ axis : int or str, default 0
+
+ Returns
+ -------
+ a Window sub-classed for the particular operation
+
+ See Also
+ --------
+ rolling : Provides rolling window calculations.
+ ewm : Provides exponential weighted functions.
+
+ Notes
+ -----
+ By default, the result is set to the right edge of the window. This can be
+ changed to the center of the window by setting ``center=True``.
+
+ Examples
+ --------
+
+ >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ B
+ 0 0.0
+ 1 1.0
+ 2 2.0
+ 3 NaN
+ 4 4.0
+
+ >>> df.expanding(2).sum()
+ B
+ 0 NaN
+ 1 1.0
+ 2 3.0
+ 3 3.0
+ 4 7.0
+ """
+
+ _attributes = ["min_periods", "center", "axis"]
+
+ def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
+ super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
+
+ @property
+ def _constructor(self):
+ return Expanding
+
+ def _get_window(self, other=None, **kwargs):
+ """
+ Get the window length over which to perform some operation.
+
+ Parameters
+ ----------
+ other : object, default None
+ The other object that is involved in the operation.
+ Such an object is involved for operations like covariance.
+
+ Returns
+ -------
+ window : int
+ The window length.
+ """
+ axis = self.obj._get_axis(self.axis)
+ length = len(axis) + (other is not None) * len(axis)
+
+ other = self.min_periods or -1
+ return max(length, other)
+
+ _agg_see_also_doc = dedent(
+ """
+ See Also
+ --------
+ DataFrame.expanding.aggregate
+ DataFrame.rolling.aggregate
+ DataFrame.aggregate
+ """
+ )
+
+ _agg_examples_doc = dedent(
+ """
+ Examples
+ --------
+
+ >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
+ >>> df
+ A B C
+ 0 -2.385977 -0.102758 0.438822
+ 1 -1.004295 0.905829 -0.954544
+ 2 0.735167 -0.165272 -1.619346
+ 3 -0.702657 -1.340923 -0.706334
+ 4 -0.246845 0.211596 -0.901819
+ 5 2.463718 3.157577 -1.380906
+ 6 -1.142255 2.340594 -0.039875
+ 7 1.396598 -1.647453 1.677227
+ 8 -0.543425 1.761277 -0.220481
+ 9 -0.640505 0.289374 -1.550670
+
+ >>> df.ewm(alpha=0.5).mean()
+ A B C
+ 0 -2.385977 -0.102758 0.438822
+ 1 -1.464856 0.569633 -0.490089
+ 2 -0.207700 0.149687 -1.135379
+ 3 -0.471677 -0.645305 -0.906555
+ 4 -0.355635 -0.203033 -0.904111
+ 5 1.076417 1.503943 -1.146293
+ 6 -0.041654 1.925562 -0.588728
+ 7 0.680292 0.132049 0.548693
+ 8 0.067236 0.948257 0.163353
+ 9 -0.286980 0.618493 -0.694496
+ """
+ )
+
+ @Substitution(
+ see_also=_agg_see_also_doc,
+ examples=_agg_examples_doc,
+ versionadded="",
+ klass="Series/Dataframe",
+ axis="",
+ )
+ @Appender(_shared_docs["aggregate"])
+ def aggregate(self, func, *args, **kwargs):
+ return super().aggregate(func, *args, **kwargs)
+
+ agg = aggregate
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["count"])
+ def count(self, **kwargs):
+ return super().count(**kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["apply"])
+ def apply(self, func, raw=None, args=(), kwargs={}):
+ return super().apply(func, raw=raw, args=args, kwargs=kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["sum"])
+ def sum(self, *args, **kwargs):
+ nv.validate_expanding_func("sum", args, kwargs)
+ return super().sum(*args, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_doc_template)
+ @Appender(_shared_docs["max"])
+ def max(self, *args, **kwargs):
+ nv.validate_expanding_func("max", args, kwargs)
+ return super().max(*args, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["min"])
+ def min(self, *args, **kwargs):
+ nv.validate_expanding_func("min", args, kwargs)
+ return super().min(*args, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["mean"])
+ def mean(self, *args, **kwargs):
+ nv.validate_expanding_func("mean", args, kwargs)
+ return super().mean(*args, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["median"])
+ def median(self, **kwargs):
+ return super().median(**kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["std"])
+ def std(self, ddof=1, *args, **kwargs):
+ nv.validate_expanding_func("std", args, kwargs)
+ return super().std(ddof=ddof, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["var"])
+ def var(self, ddof=1, *args, **kwargs):
+ nv.validate_expanding_func("var", args, kwargs)
+ return super().var(ddof=ddof, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_doc_template)
+ @Appender(_shared_docs["skew"])
+ def skew(self, **kwargs):
+ return super().skew(**kwargs)
+
+ _agg_doc = dedent(
+ """
+ Examples
+ --------
+
+ The example below will show an expanding calculation with a window size of
+ four matching the equivalent function call using `scipy.stats`.
+
+ >>> arr = [1, 2, 3, 4, 999]
+ >>> import scipy.stats
+ >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
+ >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
+ -1.200000
+ >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
+ 4.999874
+ >>> s = pd.Series(arr)
+ >>> s.expanding(4).kurt()
+ 0 NaN
+ 1 NaN
+ 2 NaN
+ 3 -1.200000
+ 4 4.999874
+ dtype: float64
+ """
+ )
+
+ @Appender(_agg_doc)
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["kurt"])
+ def kurt(self, **kwargs):
+ return super().kurt(**kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["quantile"])
+ def quantile(self, quantile, interpolation="linear", **kwargs):
+ return super().quantile(
+ quantile=quantile, interpolation=interpolation, **kwargs
+ )
+
+ @Substitution(name="expanding")
+ @Appender(_doc_template)
+ @Appender(_shared_docs["cov"])
+ def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
+ return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
+
+ @Substitution(name="expanding")
+ @Appender(_shared_docs["corr"])
+ def corr(self, other=None, pairwise=None, **kwargs):
+ return super().corr(other=other, pairwise=pairwise, **kwargs)
+
+
+class ExpandingGroupby(_GroupByMixin, Expanding):
+ """
+ Provide a expanding groupby implementation.
+ """
+
+ @property
+ def _constructor(self):
+ return Expanding
diff --git a/pandas/core/window.py b/pandas/core/window/rolling.py
similarity index 62%
rename from pandas/core/window.py
rename to pandas/core/window/rolling.py
index 86574208a3fc0..29ef2e917ae57 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window/rolling.py
@@ -2,10 +2,9 @@
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
-from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
-from typing import List, Optional, Set
+from typing import Callable, List, Optional, Set, Union
import warnings
import numpy as np
@@ -35,24 +34,20 @@
ABCTimedeltaIndex,
)
-from pandas._typing import Axis, FrameOrSeries
+from pandas._typing import Axis, FrameOrSeries, Scalar
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
-from pandas.core.generic import _shared_docs
-from pandas.core.groupby.base import GroupByMixin
-
-_shared_docs = dict(**_shared_docs)
-_doc_template = """
- Returns
- -------
- Series or DataFrame
- Return type is determined by the caller.
-
- See Also
- --------
- Series.%(name)s : Series %(name)s.
- DataFrame.%(name)s : DataFrame %(name)s.
-"""
+from pandas.core.index import Index, ensure_index
+from pandas.core.window.common import (
+ _doc_template,
+ _flex_binary_moment,
+ _GroupByMixin,
+ _offset,
+ _require_min_periods,
+ _shared_docs,
+ _use_window,
+ _zsqrt,
+)
class _Window(PandasObject, SelectionMixin):
@@ -119,7 +114,9 @@ def validate(self):
"left",
"neither",
]:
- raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
+ raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")
+ if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):
+ raise TypeError("invalid type: {}".format(type(self)))
def _create_blocks(self):
"""
@@ -172,7 +169,19 @@ def __getattr__(self, attr):
def _dir_additions(self):
return self.obj._dir_additions()
- def _get_window(self, other=None):
+ def _get_window(self, other=None, **kwargs) -> int:
+ """
+ Returns window length
+
+ Parameters
+ ----------
+ other:
+ ignored, exists for compatibility
+
+ Returns
+ -------
+ window : int
+ """
return self.window
@property
@@ -199,7 +208,7 @@ def __iter__(self):
def _get_index(self) -> Optional[np.ndarray]:
"""
- Return index as an ndarray.
+ Return integer representations as an ndarray if index is frequency.
Returns
-------
@@ -231,16 +240,16 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
- raise TypeError(
- "cannot handle this type -> {0}" "".format(values.dtype)
- )
+ raise TypeError("cannot handle this type -> {0}".format(values.dtype))
- # Always convert inf to nan
- values[np.isinf(values)] = np.NaN
+ # Convert inf to nan for C funcs
+ inf = np.isinf(values)
+ if inf.any():
+ values = np.where(inf, np.nan, values)
return values
- def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
+ def _wrap_result(self, result, block=None, obj=None):
"""
Wrap a single result.
"""
@@ -254,6 +263,8 @@ def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
+ # TODO: do we know what result.dtype is at this point?
+ # i.e. can we just do an astype?
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
@@ -281,7 +292,6 @@ def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
from pandas import Series, concat
- from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
@@ -327,9 +337,7 @@ def _center_window(self, result, window) -> np.ndarray:
Center the result in the window.
"""
if self.axis > result.ndim - 1:
- raise ValueError(
- "Requested axis is larger then no. of argument " "dimensions"
- )
+ raise ValueError("Requested axis is larger then no. of argument dimensions")
offset = _offset(window, True)
if offset > 0:
@@ -341,6 +349,138 @@ def _center_window(self, result, window) -> np.ndarray:
result = np.copy(result[tuple(lead_indexer)])
return result
+ def _get_roll_func(
+ self, cfunc: Callable, check_minp: Callable, index: np.ndarray, **kwargs
+ ) -> Callable:
+ """
+ Wrap rolling function to check values passed.
+
+ Parameters
+ ----------
+ cfunc : callable
+ Cython function used to calculate rolling statistics
+ check_minp : callable
+ function to check minimum period parameter
+ index : ndarray
+ used for variable window
+
+ Returns
+ -------
+ func : callable
+ """
+
+ def func(arg, window, min_periods=None, closed=None):
+ minp = check_minp(min_periods, window)
+ return cfunc(arg, window, minp, index, closed, **kwargs)
+
+ return func
+
+ def _apply(
+ self,
+ func: Union[str, Callable],
+ name: Optional[str] = None,
+ window: Optional[Union[int, str]] = None,
+ center: Optional[bool] = None,
+ check_minp: Optional[Callable] = None,
+ **kwargs
+ ):
+ """
+ Rolling statistical measure using supplied function.
+
+ Designed to be used with passed-in Cython array-based functions.
+
+ Parameters
+ ----------
+ func : str/callable to apply
+ name : str, optional
+ name of this function
+ window : int/str, default to _get_window()
+ window length or offset
+ center : bool, default to self.center
+ check_minp : function, default to _use_window
+ **kwargs
+ additional arguments for rolling function and window function
+
+ Returns
+ -------
+ y : type of input
+ """
+ if center is None:
+ center = self.center
+
+ if check_minp is None:
+ check_minp = _use_window
+
+ if window is None:
+ window = self._get_window(**kwargs)
+
+ blocks, obj = self._create_blocks()
+ block_list = list(blocks)
+ index_as_array = self._get_index()
+
+ results = []
+ exclude = [] # type: List[Scalar]
+ for i, b in enumerate(blocks):
+ try:
+ values = self._prep_values(b.values)
+
+ except (TypeError, NotImplementedError):
+ if isinstance(obj, ABCDataFrame):
+ exclude.extend(b.columns)
+ del block_list[i]
+ continue
+ else:
+ raise DataError("No numeric types to aggregate")
+
+ if values.size == 0:
+ results.append(values.copy())
+ continue
+
+ # if we have a string function name, wrap it
+ if isinstance(func, str):
+ cfunc = getattr(libwindow, func, None)
+ if cfunc is None:
+ raise ValueError(
+ "we do not support this function "
+ "in libwindow.{func}".format(func=func)
+ )
+
+ func = self._get_roll_func(cfunc, check_minp, index_as_array, **kwargs)
+
+ # calculation function
+ if center:
+ offset = _offset(window, center)
+ additional_nans = np.array([np.NaN] * offset)
+
+ def calc(x):
+ return func(
+ np.concatenate((x, additional_nans)),
+ window,
+ min_periods=self.min_periods,
+ closed=self.closed,
+ )
+
+ else:
+
+ def calc(x):
+ return func(
+ x, window, min_periods=self.min_periods, closed=self.closed
+ )
+
+ with np.errstate(all="ignore"):
+ if values.ndim > 1:
+ result = np.apply_along_axis(calc, self.axis, values)
+ else:
+ result = calc(values)
+ result = np.asarray(result)
+
+ if center:
+ result = self._center_window(result, window)
+
+ results.append(result)
+
+ return self._wrap_results(results, block_list, obj, exclude)
+
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
@@ -477,8 +617,6 @@ class Window(_Window):
"""
Provide rolling window calculations.
- .. versionadded:: 0.18.0
-
Parameters
----------
window : int, or offset
@@ -487,8 +625,7 @@ class Window(_Window):
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
- the time-period. This is only valid for datetimelike indexes. This is
- new in 0.19.0
+ the time-period. This is only valid for datetimelike indexes.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
@@ -648,13 +785,23 @@ def validate(self):
else:
raise ValueError("Invalid window {0}".format(window))
- def _prep_window(self, **kwargs):
+ def _get_window(self, other=None, **kwargs) -> np.ndarray:
"""
- Provide validation for our window type, return the window
- we have already been validated.
+ Provide validation for the window type, return the window
+ which has already been validated.
+
+ Parameters
+ ----------
+ other:
+ ignored, exists for compatibility
+
+ Returns
+ -------
+ window : ndarray
+ the window, weights
"""
- window = self._get_window()
+ window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
@@ -694,63 +841,14 @@ def _pop_args(win_type, arg_names, kwargs):
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
- def _apply_window(self, mean=True, **kwargs):
- """
- Applies a moving window of type ``window_type`` on the data.
-
- Parameters
- ----------
- mean : bool, default True
- If True computes weighted mean, else weighted sum
-
- Returns
- -------
- y : same type as input argument
-
- """
- window = self._prep_window(**kwargs)
- center = self.center
-
- blocks, obj = self._create_blocks()
- block_list = list(blocks)
-
- results = []
- exclude = []
- for i, b in enumerate(blocks):
- try:
- values = self._prep_values(b.values)
-
- except (TypeError, NotImplementedError):
- if isinstance(obj, ABCDataFrame):
- exclude.extend(b.columns)
- del block_list[i]
- continue
- else:
- raise DataError("No numeric types to aggregate")
-
- if values.size == 0:
- results.append(values.copy())
- continue
-
- offset = _offset(window, center)
- additional_nans = np.array([np.NaN] * offset)
-
- def f(arg, *args, **kwargs):
- minp = _use_window(self.min_periods, len(window))
- return libwindow.roll_window(
- np.concatenate((arg, additional_nans)) if center else arg,
- window,
- minp,
- avg=mean,
- )
-
- result = np.apply_along_axis(f, self.axis, values)
-
- if center:
- result = self._center_window(result, window)
- results.append(result)
+ def _get_roll_func(
+ self, cfunc: Callable, check_minp: Callable, index: np.ndarray, **kwargs
+ ) -> Callable:
+ def func(arg, window, min_periods=None, closed=None):
+ minp = check_minp(min_periods, len(window))
+ return cfunc(arg, window, minp)
- return self._wrap_results(results, block_list, obj, exclude)
+ return func
_agg_see_also_doc = dedent(
"""
@@ -803,12 +901,12 @@ def f(arg, *args, **kwargs):
axis="",
)
@Appender(_shared_docs["aggregate"])
- def aggregate(self, arg, *args, **kwargs):
- result, how = self._aggregate(arg, *args, **kwargs)
+ def aggregate(self, func, *args, **kwargs):
+ result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# these must apply directly
- result = arg(self)
+ result = func(self)
return result
@@ -818,51 +916,13 @@ def aggregate(self, arg, *args, **kwargs):
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
- return self._apply_window(mean=False, **kwargs)
+ return self._apply("roll_weighted_sum", **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
- return self._apply_window(mean=True, **kwargs)
-
-
-class _GroupByMixin(GroupByMixin):
- """
- Provide the groupby facilities.
- """
-
- def __init__(self, obj, *args, **kwargs):
- parent = kwargs.pop("parent", None) # noqa
- groupby = kwargs.pop("groupby", None)
- if groupby is None:
- groupby, obj = obj, obj.obj
- self._groupby = groupby
- self._groupby.mutated = True
- self._groupby.grouper.mutated = True
- super().__init__(obj, *args, **kwargs)
-
- count = GroupByMixin._dispatch("count")
- corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
- cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
-
- def _apply(
- self, func, name=None, window=None, center=None, check_minp=None, **kwargs
- ):
- """
- Dispatch to apply; we are stripping all of the _apply kwargs and
- performing the original function call on the grouped object.
- """
-
- def f(x, name=name, *args):
- x = self._shallow_copy(x)
-
- if isinstance(name, str):
- return getattr(x, name)(*args, **kwargs)
-
- return x.apply(name, *args, **kwargs)
-
- return self._groupby.apply(f)
+ return self._apply("roll_weighted_mean", **kwargs)
class _Rolling(_Window):
@@ -870,149 +930,50 @@ class _Rolling(_Window):
def _constructor(self):
return Rolling
- def _apply(
- self, func, name=None, window=None, center=None, check_minp=None, **kwargs
- ):
- """
- Rolling statistical measure using supplied function.
- Designed to be used with passed-in Cython array-based functions.
+class _Rolling_and_Expanding(_Rolling):
- Parameters
- ----------
- func : str/callable to apply
- name : str, optional
- name of this function
- window : int/array, default to _get_window()
- center : bool, default to self.center
- check_minp : function, default to _use_window
+ _shared_docs["count"] = dedent(
+ r"""
+ The %(name)s count of any non-NaN observations inside the window.
- Returns
- -------
- y : type of input
- """
- if center is None:
- center = self.center
- if window is None:
- window = self._get_window()
+ Returns
+ -------
+ Series or DataFrame
+ Returned object type is determined by the caller of the %(name)s
+ calculation.
- if check_minp is None:
- check_minp = _use_window
+ See Also
+ --------
+ Series.%(name)s : Calling object with Series data.
+ DataFrame.%(name)s : Calling object with DataFrames.
+ DataFrame.count : Count of the full DataFrame.
- blocks, obj = self._create_blocks()
- block_list = list(blocks)
- index_as_array = self._get_index()
+ Examples
+ --------
+ >>> s = pd.Series([2, 3, np.nan, 10])
+ >>> s.rolling(2).count()
+ 0 1.0
+ 1 2.0
+ 2 1.0
+ 3 1.0
+ dtype: float64
+ >>> s.rolling(3).count()
+ 0 1.0
+ 1 2.0
+ 2 2.0
+ 3 2.0
+ dtype: float64
+ >>> s.rolling(4).count()
+ 0 1.0
+ 1 2.0
+ 2 2.0
+ 3 3.0
+ dtype: float64
+ """
+ )
- results = []
- exclude = []
- for i, b in enumerate(blocks):
- try:
- values = self._prep_values(b.values)
-
- except (TypeError, NotImplementedError):
- if isinstance(obj, ABCDataFrame):
- exclude.extend(b.columns)
- del block_list[i]
- continue
- else:
- raise DataError("No numeric types to aggregate")
-
- if values.size == 0:
- results.append(values.copy())
- continue
-
- # if we have a string function name, wrap it
- if isinstance(func, str):
- cfunc = getattr(libwindow, func, None)
- if cfunc is None:
- raise ValueError(
- "we do not support this function "
- "in libwindow.{func}".format(func=func)
- )
-
- def func(arg, window, min_periods=None, closed=None):
- minp = check_minp(min_periods, window)
- # ensure we are only rolling on floats
- arg = ensure_float64(arg)
- return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
-
- # calculation function
- if center:
- offset = _offset(window, center)
- additional_nans = np.array([np.NaN] * offset)
-
- def calc(x):
- return func(
- np.concatenate((x, additional_nans)),
- window,
- min_periods=self.min_periods,
- closed=self.closed,
- )
-
- else:
-
- def calc(x):
- return func(
- x, window, min_periods=self.min_periods, closed=self.closed
- )
-
- with np.errstate(all="ignore"):
- if values.ndim > 1:
- result = np.apply_along_axis(calc, self.axis, values)
- else:
- result = calc(values)
-
- if center:
- result = self._center_window(result, window)
-
- results.append(result)
-
- return self._wrap_results(results, block_list, obj, exclude)
-
-
-class _Rolling_and_Expanding(_Rolling):
-
- _shared_docs["count"] = dedent(
- r"""
- The %(name)s count of any non-NaN observations inside the window.
-
- Returns
- -------
- Series or DataFrame
- Returned object type is determined by the caller of the %(name)s
- calculation.
-
- See Also
- --------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- DataFrame.count : Count of the full DataFrame.
-
- Examples
- --------
- >>> s = pd.Series([2, 3, np.nan, 10])
- >>> s.rolling(2).count()
- 0 1.0
- 1 2.0
- 2 1.0
- 3 1.0
- dtype: float64
- >>> s.rolling(3).count()
- 0 1.0
- 1 2.0
- 2 2.0
- 3 2.0
- dtype: float64
- >>> s.rolling(4).count()
- 0 1.0
- 1 2.0
- 2 2.0
- 3 3.0
- dtype: float64
- """
- )
-
- def count(self):
+ def count(self):
blocks, obj = self._create_blocks()
# Validate the index
@@ -1692,10 +1653,11 @@ def is_datetimelike(self):
def _on(self):
if self.on is None:
- return self.obj.index
+ if self.axis == 0:
+ return self.obj.index
+ elif self.axis == 1:
+ return self.obj.columns
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
- from pandas import Index
-
return Index(self.obj[self.on])
else:
raise ValueError(
@@ -1739,7 +1701,7 @@ def validate(self):
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
- "closed only implemented for datetimelike " "and offset based windows"
+ "closed only implemented for datetimelike and offset based windows"
)
def _validate_monotonic(self):
@@ -1748,7 +1710,7 @@ def _validate_monotonic(self):
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
- raise ValueError("{0} must be " "monotonic".format(formatted))
+ raise ValueError("{0} must be monotonic".format(formatted))
def _validate_freq(self):
"""
@@ -1829,8 +1791,8 @@ def _validate_freq(self):
axis="",
)
@Appender(_shared_docs["aggregate"])
- def aggregate(self, arg, *args, **kwargs):
- return super().aggregate(arg, *args, **kwargs)
+ def aggregate(self, func, *args, **kwargs):
+ return super().aggregate(func, *args, **kwargs)
agg = aggregate
@@ -1948,12 +1910,12 @@ def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
+Rolling.__doc__ = Window.__doc__
+
+
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
-
- .. versionadded:: 0.18.1
-
"""
@property
@@ -1978,893 +1940,3 @@ def _validate_monotonic(self):
level.
"""
pass
-
-
-class Expanding(_Rolling_and_Expanding):
- """
- Provide expanding transformations.
-
- .. versionadded:: 0.18.0
-
- Parameters
- ----------
- min_periods : int, default 1
- Minimum number of observations in window required to have a value
- (otherwise result is NA).
- center : bool, default False
- Set the labels at the center of the window.
- axis : int or str, default 0
-
- Returns
- -------
- a Window sub-classed for the particular operation
-
- See Also
- --------
- rolling : Provides rolling window calculations.
- ewm : Provides exponential weighted functions.
-
- Notes
- -----
- By default, the result is set to the right edge of the window. This can be
- changed to the center of the window by setting ``center=True``.
-
- Examples
- --------
-
- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
- B
- 0 0.0
- 1 1.0
- 2 2.0
- 3 NaN
- 4 4.0
-
- >>> df.expanding(2).sum()
- B
- 0 NaN
- 1 1.0
- 2 3.0
- 3 3.0
- 4 7.0
- """
-
- _attributes = ["min_periods", "center", "axis"]
-
- def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
- super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
-
- @property
- def _constructor(self):
- return Expanding
-
- def _get_window(self, other=None):
- """
- Get the window length over which to perform some operation.
-
- Parameters
- ----------
- other : object, default None
- The other object that is involved in the operation.
- Such an object is involved for operations like covariance.
-
- Returns
- -------
- window : int
- The window length.
- """
- axis = self.obj._get_axis(self.axis)
- length = len(axis) + (other is not None) * len(axis)
-
- other = self.min_periods or -1
- return max(length, other)
-
- _agg_see_also_doc = dedent(
- """
- See Also
- --------
- DataFrame.expanding.aggregate
- DataFrame.rolling.aggregate
- DataFrame.aggregate
- """
- )
-
- _agg_examples_doc = dedent(
- """
- Examples
- --------
-
- >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
- >>> df
- A B C
- 0 -2.385977 -0.102758 0.438822
- 1 -1.004295 0.905829 -0.954544
- 2 0.735167 -0.165272 -1.619346
- 3 -0.702657 -1.340923 -0.706334
- 4 -0.246845 0.211596 -0.901819
- 5 2.463718 3.157577 -1.380906
- 6 -1.142255 2.340594 -0.039875
- 7 1.396598 -1.647453 1.677227
- 8 -0.543425 1.761277 -0.220481
- 9 -0.640505 0.289374 -1.550670
-
- >>> df.ewm(alpha=0.5).mean()
- A B C
- 0 -2.385977 -0.102758 0.438822
- 1 -1.464856 0.569633 -0.490089
- 2 -0.207700 0.149687 -1.135379
- 3 -0.471677 -0.645305 -0.906555
- 4 -0.355635 -0.203033 -0.904111
- 5 1.076417 1.503943 -1.146293
- 6 -0.041654 1.925562 -0.588728
- 7 0.680292 0.132049 0.548693
- 8 0.067236 0.948257 0.163353
- 9 -0.286980 0.618493 -0.694496
- """
- )
-
- @Substitution(
- see_also=_agg_see_also_doc,
- examples=_agg_examples_doc,
- versionadded="",
- klass="Series/Dataframe",
- axis="",
- )
- @Appender(_shared_docs["aggregate"])
- def aggregate(self, arg, *args, **kwargs):
- return super().aggregate(arg, *args, **kwargs)
-
- agg = aggregate
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["count"])
- def count(self, **kwargs):
- return super().count(**kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["apply"])
- def apply(self, func, raw=None, args=(), kwargs={}):
- return super().apply(func, raw=raw, args=args, kwargs=kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["sum"])
- def sum(self, *args, **kwargs):
- nv.validate_expanding_func("sum", args, kwargs)
- return super().sum(*args, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_doc_template)
- @Appender(_shared_docs["max"])
- def max(self, *args, **kwargs):
- nv.validate_expanding_func("max", args, kwargs)
- return super().max(*args, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["min"])
- def min(self, *args, **kwargs):
- nv.validate_expanding_func("min", args, kwargs)
- return super().min(*args, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["mean"])
- def mean(self, *args, **kwargs):
- nv.validate_expanding_func("mean", args, kwargs)
- return super().mean(*args, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["median"])
- def median(self, **kwargs):
- return super().median(**kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["std"])
- def std(self, ddof=1, *args, **kwargs):
- nv.validate_expanding_func("std", args, kwargs)
- return super().std(ddof=ddof, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["var"])
- def var(self, ddof=1, *args, **kwargs):
- nv.validate_expanding_func("var", args, kwargs)
- return super().var(ddof=ddof, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_doc_template)
- @Appender(_shared_docs["skew"])
- def skew(self, **kwargs):
- return super().skew(**kwargs)
-
- _agg_doc = dedent(
- """
- Examples
- --------
-
- The example below will show an expanding calculation with a window size of
- four matching the equivalent function call using `scipy.stats`.
-
- >>> arr = [1, 2, 3, 4, 999]
- >>> import scipy.stats
- >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
- >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
- -1.200000
- >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
- 4.999874
- >>> s = pd.Series(arr)
- >>> s.expanding(4).kurt()
- 0 NaN
- 1 NaN
- 2 NaN
- 3 -1.200000
- 4 4.999874
- dtype: float64
- """
- )
-
- @Appender(_agg_doc)
- @Substitution(name="expanding")
- @Appender(_shared_docs["kurt"])
- def kurt(self, **kwargs):
- return super().kurt(**kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["quantile"])
- def quantile(self, quantile, interpolation="linear", **kwargs):
- return super().quantile(
- quantile=quantile, interpolation=interpolation, **kwargs
- )
-
- @Substitution(name="expanding")
- @Appender(_doc_template)
- @Appender(_shared_docs["cov"])
- def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
- return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
-
- @Substitution(name="expanding")
- @Appender(_shared_docs["corr"])
- def corr(self, other=None, pairwise=None, **kwargs):
- return super().corr(other=other, pairwise=pairwise, **kwargs)
-
-
-class ExpandingGroupby(_GroupByMixin, Expanding):
- """
- Provide a expanding groupby implementation.
-
- .. versionadded:: 0.18.1
-
- """
-
- @property
- def _constructor(self):
- return Expanding
-
-
-_bias_template = """
- Parameters
- ----------
- bias : bool, default False
- Use a standard estimation bias correction.
- *args, **kwargs
- Arguments and keyword arguments to be passed into func.
-"""
-
-_pairwise_template = """
- Parameters
- ----------
- other : Series, DataFrame, or ndarray, optional
- If not supplied then will default to self and produce pairwise
- output.
- pairwise : bool, default None
- If False then only matching columns between self and other will be
- used and the output will be a DataFrame.
- If True then all pairwise combinations will be calculated and the
- output will be a MultiIndex DataFrame in the case of DataFrame
- inputs. In the case of missing elements, only complete pairwise
- observations will be used.
- bias : bool, default False
- Use a standard estimation bias correction.
- **kwargs
- Keyword arguments to be passed into func.
-"""
-
-
-class EWM(_Rolling):
- r"""
- Provide exponential weighted functions.
-
- .. versionadded:: 0.18.0
-
- Parameters
- ----------
- com : float, optional
- Specify decay in terms of center of mass,
- :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`.
- span : float, optional
- Specify decay in terms of span,
- :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`.
- halflife : float, optional
- Specify decay in terms of half-life,
- :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`.
- alpha : float, optional
- Specify smoothing factor :math:`\alpha` directly,
- :math:`0 < \alpha \leq 1`.
-
- .. versionadded:: 0.18.0
-
- min_periods : int, default 0
- Minimum number of observations in window required to have a value
- (otherwise result is NA).
- adjust : bool, default True
- Divide by decaying adjustment factor in beginning periods to account
- for imbalance in relative weightings
- (viewing EWMA as a moving average).
- ignore_na : bool, default False
- Ignore missing values when calculating weights;
- specify True to reproduce pre-0.15.0 behavior.
- axis : {0 or 'index', 1 or 'columns'}, default 0
- The axis to use. The value 0 identifies the rows, and 1
- identifies the columns.
-
- Returns
- -------
- DataFrame
- A Window sub-classed for the particular operation.
-
- See Also
- --------
- rolling : Provides rolling window calculations.
- expanding : Provides expanding transformations.
-
- Notes
- -----
- Exactly one of center of mass, span, half-life, and alpha must be provided.
- Allowed values and relationship between the parameters are specified in the
- parameter descriptions above; see the link at the end of this section for
- a detailed explanation.
-
- When adjust is True (default), weighted averages are calculated using
- weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
-
- When adjust is False, weighted averages are calculated recursively as:
- weighted_average[0] = arg[0];
- weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
-
- When ignore_na is False (default), weights are based on absolute positions.
- For example, the weights of x and y used in calculating the final weighted
- average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
- (1-alpha)**2 and alpha (if adjust is False).
-
- When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
- on relative positions. For example, the weights of x and y used in
- calculating the final weighted average of [x, None, y] are 1-alpha and 1
- (if adjust is True), and 1-alpha and alpha (if adjust is False).
-
- More details can be found at
- http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows
-
- Examples
- --------
-
- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
- >>> df
- B
- 0 0.0
- 1 1.0
- 2 2.0
- 3 NaN
- 4 4.0
-
- >>> df.ewm(com=0.5).mean()
- B
- 0 0.000000
- 1 0.750000
- 2 1.615385
- 3 1.615385
- 4 3.670213
- """
- _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"]
-
- def __init__(
- self,
- obj,
- com=None,
- span=None,
- halflife=None,
- alpha=None,
- min_periods=0,
- adjust=True,
- ignore_na=False,
- axis=0,
- ):
- self.obj = obj
- self.com = _get_center_of_mass(com, span, halflife, alpha)
- self.min_periods = min_periods
- self.adjust = adjust
- self.ignore_na = ignore_na
- self.axis = axis
- self.on = None
-
- @property
- def _constructor(self):
- return EWM
-
- _agg_see_also_doc = dedent(
- """
- See Also
- --------
- pandas.DataFrame.rolling.aggregate
- """
- )
-
- _agg_examples_doc = dedent(
- """
- Examples
- --------
-
- >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
- >>> df
- A B C
- 0 -2.385977 -0.102758 0.438822
- 1 -1.004295 0.905829 -0.954544
- 2 0.735167 -0.165272 -1.619346
- 3 -0.702657 -1.340923 -0.706334
- 4 -0.246845 0.211596 -0.901819
- 5 2.463718 3.157577 -1.380906
- 6 -1.142255 2.340594 -0.039875
- 7 1.396598 -1.647453 1.677227
- 8 -0.543425 1.761277 -0.220481
- 9 -0.640505 0.289374 -1.550670
-
- >>> df.ewm(alpha=0.5).mean()
- A B C
- 0 -2.385977 -0.102758 0.438822
- 1 -1.464856 0.569633 -0.490089
- 2 -0.207700 0.149687 -1.135379
- 3 -0.471677 -0.645305 -0.906555
- 4 -0.355635 -0.203033 -0.904111
- 5 1.076417 1.503943 -1.146293
- 6 -0.041654 1.925562 -0.588728
- 7 0.680292 0.132049 0.548693
- 8 0.067236 0.948257 0.163353
- 9 -0.286980 0.618493 -0.694496
- """
- )
-
- @Substitution(
- see_also=_agg_see_also_doc,
- examples=_agg_examples_doc,
- versionadded="",
- klass="Series/Dataframe",
- axis="",
- )
- @Appender(_shared_docs["aggregate"])
- def aggregate(self, arg, *args, **kwargs):
- return super().aggregate(arg, *args, **kwargs)
-
- agg = aggregate
-
- def _apply(self, func, **kwargs):
- """
- Rolling statistical measure using supplied function. Designed to be
- used with passed-in Cython array-based functions.
-
- Parameters
- ----------
- func : str/callable to apply
-
- Returns
- -------
- y : same type as input argument
- """
- blocks, obj = self._create_blocks()
- block_list = list(blocks)
-
- results = []
- exclude = []
- for i, b in enumerate(blocks):
- try:
- values = self._prep_values(b.values)
-
- except (TypeError, NotImplementedError):
- if isinstance(obj, ABCDataFrame):
- exclude.extend(b.columns)
- del block_list[i]
- continue
- else:
- raise DataError("No numeric types to aggregate")
-
- if values.size == 0:
- results.append(values.copy())
- continue
-
- # if we have a string function name, wrap it
- if isinstance(func, str):
- cfunc = getattr(libwindow, func, None)
- if cfunc is None:
- raise ValueError(
- "we do not support this function "
- "in libwindow.{func}".format(func=func)
- )
-
- def func(arg):
- return cfunc(
- arg,
- self.com,
- int(self.adjust),
- int(self.ignore_na),
- int(self.min_periods),
- )
-
- results.append(np.apply_along_axis(func, self.axis, values))
-
- return self._wrap_results(results, block_list, obj, exclude)
-
- @Substitution(name="ewm")
- @Appender(_doc_template)
- def mean(self, *args, **kwargs):
- """
- Exponential weighted moving average.
-
- Parameters
- ----------
- *args, **kwargs
- Arguments and keyword arguments to be passed into func.
- """
- nv.validate_window_func("mean", args, kwargs)
- return self._apply("ewma", **kwargs)
-
- @Substitution(name="ewm")
- @Appender(_doc_template)
- @Appender(_bias_template)
- def std(self, bias=False, *args, **kwargs):
- """
- Exponential weighted moving stddev.
- """
- nv.validate_window_func("std", args, kwargs)
- return _zsqrt(self.var(bias=bias, **kwargs))
-
- vol = std
-
- @Substitution(name="ewm")
- @Appender(_doc_template)
- @Appender(_bias_template)
- def var(self, bias=False, *args, **kwargs):
- """
- Exponential weighted moving variance.
- """
- nv.validate_window_func("var", args, kwargs)
-
- def f(arg):
- return libwindow.ewmcov(
- arg,
- arg,
- self.com,
- int(self.adjust),
- int(self.ignore_na),
- int(self.min_periods),
- int(bias),
- )
-
- return self._apply(f, **kwargs)
-
- @Substitution(name="ewm")
- @Appender(_doc_template)
- @Appender(_pairwise_template)
- def cov(self, other=None, pairwise=None, bias=False, **kwargs):
- """
- Exponential weighted sample covariance.
- """
- if other is None:
- other = self._selected_obj
- # only default unset
- pairwise = True if pairwise is None else pairwise
- other = self._shallow_copy(other)
-
- def _get_cov(X, Y):
- X = self._shallow_copy(X)
- Y = self._shallow_copy(Y)
- cov = libwindow.ewmcov(
- X._prep_values(),
- Y._prep_values(),
- self.com,
- int(self.adjust),
- int(self.ignore_na),
- int(self.min_periods),
- int(bias),
- )
- return X._wrap_result(cov)
-
- return _flex_binary_moment(
- self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
- )
-
- @Substitution(name="ewm")
- @Appender(_doc_template)
- @Appender(_pairwise_template)
- def corr(self, other=None, pairwise=None, **kwargs):
- """
- Exponential weighted sample correlation.
- """
- if other is None:
- other = self._selected_obj
- # only default unset
- pairwise = True if pairwise is None else pairwise
- other = self._shallow_copy(other)
-
- def _get_corr(X, Y):
- X = self._shallow_copy(X)
- Y = self._shallow_copy(Y)
-
- def _cov(x, y):
- return libwindow.ewmcov(
- x,
- y,
- self.com,
- int(self.adjust),
- int(self.ignore_na),
- int(self.min_periods),
- 1,
- )
-
- x_values = X._prep_values()
- y_values = Y._prep_values()
- with np.errstate(all="ignore"):
- cov = _cov(x_values, y_values)
- x_var = _cov(x_values, x_values)
- y_var = _cov(y_values, y_values)
- corr = cov / _zsqrt(x_var * y_var)
- return X._wrap_result(corr)
-
- return _flex_binary_moment(
- self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
- )
-
-
-# Helper Funcs
-
-
-def _flex_binary_moment(arg1, arg2, f, pairwise=False):
-
- if not (
- isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
- and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
- ):
- raise TypeError(
- "arguments to moment function must be of type "
- "np.ndarray/Series/DataFrame"
- )
-
- if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
- arg2, (np.ndarray, ABCSeries)
- ):
- X, Y = _prep_binary(arg1, arg2)
- return f(X, Y)
-
- elif isinstance(arg1, ABCDataFrame):
- from pandas import DataFrame
-
- def dataframe_from_int_dict(data, frame_template):
- result = DataFrame(data, index=frame_template.index)
- if len(result.columns) > 0:
- result.columns = frame_template.columns[result.columns]
- return result
-
- results = {}
- if isinstance(arg2, ABCDataFrame):
- if pairwise is False:
- if arg1 is arg2:
- # special case in order to handle duplicate column names
- for i, col in enumerate(arg1.columns):
- results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
- return dataframe_from_int_dict(results, arg1)
- else:
- if not arg1.columns.is_unique:
- raise ValueError("'arg1' columns are not unique")
- if not arg2.columns.is_unique:
- raise ValueError("'arg2' columns are not unique")
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- X, Y = arg1.align(arg2, join="outer")
- X = X + 0 * Y
- Y = Y + 0 * X
-
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", RuntimeWarning)
- res_columns = arg1.columns.union(arg2.columns)
- for col in res_columns:
- if col in X and col in Y:
- results[col] = f(X[col], Y[col])
- return DataFrame(results, index=X.index, columns=res_columns)
- elif pairwise is True:
- results = defaultdict(dict)
- for i, k1 in enumerate(arg1.columns):
- for j, k2 in enumerate(arg2.columns):
- if j < i and arg2 is arg1:
- # Symmetric case
- results[i][j] = results[j][i]
- else:
- results[i][j] = f(
- *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])
- )
-
- from pandas import MultiIndex, concat
-
- result_index = arg1.index.union(arg2.index)
- if len(result_index):
-
- # construct result frame
- result = concat(
- [
- concat(
- [results[i][j] for j, c in enumerate(arg2.columns)],
- ignore_index=True,
- )
- for i, c in enumerate(arg1.columns)
- ],
- ignore_index=True,
- axis=1,
- )
- result.columns = arg1.columns
-
- # set the index and reorder
- if arg2.columns.nlevels > 1:
- result.index = MultiIndex.from_product(
- arg2.columns.levels + [result_index]
- )
- result = result.reorder_levels([2, 0, 1]).sort_index()
- else:
- result.index = MultiIndex.from_product(
- [range(len(arg2.columns)), range(len(result_index))]
- )
- result = result.swaplevel(1, 0).sort_index()
- result.index = MultiIndex.from_product(
- [result_index] + [arg2.columns]
- )
- else:
-
- # empty result
- result = DataFrame(
- index=MultiIndex(
- levels=[arg1.index, arg2.columns], codes=[[], []]
- ),
- columns=arg2.columns,
- dtype="float64",
- )
-
- # reset our index names to arg1 names
- # reset our column names to arg2 names
- # careful not to mutate the original names
- result.columns = result.columns.set_names(arg1.columns.names)
- result.index = result.index.set_names(
- result_index.names + arg2.columns.names
- )
-
- return result
-
- else:
- raise ValueError("'pairwise' is not True/False")
- else:
- results = {
- i: f(*_prep_binary(arg1.iloc[:, i], arg2))
- for i, col in enumerate(arg1.columns)
- }
- return dataframe_from_int_dict(results, arg1)
-
- else:
- return _flex_binary_moment(arg2, arg1, f)
-
-
-def _get_center_of_mass(comass, span, halflife, alpha):
- valid_count = com.count_not_none(comass, span, halflife, alpha)
- if valid_count > 1:
- raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive")
-
- # Convert to center of mass; domain checks ensure 0 < alpha <= 1
- if comass is not None:
- if comass < 0:
- raise ValueError("comass must satisfy: comass >= 0")
- elif span is not None:
- if span < 1:
- raise ValueError("span must satisfy: span >= 1")
- comass = (span - 1) / 2.0
- elif halflife is not None:
- if halflife <= 0:
- raise ValueError("halflife must satisfy: halflife > 0")
- decay = 1 - np.exp(np.log(0.5) / halflife)
- comass = 1 / decay - 1
- elif alpha is not None:
- if alpha <= 0 or alpha > 1:
- raise ValueError("alpha must satisfy: 0 < alpha <= 1")
- comass = (1.0 - alpha) / alpha
- else:
- raise ValueError("Must pass one of comass, span, halflife, or alpha")
-
- return float(comass)
-
-
-def _offset(window, center):
- if not is_integer(window):
- window = len(window)
- offset = (window - 1) / 2.0 if center else 0
- try:
- return int(offset)
- except TypeError:
- return offset.astype(int)
-
-
-def _require_min_periods(p):
- def _check_func(minp, window):
- if minp is None:
- return window
- else:
- return max(p, minp)
-
- return _check_func
-
-
-def _use_window(minp, window):
- if minp is None:
- return window
- else:
- return minp
-
-
-def _zsqrt(x):
- with np.errstate(all="ignore"):
- result = np.sqrt(x)
- mask = x < 0
-
- if isinstance(x, ABCDataFrame):
- if mask.values.any():
- result[mask] = 0
- else:
- if mask.any():
- result[mask] = 0
-
- return result
-
-
-def _prep_binary(arg1, arg2):
- if not isinstance(arg2, type(arg1)):
- raise Exception("Input arrays must be of the same type!")
-
- # mask out values, this also makes a common index...
- X = arg1 + 0 * arg2
- Y = arg2 + 0 * arg1
-
- return X, Y
-
-
-# Top-level exports
-
-
-def rolling(obj, win_type=None, **kwds):
- if not isinstance(obj, (ABCSeries, ABCDataFrame)):
- raise TypeError("invalid type: %s" % type(obj))
-
- if win_type is not None:
- return Window(obj, win_type=win_type, **kwds)
-
- return Rolling(obj, **kwds)
-
-
-rolling.__doc__ = Window.__doc__
-
-
-def expanding(obj, **kwds):
- if not isinstance(obj, (ABCSeries, ABCDataFrame)):
- raise TypeError("invalid type: %s" % type(obj))
-
- return Expanding(obj, **kwds)
-
-
-expanding.__doc__ = Expanding.__doc__
-
-
-def ewm(obj, **kwds):
- if not isinstance(obj, (ABCSeries, ABCDataFrame)):
- raise TypeError("invalid type: %s" % type(obj))
-
- return EWM(obj, **kwds)
-
-
-ewm.__doc__ = EWM.__doc__
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 3177937ac4ba1..a85fc8bfb1414 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -4,7 +4,7 @@
Expose public exceptions & warnings
"""
-from pandas._libs.tslibs import OutOfBoundsDatetime
+from pandas._libs.tslibs import NullFrequencyError, OutOfBoundsDatetime
class PerformanceWarning(Warning):
@@ -157,14 +157,6 @@ class MergeError(ValueError):
"""
-class NullFrequencyError(ValueError):
- """
- Error raised when a null `freq` attribute is used in an operation
- that needs a non-null frequency, particularly `DatetimeIndex.shift`,
- `TimedeltaIndex.shift`, `PeriodIndex.shift`.
- """
-
-
class AccessorRegistrationWarning(Warning):
"""Warning for attribute conflicts in accessor registration."""
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 0006824f09fe7..518b940ec5da3 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -9,8 +9,7 @@
def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
r"""
- Read text from clipboard and pass to read_csv. See read_csv for the
- full argument list
+ Read text from clipboard and pass to read_csv.
Parameters
----------
@@ -18,9 +17,13 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
+ **kwargs
+ See read_csv for the full argument list.
+
Returns
-------
- parsed : DataFrame
+ DataFrame
+ A parsed DataFrame object.
"""
encoding = kwargs.pop("encoding", "utf-8")
@@ -121,14 +124,14 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
return
except TypeError:
warnings.warn(
- "to_clipboard in excel mode requires a single " "character separator."
+ "to_clipboard in excel mode requires a single character separator."
)
elif sep is not None:
warnings.warn("to_clipboard with excel=False ignores the sep argument")
if isinstance(obj, ABCDataFrame):
# str(df) has various unhelpful defaults, like truncation
- with option_context("display.max_colwidth", 999999):
+ with option_context("display.max_colwidth", None):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 9a9620e2d0663..2ca2007e2925f 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -4,13 +4,23 @@
import codecs
import csv
import gzip
-from http.client import HTTPException # noqa
-from io import BytesIO
-import lzma
+from io import BufferedIOBase, BytesIO
import mmap
import os
import pathlib
-from urllib.error import URLError # noqa
+from typing import (
+ IO,
+ Any,
+ AnyStr,
+ BinaryIO,
+ Dict,
+ List,
+ Optional,
+ TextIO,
+ Tuple,
+ Type,
+ Union,
+)
from urllib.parse import ( # noqa
urlencode,
urljoin,
@@ -19,9 +29,9 @@
uses_params,
uses_relative,
)
-from urllib.request import pathname2url, urlopen
import zipfile
+from pandas.compat import _get_lzma_file, _import_lzma
from pandas.errors import ( # noqa
AbstractMethodError,
DtypeWarning,
@@ -32,6 +42,10 @@
from pandas.core.dtypes.common import is_file_like
+from pandas._typing import FilePathOrBuffer
+
+lzma = _import_lzma()
+
# gh-12665: Alias for now and remove later.
CParserError = ParserError
@@ -68,15 +82,16 @@ class BaseIterator:
Useful only when the object being iterated is non-reusable (e.g. OK for a
parser, not for an in-memory table, yes for its iterator)."""
- def __iter__(self):
+ def __iter__(self) -> "BaseIterator":
return self
def __next__(self):
raise AbstractMethodError(self)
-def _is_url(url):
- """Check to see if a URL has a valid protocol.
+def _is_url(url) -> bool:
+ """
+ Check to see if a URL has a valid protocol.
Parameters
----------
@@ -87,13 +102,14 @@ def _is_url(url):
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
- try:
- return parse_url(url).scheme in _VALID_URLS
- except Exception:
+ if not isinstance(url, str):
return False
+ return parse_url(url).scheme in _VALID_URLS
-def _expand_user(filepath_or_buffer):
+def _expand_user(
+ filepath_or_buffer: FilePathOrBuffer[AnyStr]
+) -> FilePathOrBuffer[AnyStr]:
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
@@ -111,7 +127,7 @@ def _expand_user(filepath_or_buffer):
return filepath_or_buffer
-def _validate_header_arg(header):
+def _validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. "
@@ -121,7 +137,9 @@ def _validate_header_arg(header):
)
-def _stringify_path(filepath_or_buffer):
+def _stringify_path(
+ filepath_or_buffer: FilePathOrBuffer[AnyStr]
+) -> FilePathOrBuffer[AnyStr]:
"""Attempt to convert a path-like object to a string.
Parameters
@@ -144,30 +162,42 @@ def _stringify_path(filepath_or_buffer):
strings, buffers, or anything else that's not even path-like.
"""
if hasattr(filepath_or_buffer, "__fspath__"):
- return filepath_or_buffer.__fspath__()
+ # https://github.com/python/mypy/issues/1424
+ return filepath_or_buffer.__fspath__() # type: ignore
elif isinstance(filepath_or_buffer, pathlib.Path):
return str(filepath_or_buffer)
return _expand_user(filepath_or_buffer)
-def is_s3_url(url):
+def is_s3_url(url) -> bool:
"""Check for an s3, s3n, or s3a url"""
- try:
- return parse_url(url).scheme in ["s3", "s3n", "s3a"]
- except Exception:
+ if not isinstance(url, str):
return False
+ return parse_url(url).scheme in ["s3", "s3n", "s3a"]
-def is_gcs_url(url):
+def is_gcs_url(url) -> bool:
"""Check for a gcs url"""
- try:
- return parse_url(url).scheme in ["gcs", "gs"]
- except Exception:
+ if not isinstance(url, str):
return False
+ return parse_url(url).scheme in ["gcs", "gs"]
+
+
+def urlopen(*args, **kwargs):
+ """
+ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
+ the stdlib.
+ """
+ import urllib.request
+
+ return urllib.request.urlopen(*args, **kwargs)
def get_filepath_or_buffer(
- filepath_or_buffer, encoding=None, compression=None, mode=None
+ filepath_or_buffer: FilePathOrBuffer,
+ encoding: Optional[str] = None,
+ compression: Optional[str] = None,
+ mode: Optional[str] = None,
):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
@@ -190,7 +220,7 @@ def get_filepath_or_buffer(
"""
filepath_or_buffer = _stringify_path(filepath_or_buffer)
- if _is_url(filepath_or_buffer):
+ if isinstance(filepath_or_buffer, str) and _is_url(filepath_or_buffer):
req = urlopen(filepath_or_buffer)
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
@@ -224,7 +254,7 @@ def get_filepath_or_buffer(
return filepath_or_buffer, None, compression, False
-def file_path_to_url(path):
+def file_path_to_url(path: str) -> str:
"""
converts an absolute native path to a FILE URL.
@@ -236,13 +266,52 @@ def file_path_to_url(path):
-------
a valid FILE URL
"""
+ # lazify expensive import (~30ms)
+ from urllib.request import pathname2url
+
return urljoin("file:", pathname2url(path))
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
-def _infer_compression(filepath_or_buffer, compression):
+def _get_compression_method(
+ compression: Optional[Union[str, Dict[str, str]]]
+) -> Tuple[Optional[str], Dict[str, str]]:
+ """
+ Simplifies a compression argument to a compression method string and
+ a dict containing additional arguments.
+
+ Parameters
+ ----------
+ compression : str or dict
+ If string, specifies the compression method. If dict, value at key
+ 'method' specifies compression method.
+
+ Returns
+ -------
+ tuple of ({compression method}, Optional[str]
+ {compression arguments}, Dict[str, str])
+
+ Raises
+ ------
+ ValueError on dict missing 'method' key
+ """
+ # Handle dict
+ if isinstance(compression, dict):
+ compression_args = compression.copy()
+ try:
+ compression = compression_args.pop("method")
+ except KeyError:
+ raise ValueError("If dict, compression must have key 'method'")
+ else:
+ compression_args = {}
+ return compression, compression_args
+
+
+def _infer_compression(
+ filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
+) -> Optional[str]:
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
@@ -251,8 +320,8 @@ def _infer_compression(filepath_or_buffer, compression):
Parameters
----------
- filepath_or_buffer :
- a path (str) or buffer
+ filepath_or_buffer : str or file handle
+ File path or object.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
@@ -260,12 +329,11 @@ def _infer_compression(filepath_or_buffer, compression):
Returns
-------
- string or None :
- compression method
+ string or None
Raises
------
- ValueError on invalid compression specified
+ ValueError on invalid compression specified.
"""
# No compression has been explicitly specified
@@ -297,49 +365,67 @@ def _infer_compression(filepath_or_buffer, compression):
def _get_handle(
- path_or_buf, mode, encoding=None, compression=None, memory_map=False, is_text=True
+ path_or_buf,
+ mode: str,
+ encoding=None,
+ compression: Optional[Union[str, Dict[str, Any]]] = None,
+ memory_map: bool = False,
+ is_text: bool = True,
):
"""
Get file handle for given path/buffer and mode.
Parameters
----------
- path_or_buf :
- a path (str) or buffer
+ path_or_buf : str or file handle
+ File path or object.
mode : str
- mode to open path_or_buf with
+ Mode to open path_or_buf with.
encoding : str or None
- compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None
- If 'infer' and `filepath_or_buffer` is path-like, then detect
- compression from the following extensions: '.gz', '.bz2', '.zip',
- or '.xz' (otherwise no compression).
+ Encoding to use.
+ compression : str or dict, default None
+ If string, specifies compression mode. If dict, value at key 'method'
+ specifies compression mode. Compression mode must be one of {'infer',
+ 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'
+ and `filepath_or_buffer` is path-like, then detect compression from
+ the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
+ no compression). If dict and compression mode is 'zip' or inferred as
+ 'zip', other entries passed as additional compression options.
+
+ .. versionchanged:: 1.0.0
+
+ May now be a dict with key 'method' as compression mode
+ and other keys as compression options if compression
+ mode is 'zip'.
+
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
whether file/buffer is in text format (csv, json, etc.), or in binary
- mode (pickle, etc.)
+ mode (pickle, etc.).
Returns
-------
f : file-like
- A file-like object
+ A file-like object.
handles : list of file-like objects
A list of file-like object that were opened in this function.
"""
try:
from s3fs import S3File
- need_text_wrapping = (BytesIO, S3File)
+ need_text_wrapping = (BufferedIOBase, S3File)
except ImportError:
- need_text_wrapping = (BytesIO,)
+ need_text_wrapping = BufferedIOBase # type: ignore
- handles = list()
+ handles = list() # type: List[IO]
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
path_or_buf = _stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, str)
+ compression, compression_args = _get_compression_method(compression)
if is_path:
compression = _infer_compression(path_or_buf, compression)
@@ -361,7 +447,7 @@ def _get_handle(
# ZIP Compression
elif compression == "zip":
- zf = BytesZipFile(path_or_buf, mode)
+ zf = BytesZipFile(path_or_buf, mode, **compression_args)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == "w":
@@ -382,7 +468,7 @@ def _get_handle(
# XZ Compression
elif compression == "xz":
- f = lzma.LZMAFile(path_or_buf, mode)
+ f = _get_lzma_file(lzma)(path_or_buf, mode)
# Unrecognized Compression
else:
@@ -407,14 +493,16 @@ def _get_handle(
if is_text and (compression or isinstance(f, need_text_wrapping)):
from io import TextIOWrapper
- f = TextIOWrapper(f, encoding=encoding, newline="")
- handles.append(f)
+ g = TextIOWrapper(f, encoding=encoding, newline="")
+ if not isinstance(f, BufferedIOBase):
+ handles.append(g)
+ f = g
if memory_map and hasattr(f, "fileno"):
try:
- g = MMapWrapper(f)
+ wrapped = MMapWrapper(f)
f.close()
- f = g
+ f = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
@@ -435,13 +523,23 @@ class BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore
"""
# GH 17778
- def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs):
+ def __init__(
+ self,
+ file: FilePathOrBuffer,
+ mode: str,
+ archive_name: Optional[str] = None,
+ **kwargs
+ ):
if mode in ["wb", "rb"]:
mode = mode.replace("b", "")
- super().__init__(file, mode, compression, **kwargs)
+ self.archive_name = archive_name
+ super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs)
def write(self, data):
- super().writestr(self.filename, data)
+ archive_name = self.filename
+ if self.archive_name is not None:
+ archive_name = self.archive_name
+ super().writestr(archive_name, data)
@property
def closed(self):
@@ -461,16 +559,16 @@ class MMapWrapper(BaseIterator):
"""
- def __init__(self, f):
+ def __init__(self, f: IO):
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
- def __getattr__(self, name):
+ def __getattr__(self, name: str):
return getattr(self.mmap, name)
- def __iter__(self):
+ def __iter__(self) -> "MMapWrapper":
return self
- def __next__(self):
+ def __next__(self) -> str:
newline = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
@@ -486,23 +584,25 @@ def __next__(self):
class UTF8Recoder(BaseIterator):
-
"""
Iterator that reads an encoded stream and re-encodes the input to UTF-8
"""
- def __init__(self, f, encoding):
+ def __init__(self, f: BinaryIO, encoding: str):
self.reader = codecs.getreader(encoding)(f)
- def read(self, bytes=-1):
+ def read(self, bytes: int = -1) -> bytes:
return self.reader.read(bytes).encode("utf-8")
- def readline(self):
+ def readline(self) -> bytes:
return self.reader.readline().encode("utf-8")
- def next(self):
+ def next(self) -> bytes:
return next(self.reader).encode("utf-8")
+ def close(self):
+ self.reader.close()
+
# Keeping these class for now because it provides a necessary convenience
# for "dropping" the "encoding" argument from our I/O arguments when
@@ -511,5 +611,7 @@ def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.reader(f, dialect=dialect, **kwds)
-def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
+def UnicodeWriter(
+ f: TextIO, dialect: Type[csv.Dialect] = csv.excel, encoding: str = "utf-8", **kwds
+):
return csv.writer(f, dialect=dialect, **kwds)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 763b12949ba0a..6dba5e042562b 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -4,7 +4,6 @@
from io import BytesIO
import os
from textwrap import fill
-from urllib.request import urlopen
from pandas._config import config
@@ -21,6 +20,7 @@
_stringify_path,
_validate_header_arg,
get_filepath_or_buffer,
+ urlopen,
)
from pandas.io.excel._util import (
_fill_mi_header,
@@ -112,7 +112,7 @@
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
- Acceptable values are None or xlrd.
+ Acceptable values are None, "xlrd", "openpyxl" or "odf".
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
@@ -120,14 +120,8 @@
content.
true_values : list, default None
Values to consider as True.
-
- .. versionadded:: 0.19.0
-
false_values : list, default None
Values to consider as False.
-
- .. versionadded:: 0.19.0
-
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
@@ -303,7 +297,7 @@ def read_excel(
for arg in ("sheet", "sheetname", "parse_cols"):
if arg in kwds:
raise TypeError(
- "read_excel() got an unexpected keyword argument " "`{}`".format(arg)
+ "read_excel() got an unexpected keyword argument `{}`".format(arg)
)
if not isinstance(io, ExcelFile):
@@ -359,7 +353,7 @@ def __init__(self, filepath_or_buffer):
self.book = self.load_workbook(filepath_or_buffer)
else:
raise ValueError(
- "Must explicitly set engine if not passing in" " buffer or path for io."
+ "Must explicitly set engine if not passing in buffer or path for io."
)
@property
@@ -719,9 +713,7 @@ def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
- raise ValueError(
- "Must pass explicit sheet_name or set " "cur_sheet property"
- )
+ raise ValueError("Must pass explicit sheet_name or set cur_sheet property")
return sheet_name
def _value_with_fmt(self, val):
@@ -791,11 +783,12 @@ class ExcelFile:
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
- file-like object or xlrd workbook
- If a string or path object, expected to be a path to xls or xlsx file.
+    a file-like object, xlrd workbook or openpyxl workbook.
+ If a string or path object, expected to be a path to xls, xlsx or odf file.
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
- Acceptable values are None or ``xlrd``.
+ Acceptable values are None, ``xlrd``, ``openpyxl`` or ``odf``.
+ Note that ``odf`` reads tables out of OpenDocument formatted files.
"""
from pandas.io.excel._odfreader import _ODFReader
@@ -845,10 +838,10 @@ def parse(
**kwds
):
"""
- Parse specified sheet(s) into a DataFrame
+ Parse specified sheet(s) into a DataFrame.
Equivalent to read_excel(ExcelFile, ...) See the read_excel
- docstring for more info on accepted parameters
+ docstring for more info on accepted parameters.
Returns
-------
@@ -857,7 +850,7 @@ def parse(
"""
if "chunksize" in kwds:
raise NotImplementedError(
- "chunksize keyword of read_excel " "is not implemented"
+ "chunksize keyword of read_excel is not implemented"
)
return self._reader.parse(
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 296b1eef68d7d..25a6db675265d 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -39,7 +39,7 @@ def to_feather(df, path):
if not isinstance(df.index, Int64Index):
raise ValueError(
"feather does not support serializing {} "
- "for the index; you can .reset_index()"
+ "for the index; you can .reset_index() "
"to make the index into column(s)".format(type(df.index))
)
@@ -53,7 +53,7 @@ def to_feather(df, path):
if df.index.name is not None:
raise ValueError(
- "feather does not serialize index meta-data on a " "default index"
+ "feather does not serialize index meta-data on a default index"
)
# validate columns
@@ -71,7 +71,7 @@ def read_feather(path, columns=None, use_threads=True):
"""
Load a feather-format object from the file path.
- .. versionadded 0.20.0
+ .. versionadded:: 0.20.0
Parameters
----------
@@ -90,16 +90,16 @@ def read_feather(path, columns=None, use_threads=True):
columns : sequence, default None
If not provided, all columns are read.
- .. versionadded 0.24.0
+ .. versionadded:: 0.24.0
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame.
- .. versionadded 0.21.0
- .. deprecated 0.24.0
+ .. versionadded:: 0.21.0
+ .. deprecated:: 0.24.0
use_threads : bool, default True
Whether to parallelize reading using multiple threads.
- .. versionadded 0.24.0
+ .. versionadded:: 0.24.0
Returns
-------
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index d86bf432b83c4..e25862537cbfc 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -22,6 +22,7 @@
from pandas.io.common import (
UnicodeWriter,
+ _get_compression_method,
_get_handle,
_infer_compression,
get_filepath_or_buffer,
@@ -58,6 +59,9 @@ def __init__(
if path_or_buf is None:
path_or_buf = StringIO()
+ # Extract compression mode as given, if dict
+ compression, self.compression_args = _get_compression_method(compression)
+
self.path_or_buf, _, _, _ = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
)
@@ -96,9 +100,7 @@ def __init__(
# validate mi options
if self.has_mi_columns:
if cols is not None:
- raise TypeError(
- "cannot specify cols with a MultiIndex on the " "columns"
- )
+ raise TypeError("cannot specify cols with a MultiIndex on the columns")
if cols is not None:
if isinstance(cols, ABCIndexClass):
@@ -158,7 +160,7 @@ def save(self):
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, "write"):
- msg = "compression has no effect when passing file-like " "object as input."
+ msg = "compression has no effect when passing file-like object as input."
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
@@ -180,7 +182,7 @@ def save(self):
self.path_or_buf,
self.mode,
encoding=self.encoding,
- compression=self.compression,
+ compression=dict(self.compression_args, method=self.compression),
)
close = True
@@ -208,11 +210,13 @@ def save(self):
if hasattr(self.path_or_buf, "write"):
self.path_or_buf.write(buf)
else:
+ compression = dict(self.compression_args, method=self.compression)
+
f, handles = _get_handle(
self.path_or_buf,
self.mode,
encoding=self.encoding,
- compression=self.compression,
+ compression=compression,
)
f.write(buf)
close = True
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 012d2d9358241..b9c847ad64c57 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -611,7 +611,7 @@ def _format_hierarchical_rows(self):
self.rowcounter += 1
# if index labels are not empty go ahead and dump
- if com._any_not_none(*index_labels) and self.header is not False:
+ if com.any_not_none(*index_labels) and self.header is not False:
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 0e8ed7b25d665..3a50f63409582 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -3,9 +3,30 @@
and latex files. This module also applies to display formatting.
"""
+import codecs
+from contextlib import contextmanager
+from datetime import tzinfo
+import decimal
from functools import partial
from io import StringIO
+import math
+import re
from shutil import get_terminal_size
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
from unicodedata import east_asian_width
import numpy as np
@@ -15,6 +36,8 @@
from pandas._libs import lib
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
+from pandas._libs.tslibs.nattype import NaTType
+from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -39,34 +62,46 @@
)
from pandas.core.dtypes.missing import isna, notna
+from pandas._typing import FilePathOrBuffer
+from pandas.core.arrays.datetimes import DatetimeArray
+from pandas.core.arrays.timedeltas import TimedeltaArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.index import Index, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
-from pandas.io.common import _expand_user, _stringify_path
+from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
+if TYPE_CHECKING:
+ from pandas import Series, DataFrame, Categorical
+
+formatters_type = Union[
+ List[Callable], Tuple[Callable, ...], Dict[Union[str, int], Callable]
+]
+float_format_type = Union[str, Callable, "EngFormatter"]
+
common_docstring = """
Parameters
----------
- buf : StringIO-like, optional
- Buffer to write to.
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
- header : bool, optional
+ header : %(header_type)s, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
- formatters : list or dict of one-param. functions, optional
+ formatters : list, tuple or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
- List must be of length equal to the number of columns.
+ List/tuple must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
@@ -102,8 +137,6 @@
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
-
- .. versionadded:: 0.18.0
"""
_VALID_JUSTIFY_PARAMETERS = (
@@ -123,20 +156,28 @@
return_docstring = """
Returns
-------
- str (or unicode, depending on data and options)
- String representation of the dataframe.
+ str or None
+ If buf is None, returns the result as a string. Otherwise returns
+ None.
"""
class CategoricalFormatter:
- def __init__(self, categorical, buf=None, length=True, na_rep="NaN", footer=True):
+ def __init__(
+ self,
+ categorical: "Categorical",
+ buf: Optional[IO[str]] = None,
+ length: bool = True,
+ na_rep: str = "NaN",
+ footer: bool = True,
+ ):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO("")
self.na_rep = na_rep
self.length = length
self.footer = footer
- def _get_footer(self):
+ def _get_footer(self) -> str:
footer = ""
if self.length:
@@ -153,7 +194,7 @@ def _get_footer(self):
return str(footer)
- def _get_formatted_values(self):
+ def _get_formatted_values(self) -> List[str]:
return format_array(
self.categorical._internal_get_values(),
None,
@@ -161,7 +202,7 @@ def _get_formatted_values(self):
na_rep=self.na_rep,
)
- def to_string(self):
+ def to_string(self) -> str:
categorical = self.categorical
if len(categorical) == 0:
@@ -172,10 +213,10 @@ def to_string(self):
fmt_values = self._get_formatted_values()
- result = ["{i}".format(i=i) for i in fmt_values]
- result = [i.strip() for i in result]
- result = ", ".join(result)
- result = ["[" + result + "]"]
+ fmt_values = ["{i}".format(i=i) for i in fmt_values]
+ fmt_values = [i.strip() for i in fmt_values]
+ values = ", ".join(fmt_values)
+ result = ["[" + values + "]"]
if self.footer:
footer = self._get_footer()
if footer:
@@ -187,17 +228,17 @@ def to_string(self):
class SeriesFormatter:
def __init__(
self,
- series,
- buf=None,
- length=True,
- header=True,
- index=True,
- na_rep="NaN",
- name=False,
- float_format=None,
- dtype=True,
- max_rows=None,
- min_rows=None,
+ series: "Series",
+ buf: Optional[IO[str]] = None,
+ length: bool = True,
+ header: bool = True,
+ index: bool = True,
+ na_rep: str = "NaN",
+ name: bool = False,
+ float_format: Optional[str] = None,
+ dtype: bool = True,
+ max_rows: Optional[int] = None,
+ min_rows: Optional[int] = None,
):
self.series = series
self.buf = buf if buf is not None else StringIO()
@@ -217,7 +258,7 @@ def __init__(
self._chk_truncate()
- def _chk_truncate(self):
+ def _chk_truncate(self) -> None:
from pandas.core.reshape.concat import concat
min_rows = self.min_rows
@@ -227,6 +268,7 @@ def _chk_truncate(self):
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
+ max_rows = cast(int, max_rows)
if min_rows:
# if min_rows is set (not None or 0), set max_rows to minimum
# of both
@@ -237,13 +279,13 @@ def _chk_truncate(self):
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
- self.tr_row_num = row_num
+ self.tr_row_num = row_num # type: Optional[int]
else:
self.tr_row_num = None
self.tr_series = series
self.truncate_v = truncate_v
- def _get_footer(self):
+ def _get_footer(self) -> str:
name = self.series.name
footer = ""
@@ -281,7 +323,7 @@ def _get_footer(self):
return str(footer)
- def _get_formatted_index(self):
+ def _get_formatted_index(self) -> Tuple[List[str], bool]:
index = self.tr_series.index
is_multi = isinstance(index, ABCMultiIndex)
@@ -293,13 +335,15 @@ def _get_formatted_index(self):
fmt_index = index.format(name=True)
return fmt_index, have_header
- def _get_formatted_values(self):
- values_to_format = self.tr_series._formatting_values()
+ def _get_formatted_values(self) -> List[str]:
return format_array(
- values_to_format, None, float_format=self.float_format, na_rep=self.na_rep
+ self.tr_series._values,
+ None,
+ float_format=self.float_format,
+ na_rep=self.na_rep,
)
- def to_string(self):
+ def to_string(self) -> str:
series = self.tr_series
footer = self._get_footer()
@@ -314,6 +358,7 @@ def to_string(self):
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
+ row_num = cast(int, row_num)
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = "..."
@@ -343,13 +388,13 @@ class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
- def len(self, text):
+ def len(self, text: str) -> int:
return len(text)
- def justify(self, texts, max_len, mode="right"):
+ def justify(self, texts: Any, max_len: int, mode: str = "right") -> List[str]:
return justify(texts, max_len, mode=mode)
- def adjoin(self, space, *lists, **kwargs):
+ def adjoin(self, space: int, *lists, **kwargs) -> str:
return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
@@ -366,7 +411,7 @@ def __init__(self):
# Ambiguous width can be changed by option
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
- def len(self, text):
+ def len(self, text: str) -> int:
"""
Calculate display width considering unicode East Asian Width
"""
@@ -377,7 +422,9 @@ def len(self, text):
self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
)
- def justify(self, texts, max_len, mode="right"):
+ def justify(
+ self, texts: Iterable[str], max_len: int, mode: str = "right"
+ ) -> List[str]:
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
@@ -390,7 +437,7 @@ def _get_pad(t):
return [x.rjust(_get_pad(x)) for x in texts]
-def _get_adjustment():
+def _get_adjustment() -> TextAdjustment:
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
@@ -400,17 +447,21 @@ def _get_adjustment():
class TableFormatter:
- show_dimensions = None
+ show_dimensions = None # type: bool
+ is_truncated = None # type: bool
+ formatters = None # type: formatters_type
+ columns = None # type: Index
@property
- def should_show_dimensions(self):
+ def should_show_dimensions(self) -> Optional[bool]:
return self.show_dimensions is True or (
self.show_dimensions == "truncate" and self.is_truncated
)
- def _get_formatter(self, i):
+ def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
+ i = cast(int, i)
return self.formatters[i]
else:
return None
@@ -419,6 +470,50 @@ def _get_formatter(self, i):
i = self.columns[i]
return self.formatters.get(i, None)
+ @contextmanager
+ def get_buffer(
+ self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None
+ ):
+ """
+ Context manager to open, yield and close buffer for filenames or Path-like
+ objects, otherwise yield buf unchanged.
+ """
+ if buf is not None:
+ buf = _stringify_path(buf)
+ else:
+ buf = StringIO()
+
+ if encoding is None:
+ encoding = "utf-8"
+
+ if hasattr(buf, "write"):
+ yield buf
+ elif isinstance(buf, str):
+ with codecs.open(buf, "w", encoding=encoding) as f:
+ yield f
+ else:
+ raise TypeError("buf is not a file name and it has no write method")
+
+ def write_result(self, buf: IO[str]) -> None:
+ """
+ Write the result of serialization to buf.
+ """
+ raise AbstractMethodError(self)
+
+ def get_result(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+ ) -> Optional[str]:
+ """
+ Perform serialization. Write to buf or return as string if buf is None.
+ """
+ with self.get_buffer(buf, encoding=encoding) as f:
+ self.write_result(buf=f)
+ if buf is None:
+ return f.getvalue()
+ return None
+
class DataFrameFormatter(TableFormatter):
"""
@@ -435,33 +530,29 @@ class DataFrameFormatter(TableFormatter):
def __init__(
self,
- frame,
- buf=None,
- columns=None,
- col_space=None,
- header=True,
- index=True,
- na_rep="NaN",
- formatters=None,
- justify=None,
- float_format=None,
- sparsify=None,
- index_names=True,
- line_width=None,
- max_rows=None,
- min_rows=None,
- max_cols=None,
- show_dimensions=False,
- decimal=".",
- table_id=None,
- render_links=False,
- **kwds
+ frame: "DataFrame",
+ columns: Optional[Sequence[str]] = None,
+ col_space: Optional[Union[str, int]] = None,
+ header: Union[bool, Sequence[str]] = True,
+ index: bool = True,
+ na_rep: str = "NaN",
+ formatters: Optional[formatters_type] = None,
+ justify: Optional[str] = None,
+ float_format: Optional[float_format_type] = None,
+ sparsify: Optional[bool] = None,
+ index_names: bool = True,
+ line_width: Optional[int] = None,
+ max_rows: Optional[int] = None,
+ min_rows: Optional[int] = None,
+ max_cols: Optional[int] = None,
+ show_dimensions: bool = False,
+ decimal: str = ".",
+ table_id: Optional[str] = None,
+ render_links: bool = False,
+ bold_rows: bool = False,
+ escape: bool = True,
):
self.frame = frame
- if buf is not None:
- self.buf = _expand_user(_stringify_path(buf))
- else:
- self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
@@ -490,7 +581,8 @@ def __init__(
else:
self.justify = justify
- self.kwds = kwds
+ self.bold_rows = bold_rows
+ self.escape = escape
if columns is not None:
self.columns = ensure_index(columns)
@@ -501,7 +593,7 @@ def __init__(
self._chk_truncate()
self.adj = _get_adjustment()
- def _chk_truncate(self):
+ def _chk_truncate(self) -> None:
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
@@ -521,9 +613,12 @@ def _chk_truncate(self):
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
+ # assume we only get here if self.header is boolean.
+ # i.e. not to_latex() where self.header may be List[str]
+ self.header = cast(bool, self.header)
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
# rows available to fill with actual data
- max_rows_adj = self.h - n_add_rows
+ max_rows_adj = self.h - n_add_rows # type: Optional[int]
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
@@ -550,9 +645,12 @@ def _chk_truncate(self):
frame = self.frame
if truncate_h:
+ # cast here since if truncate_h is True, max_cols_adj is not None
+ max_cols_adj = cast(int, max_cols_adj)
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
+ max_cols = cast(int, max_cols)
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
@@ -560,8 +658,17 @@ def _chk_truncate(self):
frame = concat(
(frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1
)
+ # truncate formatter
+ if isinstance(self.formatters, (list, tuple)):
+ truncate_fmt = self.formatters
+ self.formatters = [
+ *truncate_fmt[:col_num],
+ *truncate_fmt[-col_num:],
+ ]
self.tr_col_num = col_num
if truncate_v:
+ # cast here since if truncate_v is True, max_rows_adj is not None
+ max_rows_adj = cast(int, max_rows_adj)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
@@ -575,12 +682,16 @@ def _chk_truncate(self):
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
- self.is_truncated = self.truncate_h or self.truncate_v
+ self.is_truncated = bool(self.truncate_h or self.truncate_v)
- def _to_str_columns(self):
+ def _to_str_columns(self) -> List[List[str]]:
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
+ # this method is not used by to_html where self.col_space
+ # could be a string so safe to cast
+ self.col_space = cast(int, self.col_space)
+
frame = self.tr_frame
# may include levels names also
@@ -599,6 +710,8 @@ def _to_str_columns(self):
stringified.append(fmt_values)
else:
if is_list_like(self.header):
+ # cast here since can't be bool if is_list_like
+ self.header = cast(List[str], self.header)
if len(self.header) != len(self.columns):
raise ValueError(
(
@@ -645,6 +758,8 @@ def _to_str_columns(self):
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
+ # cast here since if truncate_v is True, self.tr_row_num is not None
+ row_num = cast(int, row_num)
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
@@ -667,7 +782,7 @@ def _to_str_columns(self):
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
- def to_string(self):
+ def write_result(self, buf: IO[str]) -> None:
"""
Render a DataFrame to a console-friendly tabular output.
"""
@@ -693,8 +808,8 @@ def to_string(self):
): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
- text = self.adj.adjoin(1, *strcols).split("\n")
- max_len = Series(text).str.len().max()
+ lines = self.adj.adjoin(1, *strcols).split("\n")
+ max_len = Series(lines).str.len().max()
# plus truncate dot col
dif = max_len - self.w
# '+ 1' to avoid too wide repr (GH PR #17023)
@@ -722,19 +837,19 @@ def to_string(self):
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
- self.buf.writelines(text)
+ buf.writelines(text)
if self.should_show_dimensions:
- self.buf.write(
+ buf.write(
"\n\n[{nrows} rows x {ncols} columns]".format(
nrows=len(frame), ncols=len(frame.columns)
)
)
- def _join_multiline(self, *strcols):
+ def _join_multiline(self, *args) -> str:
lwidth = self.line_width
adjoin_width = 1
- strcols = list(strcols)
+ strcols = list(args)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
@@ -747,6 +862,8 @@ def _join_multiline(self, *strcols):
nbins = len(col_bins)
if self.truncate_v:
+ # cast here since if truncate_v is True, max_rows_adj is not None
+ self.max_rows_adj = cast(int, self.max_rows_adj)
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
@@ -766,49 +883,43 @@ def _join_multiline(self, *strcols):
st = ed
return "\n\n".join(str_lst)
+ def to_string(self, buf: Optional[FilePathOrBuffer[str]] = None) -> Optional[str]:
+ return self.get_result(buf=buf)
+
def to_latex(
self,
- column_format=None,
- longtable=False,
- encoding=None,
- multicolumn=False,
- multicolumn_format=None,
- multirow=False,
- ):
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ column_format: Optional[str] = None,
+ longtable: bool = False,
+ encoding: Optional[str] = None,
+ multicolumn: bool = False,
+ multicolumn_format: Optional[str] = None,
+ multirow: bool = False,
+ caption: Optional[str] = None,
+ label: Optional[str] = None,
+ ) -> Optional[str]:
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
- latex_renderer = LatexFormatter(
+ return LatexFormatter(
self,
column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
- )
-
- if encoding is None:
- encoding = "utf-8"
+ caption=caption,
+ label=label,
+ ).get_result(buf=buf, encoding=encoding)
- if hasattr(self.buf, "write"):
- latex_renderer.write_result(self.buf)
- elif isinstance(self.buf, str):
- import codecs
-
- with codecs.open(self.buf, "w", encoding=encoding) as f:
- latex_renderer.write_result(f)
- else:
- raise TypeError("buf is not a file name and it has no write " "method")
-
- def _format_col(self, i):
+ def _format_col(self, i: int) -> List[str]:
frame = self.tr_frame
formatter = self._get_formatter(i)
- values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(
- values_to_format,
+ frame.iloc[:, i]._values,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
@@ -816,7 +927,13 @@ def _format_col(self, i):
decimal=self.decimal,
)
- def to_html(self, classes=None, notebook=False, border=None):
+ def to_html(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ classes: Optional[Union[str, List, Tuple]] = None,
+ notebook: bool = False,
+ border: Optional[int] = None,
+ ) -> Optional[str]:
"""
Render a DataFrame to a html table.
@@ -830,22 +947,13 @@ def to_html(self, classes=None, notebook=False, border=None):
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
-
- .. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
- html = Klass(self, classes=classes, border=border).render()
- if hasattr(self.buf, "write"):
- buffer_put_lines(self.buf, html)
- elif isinstance(self.buf, str):
- with open(self.buf, "w") as f:
- buffer_put_lines(f, html)
- else:
- raise TypeError("buf is not a file name and it has no write " " method")
+ return Klass(self, classes=classes, border=border).get_result(buf=buf)
- def _get_formatted_column_labels(self, frame):
+ def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
from pandas.core.index import _sparsify
columns = frame.columns
@@ -887,24 +995,25 @@ def space_format(x, y):
return str_columns
@property
- def has_index_names(self):
+ def has_index_names(self) -> bool:
return _has_names(self.frame.index)
@property
- def has_column_names(self):
+ def has_column_names(self) -> bool:
return _has_names(self.frame.columns)
@property
- def show_row_idx_names(self):
+ def show_row_idx_names(self) -> bool:
return all((self.has_index_names, self.index, self.show_index_names))
@property
- def show_col_idx_names(self):
+ def show_col_idx_names(self) -> bool:
return all((self.has_column_names, self.show_index_names, self.header))
- def _get_formatted_index(self, frame):
+ def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
# Note: this is only used by to_string() and to_latex(), not by
- # to_html().
+ # to_html(). so safe to cast col_space here.
+ self.col_space = cast(int, self.col_space)
index = frame.index
columns = frame.columns
fmt = self._get_formatter("__index__")
@@ -941,8 +1050,8 @@ def _get_formatted_index(self, frame):
else:
return adjoined
- def _get_column_name_list(self):
- names = []
+ def _get_column_name_list(self) -> List[str]:
+ names = [] # type: List[str]
columns = self.frame.columns
if isinstance(columns, ABCMultiIndex):
names.extend("" if name is None else name for name in columns.names)
@@ -956,16 +1065,16 @@ def _get_column_name_list(self):
def format_array(
- values,
- formatter,
- float_format=None,
- na_rep="NaN",
- digits=None,
- space=None,
- justify="right",
- decimal=".",
- leading_space=None,
-):
+ values: Any,
+ formatter: Optional[Callable],
+ float_format: Optional[float_format_type] = None,
+ na_rep: str = "NaN",
+ digits: Optional[int] = None,
+ space: Optional[Union[str, int]] = None,
+ justify: str = "right",
+ decimal: str = ".",
+ leading_space: Optional[bool] = None,
+) -> List[str]:
"""
Format an array for printing.
@@ -994,7 +1103,7 @@ def format_array(
"""
if is_datetime64_dtype(values.dtype):
- fmt_klass = Datetime64Formatter
+ fmt_klass = Datetime64Formatter # type: Type[GenericArrayFormatter]
elif is_datetime64tz_dtype(values):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
@@ -1035,17 +1144,17 @@ def format_array(
class GenericArrayFormatter:
def __init__(
self,
- values,
- digits=7,
- formatter=None,
- na_rep="NaN",
- space=12,
- float_format=None,
- justify="right",
- decimal=".",
- quoting=None,
- fixed_width=True,
- leading_space=None,
+ values: Any,
+ digits: int = 7,
+ formatter: Optional[Callable] = None,
+ na_rep: str = "NaN",
+ space: Union[str, int] = 12,
+ float_format: Optional[float_format_type] = None,
+ justify: str = "right",
+ decimal: str = ".",
+ quoting: Optional[int] = None,
+ fixed_width: bool = True,
+ leading_space: Optional[bool] = None,
):
self.values = values
self.digits = digits
@@ -1059,11 +1168,11 @@ def __init__(
self.fixed_width = fixed_width
self.leading_space = leading_space
- def get_result(self):
+ def get_result(self) -> List[str]:
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
@@ -1145,7 +1254,11 @@ def __init__(self, *args, **kwargs):
self.formatter = self.float_format
self.float_format = None
- def _value_formatter(self, float_format=None, threshold=None):
+ def _value_formatter(
+ self,
+ float_format: Optional[float_format_type] = None,
+ threshold: Optional[Union[float, int]] = None,
+ ) -> Callable:
"""Returns a function to be applied on each value to format it
"""
@@ -1191,7 +1304,7 @@ def formatter(value):
return formatter
- def get_result_as_array(self):
+ def get_result_as_array(self) -> np.ndarray:
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
@@ -1231,9 +1344,10 @@ def format_values_with(float_format):
if self.fixed_width:
if is_complex:
- return _trim_zeros_complex(values, na_rep)
+ result = _trim_zeros_complex(values, na_rep)
else:
- return _trim_zeros_float(values, na_rep)
+ result = _trim_zeros_float(values, na_rep)
+ return np.asarray(result, dtype="object")
return values
@@ -1243,7 +1357,7 @@ def format_values_with(float_format):
if self.fixed_width:
float_format = partial(
"{value: .{digits:d}f}".format, digits=self.digits
- )
+ ) # type: Optional[float_format_type]
else:
float_format = self.float_format
else:
@@ -1280,7 +1394,7 @@ def format_values_with(float_format):
return formatted_values
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
@@ -1289,19 +1403,25 @@ def _format_strings(self):
class IntArrayFormatter(GenericArrayFormatter):
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
formatter = self.formatter or (lambda x: "{x: d}".format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
- def __init__(self, values, nat_rep="NaT", date_format=None, **kwargs):
+ def __init__(
+ self,
+ values: Union[np.ndarray, "Series", DatetimeIndex, DatetimeArray],
+ nat_rep: str = "NaT",
+ date_format: None = None,
+ **kwargs
+ ):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
""" we by definition have DO NOT have a TZ """
values = self.values
@@ -1321,7 +1441,7 @@ def _format_strings(self):
class ExtensionArrayFormatter(GenericArrayFormatter):
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
values = self.values
if isinstance(values, (ABCIndexClass, ABCSeries)):
values = values._values
@@ -1347,7 +1467,11 @@ def _format_strings(self):
return fmt_values
-def format_percentiles(percentiles):
+def format_percentiles(
+ percentiles: Union[
+ np.ndarray, List[Union[int, float]], List[float], List[Union[str, float]]
+ ]
+) -> List[str]:
"""
Outputs rounded and formatted percentiles.
@@ -1413,7 +1537,9 @@ def format_percentiles(percentiles):
return [i + "%" for i in out]
-def _is_dates_only(values):
+def _is_dates_only(
+ values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex]
+) -> bool:
# return a boolean if we are only dates (and don't have a timezone)
assert values.ndim == 1
@@ -1432,7 +1558,9 @@ def _is_dates_only(values):
return False
-def _format_datetime64(x, tz=None, nat_rep="NaT"):
+def _format_datetime64(
+ x: Union[NaTType, Timestamp], tz: Optional[tzinfo] = None, nat_rep: str = "NaT"
+) -> str:
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
@@ -1445,7 +1573,9 @@ def _format_datetime64(x, tz=None, nat_rep="NaT"):
return str(x)
-def _format_datetime64_dateonly(x, nat_rep="NaT", date_format=None):
+def _format_datetime64_dateonly(
+ x: Union[NaTType, Timestamp], nat_rep: str = "NaT", date_format: None = None
+) -> str:
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
@@ -1458,7 +1588,9 @@ def _format_datetime64_dateonly(x, nat_rep="NaT", date_format=None):
return x._date_repr
-def _get_format_datetime64(is_dates_only, nat_rep="NaT", date_format=None):
+def _get_format_datetime64(
+ is_dates_only: bool, nat_rep: str = "NaT", date_format: None = None
+) -> Callable:
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
@@ -1468,7 +1600,9 @@ def _get_format_datetime64(is_dates_only, nat_rep="NaT", date_format=None):
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
-def _get_format_datetime64_from_values(values, date_format):
+def _get_format_datetime64_from_values(
+ values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str]
+) -> Optional[str]:
""" given values and a date_format, return a string format """
if isinstance(values, np.ndarray) and values.ndim > 1:
@@ -1483,7 +1617,7 @@ def _get_format_datetime64_from_values(values, date_format):
class Datetime64TZFormatter(Datetime64Formatter):
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
""" we by definition have a TZ """
values = self.values.astype(object)
@@ -1497,20 +1631,29 @@ def _format_strings(self):
class Timedelta64Formatter(GenericArrayFormatter):
- def __init__(self, values, nat_rep="NaT", box=False, **kwargs):
+ def __init__(
+ self,
+ values: Union[np.ndarray, TimedeltaIndex],
+ nat_rep: str = "NaT",
+ box: bool = False,
+ **kwargs
+ ):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
- def _format_strings(self):
+ def _format_strings(self) -> List[str]:
formatter = self.formatter or _get_format_timedelta64(
self.values, nat_rep=self.nat_rep, box=self.box
)
- fmt_values = np.array([formatter(x) for x in self.values])
- return fmt_values
+ return [formatter(x) for x in self.values]
-def _get_format_timedelta64(values, nat_rep="NaT", box=False):
+def _get_format_timedelta64(
+ values: Union[np.ndarray, TimedeltaIndex, TimedeltaArray],
+ nat_rep: str = "NaT",
+ box: bool = False,
+) -> Callable:
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
@@ -1551,7 +1694,12 @@ def _formatter(x):
return _formatter
-def _make_fixed_width(strings, justify="right", minimum=None, adj=None):
+def _make_fixed_width(
+ strings: List[str],
+ justify: str = "right",
+ minimum: Optional[int] = None,
+ adj: Optional[TextAdjustment] = None,
+) -> List[str]:
if len(strings) == 0 or justify == "all":
return strings
@@ -1579,25 +1727,20 @@ def just(x):
return result
-def _trim_zeros_complex(str_complexes, na_rep="NaN"):
+def _trim_zeros_complex(str_complexes: np.ndarray, na_rep: str = "NaN") -> List[str]:
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
+ return [
+ "".join(_trim_zeros_float(re.split(r"([j+-])", x), na_rep))
+ for x in str_complexes
+ ]
- def separate_and_trim(str_complex, na_rep):
- num_arr = str_complex.split("+")
- return (
- _trim_zeros_float([num_arr[0]], na_rep)
- + ["+"]
- + _trim_zeros_float([num_arr[1][:-1]], na_rep)
- + ["j"]
- )
- return ["".join(separate_and_trim(x, na_rep)) for x in str_complexes]
-
-
-def _trim_zeros_float(str_floats, na_rep="NaN"):
+def _trim_zeros_float(
+ str_floats: Union[np.ndarray, List[str]], na_rep: str = "NaN"
+) -> List[str]:
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
@@ -1621,9 +1764,9 @@ def _cond(values):
return [x + "0" if x.endswith(".") and _is_number(x) else x for x in trimmed]
-def _has_names(index):
+def _has_names(index: Index) -> bool:
if isinstance(index, ABCMultiIndex):
- return com._any_not_none(*index.names)
+ return com.any_not_none(*index.names)
else:
return index.name is not None
@@ -1656,11 +1799,11 @@ class EngFormatter:
24: "Y",
}
- def __init__(self, accuracy=None, use_eng_prefix=False):
+ def __init__(self, accuracy: Optional[int] = None, use_eng_prefix: bool = False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
- def __call__(self, num):
+ def __call__(self, num: Union[int, float]) -> str:
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
@@ -1681,9 +1824,6 @@ def __call__(self, num):
@return: engineering formatted string
"""
- import decimal
- import math
-
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
@@ -1727,7 +1867,7 @@ def __call__(self, num):
return formatted
-def set_eng_float_format(accuracy=3, use_eng_prefix=False):
+def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
@@ -1740,7 +1880,7 @@ def set_eng_float_format(accuracy=3, use_eng_prefix=False):
set_option("display.column_space", max(12, accuracy + 9))
-def _binify(cols, line_width):
+def _binify(cols: List[np.int32], line_width: Union[np.int32, int]) -> List[int]:
adjoin_width = 1
bins = []
curr_width = 0
@@ -1760,7 +1900,9 @@ def _binify(cols, line_width):
return bins
-def get_level_lengths(levels, sentinel=""):
+def get_level_lengths(
+ levels: Any, sentinel: Union[bool, object, str] = ""
+) -> List[Dict[int, int]]:
"""For each index in each level the function returns lengths of indexes.
Parameters
@@ -1800,7 +1942,7 @@ def get_level_lengths(levels, sentinel=""):
return result
-def buffer_put_lines(buf, lines):
+def buffer_put_lines(buf: IO[str], lines: List[str]) -> None:
"""
Appends lines to a buffer.
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index c2f4ee2c4a68b..50fa4796f8d72 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -4,11 +4,11 @@
from collections import OrderedDict
from textwrap import dedent
-from typing import Dict, List, Optional, Tuple, Union
+from typing import IO, Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pandas._config import get_option
-from pandas.core.dtypes.generic import ABCIndex, ABCMultiIndex
+from pandas.core.dtypes.generic import ABCMultiIndex
from pandas import option_context
@@ -16,6 +16,7 @@
from pandas.io.formats.format import (
DataFrameFormatter,
TableFormatter,
+ buffer_put_lines,
get_level_lengths,
)
from pandas.io.formats.printing import pprint_thing
@@ -36,8 +37,8 @@ class HTMLFormatter(TableFormatter):
def __init__(
self,
formatter: DataFrameFormatter,
- classes: Optional[Union[str, List, Tuple]] = None,
- border: Optional[bool] = None,
+ classes: Optional[Union[str, List[str], Tuple[str, ...]]] = None,
+ border: Optional[int] = None,
) -> None:
self.fmt = formatter
self.classes = classes
@@ -45,11 +46,11 @@ def __init__(
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = [] # type: List[str]
- self.bold_rows = self.fmt.kwds.get("bold_rows", False)
- self.escape = self.fmt.kwds.get("escape", True)
+ self.bold_rows = self.fmt.bold_rows
+ self.escape = self.fmt.escape
self.show_dimensions = self.fmt.show_dimensions
if border is None:
- border = get_option("display.html.border")
+ border = cast(int, get_option("display.html.border"))
self.border = border
self.table_id = self.fmt.table_id
self.render_links = self.fmt.render_links
@@ -79,23 +80,24 @@ def row_levels(self) -> int:
# not showing (row) index
return 0
- def _get_columns_formatted_values(self) -> ABCIndex:
+ def _get_columns_formatted_values(self) -> Iterable:
return self.columns
+ # https://github.com/python/mypy/issues/1237
@property
- def is_truncated(self) -> bool:
+ def is_truncated(self) -> bool: # type: ignore
return self.fmt.is_truncated
@property
def ncols(self) -> int:
return len(self.fmt.tr_frame.columns)
- def write(self, s: str, indent: int = 0) -> None:
+ def write(self, s: Any, indent: int = 0) -> None:
rs = pprint_thing(s)
self.elements.append(" " * indent + rs)
def write_th(
- self, s: str, header: bool = False, indent: int = 0, tags: Optional[str] = None
+ self, s: Any, header: bool = False, indent: int = 0, tags: Optional[str] = None
) -> None:
"""
Method for writting a formatted <th> cell.
@@ -125,11 +127,11 @@ def write_th(
self._write_cell(s, kind="th", indent=indent, tags=tags)
- def write_td(self, s: str, indent: int = 0, tags: Optional[str] = None) -> None:
+ def write_td(self, s: Any, indent: int = 0, tags: Optional[str] = None) -> None:
self._write_cell(s, kind="td", indent=indent, tags=tags)
def _write_cell(
- self, s: str, kind: str = "td", indent: int = 0, tags: Optional[str] = None
+ self, s: Any, kind: str = "td", indent: int = 0, tags: Optional[str] = None
) -> None:
if tags is not None:
start_tag = "<{kind} {tags}>".format(kind=kind, tags=tags)
@@ -162,7 +164,7 @@ def _write_cell(
def write_tr(
self,
- line: List[str],
+ line: Iterable,
indent: int = 0,
indent_delta: int = 0,
header: bool = False,
@@ -202,6 +204,9 @@ def render(self) -> List[str]:
return self.elements
+ def write_result(self, buf: IO[str]) -> None:
+ buffer_put_lines(buf, self.render())
+
def _write_table(self, indent: int = 0) -> None:
_classes = ["dataframe"] # Default class.
use_mathjax = get_option("display.html.use_mathjax")
@@ -372,7 +377,7 @@ def _write_header(self, indent: int) -> None:
self.write("</thead>", indent)
def _get_formatted_values(self) -> Dict[int, List[str]]:
- with option_context("display.max_colwidth", 999999):
+ with option_context("display.max_colwidth", None):
fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)}
return fmt_values
@@ -458,6 +463,8 @@ def _write_hierarchical_rows(
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
+ # cast here since if truncate_v is True, self.fmt.tr_row_num is not None
+ ins_row = cast(int, ins_row)
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index dad099b747701..ca9db88ae7be4 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -1,11 +1,13 @@
"""
Module for formatting output data in Latex.
"""
+from typing import IO, List, Optional, Tuple
+
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
-from pandas.io.formats.format import TableFormatter
+from pandas.io.formats.format import DataFrameFormatter, TableFormatter
class LatexFormatter(TableFormatter):
@@ -28,25 +30,31 @@ class LatexFormatter(TableFormatter):
def __init__(
self,
- formatter,
- column_format=None,
- longtable=False,
- multicolumn=False,
- multicolumn_format=None,
- multirow=False,
+ formatter: DataFrameFormatter,
+ column_format: Optional[str] = None,
+ longtable: bool = False,
+ multicolumn: bool = False,
+ multicolumn_format: Optional[str] = None,
+ multirow: bool = False,
+ caption: Optional[str] = None,
+ label: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
- self.bold_rows = self.fmt.kwds.get("bold_rows", False)
+ self.bold_rows = self.fmt.bold_rows
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
+ self.caption = caption
+ self.label = label
+ self.escape = self.fmt.escape
- def write_result(self, buf):
+ def write_result(self, buf: IO[str]) -> None:
"""
- Render a DataFrame to a LaTeX tabular/longtable environment output.
+ Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
+ environment output.
"""
# string representation of the columns
@@ -111,12 +119,12 @@ def pad_empties(x):
"not {typ}".format(typ=type(column_format))
)
- if not self.longtable:
- buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
- buf.write("\\toprule\n")
+ if self.longtable:
+ self._write_longtable_begin(buf, column_format)
else:
- buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
- buf.write("\\toprule\n")
+ self._write_tabular_begin(buf, column_format)
+
+ buf.write("\\toprule\n")
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
@@ -124,7 +132,7 @@ def pad_empties(x):
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
- self.clinebuf = []
+ self.clinebuf = [] # type: List[List[int]]
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
@@ -140,7 +148,7 @@ def pad_empties(x):
buf.write("\\endfoot\n\n")
buf.write("\\bottomrule\n")
buf.write("\\endlastfoot\n")
- if self.fmt.kwds.get("escape", True):
+ if self.escape:
# escape backslashes first
crow = [
(
@@ -180,13 +188,12 @@ def pad_empties(x):
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
- if not self.longtable:
- buf.write("\\bottomrule\n")
- buf.write("\\end{tabular}\n")
+ if self.longtable:
+ self._write_longtable_end(buf)
else:
- buf.write("\\end{longtable}\n")
+ self._write_tabular_end(buf)
- def _format_multicolumn(self, row, ilevels):
+ def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
@@ -227,7 +234,9 @@ def append_col():
append_col()
return row2
- def _format_multirow(self, row, ilevels, i, rows):
+ def _format_multirow(
+ self, row: List[str], ilevels: int, i: int, rows: List[Tuple[str, ...]]
+ ) -> List[str]:
r"""
Check following rows, whether row should be a multirow
@@ -254,7 +263,7 @@ def _format_multirow(self, row, ilevels, i, rows):
self.clinebuf.append([i + nrow - 1, j + 1])
return row
- def _print_cline(self, buf, i, icol):
+ def _print_cline(self, buf: IO[str], i: int, icol: int) -> None:
"""
Print clines after multirow-blocks are finished
"""
@@ -263,3 +272,107 @@ def _print_cline(self, buf, i, icol):
buf.write("\\cline{{{cl:d}-{icol:d}}}\n".format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
+
+ def _write_tabular_begin(self, buf, column_format):
+ """
+ Write the beginning of a tabular environment or
+ nested table/tabular environments including caption and label.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+ column_format : str, default None
+ The columns format as specified in `LaTeX table format
+ <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl'
+ for 3 columns
+
+ """
+ if self.caption is not None or self.label is not None:
+ # then write output in a nested table/tabular environment
+ if self.caption is None:
+ caption_ = ""
+ else:
+ caption_ = "\n\\caption{{{}}}".format(self.caption)
+
+ if self.label is None:
+ label_ = ""
+ else:
+ label_ = "\n\\label{{{}}}".format(self.label)
+
+ buf.write("\\begin{{table}}\n\\centering{}{}\n".format(caption_, label_))
+ else:
+ # then write output only in a tabular environment
+ pass
+
+ buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
+
+ def _write_tabular_end(self, buf):
+ """
+ Write the end of a tabular environment or nested table/tabular
+ environment.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+
+ """
+ buf.write("\\bottomrule\n")
+ buf.write("\\end{tabular}\n")
+ if self.caption is not None or self.label is not None:
+ buf.write("\\end{table}\n")
+ else:
+ pass
+
+ def _write_longtable_begin(self, buf, column_format):
+ """
+ Write the beginning of a longtable environment including caption and
+ label if provided by user.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+ column_format : str, default None
+ The columns format as specified in `LaTeX table format
+ <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl'
+ for 3 columns
+
+ """
+ buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
+
+ if self.caption is not None or self.label is not None:
+ if self.caption is None:
+ pass
+ else:
+ buf.write("\\caption{{{}}}".format(self.caption))
+
+ if self.label is None:
+ pass
+ else:
+ buf.write("\\label{{{}}}".format(self.label))
+
+ # a double-backslash is required at the end of the line
+ # as discussed here:
+ # https://tex.stackexchange.com/questions/219138
+ buf.write("\\\\\n")
+ else:
+ pass
+
+ @staticmethod
+ def _write_longtable_end(buf):
+ """
+ Write the end of a longtable environment.
+
+ Parameters
+ ----------
+ buf : string or file handle
+ File path or object. If not specified, the result is returned as
+ a string.
+
+ """
+ buf.write("\\end{longtable}\n")
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 4958d8246610e..ead51693da791 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -3,13 +3,16 @@
"""
import sys
+from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from pandas._config import get_option
from pandas.core.dtypes.inference import is_sequence
+EscapeChars = Union[Dict[str, str], Iterable[str]]
-def adjoin(space, *lists, **kwargs):
+
+def adjoin(space: int, *lists: List[str], **kwargs) -> str:
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
@@ -40,11 +43,11 @@ def adjoin(space, *lists, **kwargs):
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
- out_lines.append(_join_unicode(lines))
- return _join_unicode(out_lines, sep="\n")
+ out_lines.append("".join(lines))
+ return "\n".join(out_lines)
-def justify(texts, max_len, mode="right"):
+def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> List[str]:
"""
Perform ljust, center, rjust against string or list-like
"""
@@ -56,14 +59,6 @@ def justify(texts, max_len, mode="right"):
return [x.rjust(max_len) for x in texts]
-def _join_unicode(lines, sep=""):
- try:
- return sep.join(lines)
- except UnicodeDecodeError:
- sep = str(sep)
- return sep.join([x.decode("utf-8") if isinstance(x, str) else x for x in lines])
-
-
# Unicode consolidation
# ---------------------
#
@@ -88,7 +83,9 @@ def _join_unicode(lines, sep=""):
# working with straight ascii.
-def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
+def _pprint_seq(
+ seq: Sequence, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
+) -> str:
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
@@ -121,7 +118,9 @@ def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
return fmt.format(body=body)
-def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
+def _pprint_dict(
+ seq: Dict, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
+) -> str:
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
@@ -151,19 +150,16 @@ def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
def pprint_thing(
- thing,
- _nest_lvl=0,
- escape_chars=None,
- default_escapes=False,
- quote_strings=False,
- max_seq_items=None,
-):
+ thing: Any,
+ _nest_lvl: int = 0,
+ escape_chars: Optional[EscapeChars] = None,
+ default_escapes: bool = False,
+ quote_strings: bool = False,
+ max_seq_items: Optional[int] = None,
+) -> str:
"""
This function is the sanctioned way of converting objects
- to a unicode representation.
-
- properly handles nested sequences containing unicode strings
- (unicode(object) does not)
+ to a string representation and properly handles nested sequences.
Parameters
----------
@@ -181,21 +177,13 @@ def pprint_thing(
Returns
-------
- result - unicode str
+ str
"""
- def as_escaped_unicode(thing, escape_chars=escape_chars):
- # Unicode is fine, else we try to decode using utf-8 and 'replace'
- # if that's not it either, we have no way of knowing and the user
- # should deal with it himself.
-
- try:
- result = str(thing) # we should try this first
- except UnicodeDecodeError:
- # either utf-8 or we replace errors
- result = str(thing).decode("utf-8", "replace")
-
+ def as_escaped_string(
+ thing: Any, escape_chars: Optional[EscapeChars] = escape_chars
+ ) -> str:
translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
if isinstance(escape_chars, dict):
if default_escapes:
@@ -205,10 +193,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars):
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
+
+ result = str(thing)
for c in escape_chars:
result = result.replace(c, translate[c])
-
- return str(result)
+ return result
if hasattr(thing, "__next__"):
return str(thing)
@@ -227,19 +216,21 @@ def as_escaped_unicode(thing, escape_chars=escape_chars):
max_seq_items=max_seq_items,
)
elif isinstance(thing, str) and quote_strings:
- result = "'{thing}'".format(thing=as_escaped_unicode(thing))
+ result = "'{thing}'".format(thing=as_escaped_string(thing))
else:
- result = as_escaped_unicode(thing)
+ result = as_escaped_string(thing)
- return str(result) # always unicode
+ return result
-def pprint_thing_encoded(object, encoding="utf-8", errors="replace", **kwds):
+def pprint_thing_encoded(
+ object, encoding: str = "utf-8", errors: str = "replace"
+) -> bytes:
value = pprint_thing(object) # get unicode representation of object
- return value.encode(encoding, errors, **kwds)
+ return value.encode(encoding, errors)
-def _enable_data_resource_formatter(enable):
+def _enable_data_resource_formatter(enable: bool) -> None:
if "IPython" not in sys.modules:
# definitely not in IPython
return
@@ -279,12 +270,12 @@ class TableSchemaFormatter(BaseFormatter):
def format_object_summary(
obj,
- formatter,
- is_justify=True,
- name=None,
- indent_for_name=True,
- line_break_each_value=False,
-):
+ formatter: Callable,
+ is_justify: bool = True,
+ name: Optional[str] = None,
+ indent_for_name: bool = True,
+ line_break_each_value: bool = False,
+) -> str:
"""
Return the formatted obj as a unicode string
@@ -448,7 +439,9 @@ def best_len(values):
return summary
-def _justify(head, tail):
+def _justify(
+ head: List[Sequence[str]], tail: List[Sequence[str]]
+) -> Tuple[List[Tuple[str, ...]], List[Tuple[str, ...]]]:
"""
Justify items in head and tail, so they are right-aligned when stacked.
@@ -484,10 +477,16 @@ def _justify(head, tail):
tail = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
]
- return head, tail
+ # https://github.com/python/mypy/issues/4975
+ # error: Incompatible return value type (got "Tuple[List[Sequence[str]],
+ # List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]],
+ # List[Tuple[str, ...]]]")
+ return head, tail # type: ignore
-def format_object_attrs(obj, include_dtype=True):
+def format_object_attrs(
+ obj: Sequence, include_dtype: bool = True
+) -> List[Tuple[str, Union[str, int]]]:
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
@@ -501,16 +500,20 @@ def format_object_attrs(obj, include_dtype=True):
Returns
-------
- list
+ list of 2-tuple
"""
- attrs = []
+ attrs = [] # type: List[Tuple[str, Union[str, int]]]
if hasattr(obj, "dtype") and include_dtype:
- attrs.append(("dtype", "'{}'".format(obj.dtype)))
+ # error: "Sequence[Any]" has no attribute "dtype"
+ attrs.append(("dtype", "'{}'".format(obj.dtype))) # type: ignore
if getattr(obj, "name", None) is not None:
- attrs.append(("name", default_pprint(obj.name)))
- elif getattr(obj, "names", None) is not None and any(obj.names):
- attrs.append(("names", default_pprint(obj.names)))
+ # error: "Sequence[Any]" has no attribute "name"
+ attrs.append(("name", default_pprint(obj.name))) # type: ignore
+ # error: "Sequence[Any]" has no attribute "names"
+ elif getattr(obj, "names", None) is not None and any(obj.names): # type: ignore
+ # error: "Sequence[Any]" has no attribute "names"
+ attrs.append(("names", default_pprint(obj.names))) # type: ignore
max_seq_items = get_option("display.max_seq_items") or len(obj)
if len(obj) > max_seq_items:
attrs.append(("length", len(obj)))
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 98349fe1e4792..95e1084747aa3 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -316,7 +316,7 @@ def format_attr(pair):
if (
self.data.index.names
- and com._any_not_none(*self.data.index.names)
+ and com.any_not_none(*self.data.index.names)
and not hidden_index
):
index_header_row = []
@@ -414,8 +414,6 @@ def format(self, formatter, subset=None):
"""
Format the text display value of cells.
- .. versionadded:: 0.18.0
-
Parameters
----------
formatter : str, callable, or dict
@@ -647,7 +645,7 @@ def apply(self, func, axis=0, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``func``
Returns
@@ -699,7 +697,7 @@ def applymap(self, func, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``func``
Returns
@@ -734,7 +732,7 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``cond``
Returns
@@ -967,8 +965,10 @@ def background_gradient(
----------
cmap : str or colormap
matplotlib colormap
- low, high : float
- compress the range by these values.
+ low : float
+ compress the range by the low.
+ high : float
+ compress the range by the high.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
@@ -1080,7 +1080,7 @@ def set_properties(self, subset=None, **kwargs):
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
- kwargs : dict
+ **kwargs : dict
property: value pairs to be set for each cell
Returns
@@ -1352,8 +1352,10 @@ def pipe(self, func, *args, **kwargs):
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
- *args, **kwargs :
+ *args : optional
Arguments passed to `func`.
+ **kwargs : optional
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
@@ -1407,7 +1409,7 @@ def pipe(self, func, *args, **kwargs):
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
"""
- return com._pipe(self, func, *args, **kwargs)
+ return com.pipe(self, func, *args, **kwargs)
def _is_visible(idx_row, idx_col, lengths):
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 12c8ec4214b38..490c574463b9b 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -1,4 +1,5 @@
-""":mod:`pandas.io.html` is a module containing functionality for dealing with
+"""
+:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
@@ -58,7 +59,8 @@ def _importers():
def _remove_whitespace(s, regex=_RE_WHITESPACE):
- """Replace extra whitespace inside of a string with a single space.
+ """
+ Replace extra whitespace inside of a string with a single space.
Parameters
----------
@@ -77,7 +79,8 @@ def _remove_whitespace(s, regex=_RE_WHITESPACE):
def _get_skiprows(skiprows):
- """Get an iterator given an integer, slice or container.
+ """
+ Get an iterator given an integer, slice or container.
Parameters
----------
@@ -107,7 +110,8 @@ def _get_skiprows(skiprows):
def _read(obj):
- """Try to read from a url, file or string.
+ """
+ Try to read from a url, file or string.
Parameters
----------
@@ -136,7 +140,8 @@ def _read(obj):
class _HtmlFrameParser:
- """Base class for parsers that parse HTML into DataFrames.
+ """
+ Base class for parsers that parse HTML into DataFrames.
Parameters
----------
@@ -515,7 +520,8 @@ def _handle_hidden_tables(self, tbl_list, attr_name):
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
- """HTML to DataFrame parser that uses BeautifulSoup under the hood.
+ """
+ HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
--------
@@ -622,7 +628,8 @@ def _build_xpath_expr(attrs):
class _LxmlFrameParser(_HtmlFrameParser):
- """HTML to DataFrame parser that uses lxml under the hood.
+ """
+ HTML to DataFrame parser that uses lxml under the hood.
Warning
-------
@@ -937,7 +944,8 @@ def read_html(
keep_default_na=True,
displayed_only=True,
):
- r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
+ r"""
+ Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
@@ -1011,32 +1019,22 @@ def read_html(
Character to recognize as decimal point (e.g. use ',' for European
data).
- .. versionadded:: 0.19.0
-
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
- .. versionadded:: 0.19.0
-
na_values : iterable, default None
Custom NA values
- .. versionadded:: 0.19.0
-
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
- .. versionadded:: 0.19.0
-
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
- .. versionadded:: 0.23.0
-
Returns
-------
dfs : list of DataFrames
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index ada7e6f43125d..73f4985e201f1 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1,6 +1,8 @@
+from collections import OrderedDict
from io import StringIO
from itertools import islice
import os
+from typing import Any, Callable, Dict, List, Optional, Type, Union
import numpy as np
@@ -11,6 +13,7 @@
from pandas.core.dtypes.common import ensure_str, is_period_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
+from pandas._typing import Scalar
from pandas.core.reshape.concat import concat
from pandas.io.common import (
@@ -31,20 +34,23 @@
TABLE_SCHEMA_VERSION = "0.20.0"
+Serializable = Union[Scalar, List, Dict]
+
# interface to/from
def to_json(
path_or_buf,
obj,
- orient=None,
- date_format="epoch",
- double_precision=10,
- force_ascii=True,
- date_unit="ms",
- default_handler=None,
- lines=False,
- compression="infer",
- index=True,
+ orient: Optional[str] = None,
+ date_format: str = "epoch",
+ double_precision: int = 10,
+ force_ascii: bool = True,
+ date_unit: str = "ms",
+ default_handler: Optional[Callable[[Any], Serializable]] = None,
+ lines: bool = False,
+ compression: Optional[str] = "infer",
+ index: bool = True,
+ indent: int = 0,
):
if not index and orient not in ["split", "table"]:
@@ -59,7 +65,7 @@ def to_json(
if orient == "table" and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or "values")
if orient == "table" and isinstance(obj, DataFrame):
- writer = JSONTableWriter
+ writer = JSONTableWriter # type: Type["Writer"]
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
@@ -76,6 +82,7 @@ def to_json(
date_unit=date_unit,
default_handler=default_handler,
index=index,
+ indent=indent,
).write()
if lines:
@@ -97,18 +104,19 @@ class Writer:
def __init__(
self,
obj,
- orient,
- date_format,
- double_precision,
- ensure_ascii,
- date_unit,
- index,
- default_handler=None,
+ orient: Optional[str],
+ date_format: str,
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ index: bool,
+ default_handler: Optional[Callable[[Any], Serializable]] = None,
+ indent: int = 0,
):
self.obj = obj
if orient is None:
- orient = self._default_orient
+ orient = self._default_orient # type: ignore
self.orient = orient
self.date_format = date_format
@@ -117,6 +125,7 @@ def __init__(
self.date_unit = date_unit
self.default_handler = default_handler
self.index = index
+ self.indent = indent
self.is_copy = None
self._format_axes()
@@ -133,17 +142,19 @@ def write(self):
self.date_unit,
self.date_format == "iso",
self.default_handler,
+ self.indent,
)
def _write(
self,
obj,
- orient,
- double_precision,
- ensure_ascii,
- date_unit,
- iso_dates,
- default_handler,
+ orient: Optional[str],
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ iso_dates: bool,
+ default_handler: Optional[Callable[[Any], Serializable]],
+ indent: int,
):
return dumps(
obj,
@@ -153,6 +164,7 @@ def _write(
date_unit=date_unit,
iso_dates=iso_dates,
default_handler=default_handler,
+ indent=indent,
)
@@ -169,12 +181,13 @@ def _format_axes(self):
def _write(
self,
obj,
- orient,
- double_precision,
- ensure_ascii,
- date_unit,
- iso_dates,
- default_handler,
+ orient: Optional[str],
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ iso_dates: bool,
+ default_handler: Optional[Callable[[Any], Serializable]],
+ indent: int,
):
if not self.index and orient == "split":
obj = {"name": obj.name, "data": obj.values}
@@ -186,6 +199,7 @@ def _write(
date_unit,
iso_dates,
default_handler,
+ indent,
)
@@ -214,12 +228,13 @@ def _format_axes(self):
def _write(
self,
obj,
- orient,
- double_precision,
- ensure_ascii,
- date_unit,
- iso_dates,
- default_handler,
+ orient: Optional[str],
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ iso_dates: bool,
+ default_handler: Optional[Callable[[Any], Serializable]],
+ indent: int,
):
if not self.index and orient == "split":
obj = obj.to_dict(orient="split")
@@ -232,6 +247,7 @@ def _write(
date_unit,
iso_dates,
default_handler,
+ indent,
)
@@ -241,13 +257,14 @@ class JSONTableWriter(FrameWriter):
def __init__(
self,
obj,
- orient,
- date_format,
- double_precision,
- ensure_ascii,
- date_unit,
- index,
- default_handler=None,
+ orient: Optional[str],
+ date_format: str,
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ index: bool,
+ default_handler: Optional[Callable[[Any], Serializable]] = None,
+ indent: int = 0,
):
"""
Adds a `schema` attribute with the Table Schema, resets
@@ -255,6 +272,7 @@ def __init__(
to know what the index is, forces orient to records, and forces
date_format to 'iso'.
"""
+
super().__init__(
obj,
orient,
@@ -264,6 +282,7 @@ def __init__(
date_unit,
index,
default_handler=default_handler,
+ indent=indent,
)
if date_format != "iso":
@@ -315,19 +334,20 @@ def _write(
date_unit,
iso_dates,
default_handler,
+ indent,
):
- data = super()._write(
- obj,
+ table_obj = OrderedDict((("schema", self.schema), ("data", obj)))
+ serialized = super()._write(
+ table_obj,
orient,
double_precision,
ensure_ascii,
date_unit,
iso_dates,
default_handler,
+ indent,
)
- serialized = '{{"schema": {schema}, "data": {data}}}'.format(
- schema=dumps(self.schema), data=data
- )
+
return serialized
@@ -458,13 +478,9 @@ def read_json(
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
- .. versionadded:: 0.19.0
-
lines : bool, default False
Read the file as a json object per line.
- .. versionadded:: 0.19.0
-
chunksize : int, optional
Return JsonReader object for iteration.
See the `line-delimited json docs
@@ -591,10 +607,8 @@ def read_json(
result = json_reader.read()
if should_close:
- try:
- filepath_or_buffer.close()
- except: # noqa: flake8
- pass
+ filepath_or_buffer.close()
+
return result
@@ -976,10 +990,8 @@ def _try_convert_to_date(self, data):
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors="raise", unit=date_unit)
- except ValueError:
+ except (ValueError, OverflowError):
continue
- except Exception:
- break
return new_data, True
return data, False
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 1e7cd54d9f4a0..b142dbf76e6b3 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -76,7 +76,7 @@ def as_json_table_type(x):
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
- if com._all_not_none(*data.index.names):
+ if com.all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == "index":
warnings.warn("Index name of 'index' is not round-trippable")
diff --git a/pandas/io/msgpack/__init__.py b/pandas/io/msgpack/__init__.py
index 9b09cffd83f75..7107263c180cb 100644
--- a/pandas/io/msgpack/__init__.py
+++ b/pandas/io/msgpack/__init__.py
@@ -2,8 +2,8 @@
from collections import namedtuple
-from pandas.io.msgpack.exceptions import * # noqa
-from pandas.io.msgpack._version import version # noqa
+from pandas.io.msgpack.exceptions import * # noqa: F401,F403 isort:skip
+from pandas.io.msgpack._version import version # noqa: F401 isort:skip
class ExtType(namedtuple("ExtType", "code data")):
@@ -19,10 +19,14 @@ def __new__(cls, code, data):
return super().__new__(cls, code, data)
-import os # noqa
+import os # noqa: F401,E402 isort:skip
-from pandas.io.msgpack._packer import Packer # noqa
-from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
+from pandas.io.msgpack._unpacker import ( # noqa: F401,E402 isort:skip
+ Unpacker,
+ unpack,
+ unpackb,
+)
+from pandas.io.msgpack._packer import Packer # noqa: E402 isort:skip
def pack(o, stream, **kwargs):
diff --git a/pandas/io/msgpack/_packer.pyi b/pandas/io/msgpack/_packer.pyi
new file mode 100644
index 0000000000000..e95a1622c5615
--- /dev/null
+++ b/pandas/io/msgpack/_packer.pyi
@@ -0,0 +1,22 @@
+# flake8: noqa
+
+class Packer:
+ def __cinit__(self): ...
+ def __init__(
+ self,
+ default=...,
+ encoding=...,
+ unicode_errors=...,
+ use_single_float=...,
+ autoreset: int = ...,
+ use_bin_type: int = ...,
+ ): ...
+ def __dealloc__(self): ...
+ def _pack(self, o, nest_limit: int = ...) -> int: ...
+ def pack(self, obj): ...
+ def pack_ext_type(self, typecode, data): ...
+ def pack_array_header(self, size): ...
+ def pack_map_header(self, size): ...
+ def pack_map_pairs(self, pairs): ...
+ def reset(self) -> None: ...
+ def bytes(self): ...
diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx
index 0ed188074f3d9..19307e2334f1e 100644
--- a/pandas/io/msgpack/_packer.pyx
+++ b/pandas/io/msgpack/_packer.pyx
@@ -1,15 +1,17 @@
# coding: utf-8
# cython: embedsignature=True
-from cpython cimport (
- PyFloat_Check, PyLong_Check, PyInt_Check,
- PyDict_CheckExact, PyDict_Check,
- PyTuple_Check, PyList_Check,
- PyCallable_Check,
- PyUnicode_Check, PyBytes_Check,
- PyBytes_AsString,
- PyBytes_FromStringAndSize,
- PyUnicode_AsEncodedString)
+from cpython.bytes cimport (PyBytes_Check, PyBytes_AsString,
+ PyBytes_FromStringAndSize)
+from cpython.dict cimport PyDict_Check, PyDict_CheckExact
+from cpython.float cimport PyFloat_Check
+from cpython.int cimport PyInt_Check
+from cpython.list cimport PyList_Check
+from cpython.long cimport PyLong_Check
+from cpython.object cimport PyCallable_Check
+from cpython.tuple cimport PyTuple_Check
+from cpython.unicode cimport PyUnicode_Check, PyUnicode_AsEncodedString
+
from libc.stdlib cimport free, malloc
from pandas.io.msgpack.exceptions import PackValueError
diff --git a/pandas/io/msgpack/_unpacker.pyi b/pandas/io/msgpack/_unpacker.pyi
new file mode 100644
index 0000000000000..9910895947fb6
--- /dev/null
+++ b/pandas/io/msgpack/_unpacker.pyi
@@ -0,0 +1,59 @@
+# flake8: noqa
+
+def unpackb(
+ packed,
+ object_hook=...,
+ list_hook=...,
+ use_list=...,
+ encoding=...,
+ unicode_errors=...,
+ object_pairs_hook=...,
+ ext_hook=...,
+ max_str_len=...,
+ max_bin_len=...,
+ max_array_len=...,
+ max_map_len=...,
+ max_ext_len=...,
+): ...
+def unpack(
+ stream,
+ object_hook=...,
+ list_hook=...,
+ use_list=...,
+ encoding=...,
+ unicode_errors=...,
+ object_pairs_hook=...,
+): ...
+
+class Unpacker:
+ def __cinit__(self): ...
+ def __dealloc__(self): ...
+ def __init__(
+ self,
+ file_like=...,
+ read_size=...,
+ use_list=...,
+ object_hook=...,
+ object_pairs_hook=...,
+ list_hook=...,
+ encoding=...,
+ unicode_errors=...,
+ max_buffer_size: int = ...,
+ ext_hook=...,
+ max_str_len=...,
+ max_bin_len=...,
+ max_array_len=...,
+ max_map_len=...,
+ max_ext_len=...,
+ ): ...
+ def feed(self, next_bytes): ...
+ def append_buffer(self, _buf, _buf_len): ...
+ def read_from_file(self): ...
+ def _unpack(self, execute, write_bytes, iter=...): ...
+ def read_bytes(self, nbytes): ...
+ def unpack(self, write_bytes=...): ...
+ def skip(self, write_bytes=...): ...
+ def read_array_header(self, write_bytes=...): ...
+ def read_map_header(self, write_bytes=...): ...
+ def __iter__(self): ...
+ def __next__(self): ...
diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx
index c2e2dfc521a51..d7ebb194ef5c5 100644
--- a/pandas/io/msgpack/_unpacker.pyx
+++ b/pandas/io/msgpack/_unpacker.pyx
@@ -3,12 +3,11 @@
from cython cimport Py_ssize_t
-from cpython cimport (
- PyCallable_Check,
- PyBUF_SIMPLE, PyObject_GetBuffer, PyBuffer_Release, Py_buffer,
- PyBytes_Size,
- PyBytes_FromStringAndSize,
- PyBytes_AsString)
+from cpython.buffer cimport (PyBUF_SIMPLE, PyObject_GetBuffer,
+ PyBuffer_Release, Py_buffer)
+from cpython.bytes cimport (PyBytes_Size, PyBytes_AsString,
+ PyBytes_FromStringAndSize)
+from cpython.object cimport PyCallable_Check
cdef extern from "Python.h":
ctypedef struct PyObject
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 04e49708ff082..c0ace7996e1b9 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -85,7 +85,6 @@
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
-from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker
@@ -105,6 +104,17 @@ def to_msgpack(path_or_buf, *args, **kwargs):
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
+ Example pyarrow usage:
+
+ >>> import pandas as pd
+ >>> import pyarrow as pa
+ >>> df = pd.DataFrame({'A': [1, 2, 3]})
+ >>> context = pa.default_serialization_context()
+ >>> df_bytestring = context.serialize(df).to_buffer().to_pybytes()
+
+ For documentation on pyarrow, see `here
+ <https://arrow.apache.org/docs/python/index.html>`__.
+
Parameters
----------
path_or_buf : string File path, buffer-like, or None
@@ -120,7 +130,9 @@ def to_msgpack(path_or_buf, *args, **kwargs):
"to_msgpack is deprecated and will be removed in a "
"future version.\n"
"It is recommended to use pyarrow for on-the-wire "
- "transmission of pandas objects.",
+ "transmission of pandas objects.\n"
+ "For a full example, check\n"
+ "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_msgpack.html", # noqa: E501
FutureWarning,
stacklevel=3,
)
@@ -469,62 +481,37 @@ def encode(obj):
}
elif isinstance(obj, Series):
- if isinstance(obj, SparseSeries):
- raise NotImplementedError("msgpack sparse series is not implemented")
- # d = {'typ': 'sparse_series',
- # 'klass': obj.__class__.__name__,
- # 'dtype': obj.dtype.name,
- # 'index': obj.index,
- # 'sp_index': obj.sp_index,
- # 'sp_values': convert(obj.sp_values),
- # 'compress': compressor}
- # for f in ['name', 'fill_value', 'kind']:
- # d[f] = getattr(obj, f, None)
- # return d
- else:
- return {
- "typ": "series",
- "klass": obj.__class__.__name__,
- "name": getattr(obj, "name", None),
- "index": obj.index,
- "dtype": obj.dtype.name,
- "data": convert(obj.values),
- "compress": compressor,
- }
+ return {
+ "typ": "series",
+ "klass": obj.__class__.__name__,
+ "name": getattr(obj, "name", None),
+ "index": obj.index,
+ "dtype": obj.dtype.name,
+ "data": convert(obj.values),
+ "compress": compressor,
+ }
elif issubclass(tobj, NDFrame):
- if isinstance(obj, SparseDataFrame):
- raise NotImplementedError("msgpack sparse frame is not implemented")
- # d = {'typ': 'sparse_dataframe',
- # 'klass': obj.__class__.__name__,
- # 'columns': obj.columns}
- # for f in ['default_fill_value', 'default_kind']:
- # d[f] = getattr(obj, f, None)
- # d['data'] = dict([(name, ss)
- # for name, ss in obj.items()])
- # return d
- else:
-
- data = obj._data
- if not data.is_consolidated():
- data = data.consolidate()
+ data = obj._data
+ if not data.is_consolidated():
+ data = data.consolidate()
- # the block manager
- return {
- "typ": "block_manager",
- "klass": obj.__class__.__name__,
- "axes": data.axes,
- "blocks": [
- {
- "locs": b.mgr_locs.as_array,
- "values": convert(b.values),
- "shape": b.values.shape,
- "dtype": b.dtype.name,
- "klass": b.__class__.__name__,
- "compress": compressor,
- }
- for b in data.blocks
- ],
- }
+ # the block manager
+ return {
+ "typ": "block_manager",
+ "klass": obj.__class__.__name__,
+ "axes": data.axes,
+ "blocks": [
+ {
+ "locs": b.mgr_locs.as_array,
+ "values": convert(b.values),
+ "shape": b.values.shape,
+ "dtype": b.dtype.name,
+ "klass": b.__class__.__name__,
+ "compress": compressor,
+ }
+ for b in data.blocks
+ ],
+ }
elif (
isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64))
@@ -708,18 +695,6 @@ def create_block(b):
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
- # elif typ == 'sparse_series':
- # dtype = dtype_for(obj['dtype'])
- # return SparseSeries(
- # unconvert(obj['sp_values'], dtype, obj['compress']),
- # sparse_index=obj['sp_index'], index=obj['index'],
- # fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
- # elif typ == 'sparse_dataframe':
- # return SparseDataFrame(
- # obj['data'], columns=obj['columns'],
- # default_fill_value=obj['default_fill_value'],
- # default_kind=obj['default_kind']
- # )
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
@@ -846,7 +821,6 @@ def __init__(
class Iterator:
-
""" manage the unpacking iteration,
close the file on completion """
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 617f4f44ae8af..69ee6583d12c8 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -7,7 +7,7 @@
from pandas import DataFrame, get_option
-from pandas.io.common import get_filepath_or_buffer, is_s3_url
+from pandas.io.common import get_filepath_or_buffer, is_gcs_url, is_s3_url
def get_engine(engine):
@@ -125,10 +125,7 @@ def read(self, path, columns=None, **kwargs):
path, columns=columns, **kwargs
).to_pandas()
if should_close:
- try:
- path.close()
- except: # noqa: flake8
- pass
+ path.close()
return result
@@ -162,12 +159,12 @@ def write(
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
- if is_s3_url(path):
- # path is s3:// so we need to open the s3file in 'wb' mode.
+ if is_s3_url(path) or is_gcs_url(path):
+ # if path is s3:// or gs:// we need to open the file in 'wb' mode.
# TODO: Support 'ab'
path, _, _, _ = get_filepath_or_buffer(path, mode="wb")
- # And pass the opened s3file to the fastparquet internal impl.
+ # And pass the opened file to the fastparquet internal impl.
kwargs["open_with"] = lambda path, _: path
else:
path, _, _, _ = get_filepath_or_buffer(path)
@@ -184,12 +181,14 @@ def write(
def read(self, path, columns=None, **kwargs):
if is_s3_url(path):
+ from pandas.io.s3 import get_file_and_filesystem
+
# When path is s3:// an S3File is returned.
# We need to retain the original path(str) while also
# pass the S3File().open function to fsatparquet impl.
- s3, _, _, should_close = get_filepath_or_buffer(path)
+ s3, filesystem = get_file_and_filesystem(path)
try:
- parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open)
+ parquet_file = self.api.ParquetFile(path, open_with=filesystem.open)
finally:
s3.close()
else:
@@ -228,10 +227,14 @@ def to_parquet(
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output. If
- ``False``, they will not be written to the file. If ``None``, the
- engine's default behavior will be used.
+ ``False``, they will not be written to the file.
+ If ``None``, similar to ``True`` the dataframe's index(es)
+ will be saved. However, instead of being saved as values,
+ the RangeIndex will be stored as a range in the metadata so it
+ doesn't require much space and is faster. Other indexes will
+ be included as columns in the file output.
- .. versionadded 0.24.0
+ .. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset
@@ -257,7 +260,7 @@ def read_parquet(path, engine="auto", columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
- .. versionadded 0.21.0
+ .. versionadded:: 0.21.0
Parameters
----------
@@ -281,7 +284,7 @@ def read_parquet(path, engine="auto", columns=None, **kwargs):
columns : list, default=None
If not None, only these columns will be read from the file.
- .. versionadded 0.21.1
+ .. versionadded:: 0.21.1
**kwargs
Any additional kwargs are passed to the engine.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 300f17bd25432..3678e32943b2e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -277,9 +277,6 @@
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
-
- .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
-
thousands : str, optional
Thousands separator.
decimal : str, default '.'
@@ -329,9 +326,6 @@
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
-
- .. versionadded:: 0.18.1 support for the Python parser.
-
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
@@ -693,7 +687,7 @@ def parser_f(
read_csv = Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
- summary=("Read a comma-separated values (csv) file " "into DataFrame."),
+ summary=("Read a comma-separated values (csv) file into DataFrame."),
_default_sep="','",
)
)(read_csv)
@@ -776,7 +770,7 @@ def read_fwf(
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
- raise ValueError("You must specify only one of 'widths' and " "'colspecs'")
+ raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
@@ -907,9 +901,7 @@ def _get_options_with_defaults(self, engine):
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
- raise ValueError(
- "Setting mangle_dupe_cols=False is " "not supported yet"
- )
+ raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
@@ -948,7 +940,7 @@ def _check_file_or_buffer(self, f, engine):
# needs to have that attribute ("next" for Python 2.x, "__next__"
# for Python 3.x)
if engine != "c" and not hasattr(f, next_attr):
- msg = "The 'python' engine cannot iterate " "through this file buffer."
+ msg = "The 'python' engine cannot iterate through this file buffer."
raise ValueError(msg)
return engine
@@ -965,7 +957,7 @@ def _clean_options(self, options, engine):
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
- fallback_reason = "the 'c' engine does not support" " skipfooter"
+ fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
encoding = sys.getfilesystemencoding() or "utf-8"
@@ -1072,7 +1064,6 @@ def _clean_options(self, options, engine):
)
if result.get(arg, depr_default) != depr_default:
- # raise Exception(result.get(arg, depr_default), depr_default)
depr_warning += msg + "\n\n"
else:
result[arg] = parser_default
@@ -1401,13 +1392,17 @@ def __init__(self, kwds):
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
+ if any(i < 0 for i in self.header):
+ raise ValueError(
+ "cannot specify multi-index header with negative integers"
+ )
if kwds.get("usecols"):
raise ValueError(
- "cannot specify usecols when " "specifying a multi-index header"
+ "cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
- "cannot specify names when " "specifying a multi-index header"
+ "cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
@@ -1427,6 +1422,13 @@ def __init__(self, kwds):
elif self.header is not None and not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
+ # GH 27779
+ elif self.header is not None and self.header < 0:
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
+
self._name_processed = False
self._first_chunk = True
@@ -1617,7 +1619,7 @@ def _get_name(icol):
if col_names is None:
raise ValueError(
- ("Must supply column order to use {icol!s} " "as index").format(
+ ("Must supply column order to use {icol!s} as index").format(
icol=icol
)
)
@@ -1780,14 +1782,17 @@ def _infer_types(self, values, na_values, try_num_bool=True):
np.putmask(values, mask, np.nan)
return values, na_count
- if try_num_bool:
+ if try_num_bool and is_object_dtype(values.dtype):
+ # exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
- na_count = isna(result).sum()
- except Exception:
+ except (ValueError, TypeError):
+ # e.g. encountering datetime string gets ValueError
+ # TypeError can be raised in floatify
result = values
- if values.dtype == np.object_:
- na_count = parsers.sanitize_objects(result, na_values, False)
+ na_count = parsers.sanitize_objects(result, na_values, False)
+ else:
+ na_count = isna(result).sum()
else:
result = values
if values.dtype == np.object_:
@@ -2385,7 +2390,7 @@ def _make_reader(self, f):
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError(
- "Custom line terminators not supported in " "python parser (yet)"
+ "Custom line terminators not supported in python parser (yet)"
)
class MyDialect(csv.Dialect):
@@ -2668,7 +2673,7 @@ def _infer_columns(self):
"number of header fields in the file"
)
if len(columns) > 1:
- raise TypeError("Cannot pass names with multi-index " "columns")
+ raise TypeError("Cannot pass names with multi-index columns")
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
@@ -2733,7 +2738,7 @@ def _handle_usecols(self, columns, usecols_key):
elif any(isinstance(u, str) for u in self.usecols):
if len(columns) > 1:
raise ValueError(
- "If using multiple headers, usecols must " "be integers."
+ "If using multiple headers, usecols must be integers."
)
col_indices = []
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 4e390de87fc60..4b9a52a1fb8f3 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -153,10 +153,10 @@ def read_pickle(path, compression="infer"):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return pickle.load(f)
- except Exception: # noqa: E722
+ except Exception:
try:
return pc.load(f, encoding=None)
- except Exception: # noqa: E722
+ except Exception:
return pc.load(f, encoding="latin1")
finally:
f.close()
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 3433d25609255..55ccd838f8a16 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -40,8 +40,6 @@
MultiIndex,
PeriodIndex,
Series,
- SparseDataFrame,
- SparseSeries,
TimedeltaIndex,
concat,
isna,
@@ -173,12 +171,7 @@ class DuplicateWarning(Warning):
"""
# map object types
-_TYPE_MAP = {
- Series: "series",
- SparseSeries: "sparse_series",
- DataFrame: "frame",
- SparseDataFrame: "sparse_frame",
-}
+_TYPE_MAP = {Series: "series", DataFrame: "frame"}
# storer class map
_STORER_MAP = {
@@ -186,9 +179,7 @@ class DuplicateWarning(Warning):
"DataFrame": "LegacyFrameFixed",
"DataMatrix": "LegacyFrameFixed",
"series": "SeriesFixed",
- "sparse_series": "SparseSeriesFixed",
"frame": "FrameFixed",
- "sparse_frame": "SparseFrameFixed",
}
# table class map
@@ -303,7 +294,6 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs):
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
- .. versionadded:: 0.19.0 support for pathlib, py.path.
.. versionadded:: 0.21.0 support for __fspath__ protocol.
key : object, optional
@@ -367,7 +357,7 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs):
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, str):
raise NotImplementedError(
- "Support for generic buffers has not " "been implemented."
+ "Support for generic buffers has not been implemented."
)
try:
exists = os.path.exists(path_or_buf)
@@ -430,10 +420,10 @@ def _is_metadata_of(group, parent_group):
class HDFStore:
-
"""
- Dict-like IO interface for storing pandas objects in PyTables
- either Fixed or Table format.
+ Dict-like IO interface for storing pandas objects in PyTables.
+
+ Either Fixed or Table format.
Parameters
----------
@@ -565,13 +555,12 @@ def __exit__(self, exc_type, exc_value, traceback):
def keys(self):
"""
- Return a (potentially unordered) list of the keys corresponding to the
- objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
- have the leading '/'
+ Return a list of keys corresponding to objects stored in HDFStore.
Returns
-------
list
+ List of ABSOLUTE path-names (e.g. have the leading '/').
"""
return [n._v_pathname for n in self.groups()]
@@ -704,7 +693,7 @@ def flush(self, fsync=False):
def get(self, key):
"""
- Retrieve pandas object stored in file
+ Retrieve pandas object stored in file.
Parameters
----------
@@ -712,7 +701,8 @@ def get(self, key):
Returns
-------
- obj : same type as object stored in file
+ object
+ Same type as object stored in file.
"""
group = self.get_node(key)
if group is None:
@@ -732,25 +722,31 @@ def select(
**kwargs
):
"""
- Retrieve pandas object stored in file, optionally based on where
- criteria
+ Retrieve pandas object stored in file, optionally based on where criteria.
Parameters
----------
key : object
- where : list of Term (or convertible) objects, optional
- start : integer (defaults to None), row number to start selection
- stop : integer (defaults to None), row number to stop selection
- columns : a list of columns that if not None, will limit the return
- columns
- iterator : boolean, return an iterator, default False
- chunksize : nrows to include in iteration, return an iterator
- auto_close : boolean, should automatically close the store when
- finished, default is False
+ Object being retrieved from file.
+ where : list, default None
+ List of Term (or convertible) objects, optional.
+ start : int, default None
+ Row number to start selection.
+ stop : int, default None
+ Row number to stop selection.
+ columns : list, default None
+ A list of columns that if not None, will limit the return columns.
+ iterator : bool, default False
+ Returns an iterator.
+        chunksize : int, default None
+            Number of rows to include in iteration, return an iterator.
+ auto_close : bool, default False
+ Should automatically close the store when finished.
Returns
-------
- The selected object
+ object
+ Retrieved object from file.
"""
group = self.get_node(key)
if group is None:
@@ -930,28 +926,30 @@ def func(_start, _stop, _where):
def put(self, key, value, format=None, append=False, **kwargs):
"""
- Store object in HDFStore
+ Store object in HDFStore.
Parameters
----------
- key : object
- value : {Series, DataFrame}
- format : 'fixed(f)|table(t)', default is 'fixed'
+ key : object
+ value : {Series, DataFrame}
+ format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
- Fast writing/reading. Not-appendable, nor searchable
+ Fast writing/reading. Not-appendable, nor searchable.
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
- / selecting subsets of the data
- append : boolean, default False
+ / selecting subsets of the data.
+ append : bool, default False
This will force Table format, append the input data to the
existing.
- data_columns : list of columns to create as data columns, or True to
+ data_columns : list, default None
+ List of columns to create as data columns, or True to
use all columns. See `here
<http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
- encoding : default None, provide an encoding for strings
- dropna : boolean, default False, do not write an ALL nan row to
- the store settable by the option 'io.hdf.dropna_table'
+ encoding : str, default None
+ Provide an encoding for strings.
+        dropna : bool, default False
+            Do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table'.
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
@@ -999,7 +997,7 @@ def remove(self, key, where=None, start=None, stop=None):
return None
# remove the node
- if com._all_none(where, start, stop):
+ if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
@@ -1048,7 +1046,7 @@ def append(
"""
if columns is not None:
raise TypeError(
- "columns is not a supported keyword in append, " "try data_columns"
+ "columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
@@ -1166,12 +1164,15 @@ def create_table_index(self, key, **kwargs):
s.create_index(**kwargs)
def groups(self):
- """return a list of all the top-level nodes (that are not themselves a
- pandas storage object)
+ """
+ Return a list of all the top-level nodes.
+
+ Each node returned is not a pandas storage object.
Returns
-------
list
+ List of objects.
"""
_tables()
self._check_if_open()
@@ -1189,10 +1190,12 @@ def groups(self):
]
def walk(self, where="/"):
- """ Walk the pytables group hierarchy for pandas objects
+ """
+ Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
+
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
@@ -1203,18 +1206,17 @@ def walk(self, where="/"):
Parameters
----------
- where : str, optional
+ where : str, default "/"
Group where to start walking.
- If not supplied, the root group is used.
Yields
------
path : str
- Full path to a group (without trailing '/')
- groups : list of str
- names of the groups contained in `path`
- leaves : list of str
- names of the pandas objects contained in `path`
+ Full path to a group (without trailing '/').
+ groups : list
+ Names (strings) of the groups contained in `path`.
+ leaves : list
+ Names (strings) of the pandas objects contained in `path`.
"""
_tables()
self._check_if_open()
@@ -1534,7 +1536,6 @@ def _read_group(self, group, **kwargs):
class TableIterator:
-
""" define the iteration interface on a table
Parameters
@@ -1642,7 +1643,6 @@ def get_result(self, coordinates=False):
class IndexCol:
-
""" an index column description class
Parameters
@@ -1782,7 +1782,7 @@ def convert(self, values, nan_rep, encoding, errors, start=None, stop=None):
# making an Index instance could throw a number of different errors
try:
self.values = Index(values, **kwargs)
- except Exception: # noqa: E722
+ except Exception:
# if the output freq is different that what we recorded,
# it should be None (see also 'doc example part 2')
@@ -1956,7 +1956,6 @@ def write_metadata(self, handler):
class GenericIndexCol(IndexCol):
-
""" an index which is not represented in the data of the table """
@property
@@ -1994,7 +1993,6 @@ def set_attr(self):
class DataCol(IndexCol):
-
""" a data holding column, by definition this is not indexable
Parameters
@@ -2162,7 +2160,7 @@ def set_atom(
# which is an error
raise TypeError(
- "too many timezones in this block, create separate " "data columns"
+ "too many timezones in this block, create separate data columns"
)
elif inferred_type == "unicode":
raise TypeError("[unicode] is not implemented as a table column")
@@ -2339,9 +2337,7 @@ def validate_attr(self, append):
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
- raise ValueError(
- "appended items do not match existing items" " in table!"
- )
+ raise ValueError("appended items do not match existing items in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
@@ -2446,7 +2442,6 @@ def set_attr(self):
class DataIndexableCol(DataCol):
-
""" represent a data column that can be indexed """
is_data_indexable = True
@@ -2469,7 +2464,6 @@ def get_atom_timedelta64(self, block):
class GenericDataIndexableCol(DataIndexableCol):
-
""" represent a generic pytables data column """
def get_attr(self):
@@ -2477,7 +2471,6 @@ def get_attr(self):
class Fixed:
-
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
@@ -2637,7 +2630,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
support fully deleting the node in its entirety (only) - where
specification must be None
"""
- if com._all_none(where, start, stop):
+ if com.all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
@@ -2645,7 +2638,6 @@ def delete(self, where=None, start=None, stop=None, **kwargs):
class GenericFixed(Fixed):
-
""" a generified fixed version """
_index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
@@ -2835,7 +2827,7 @@ def write_multi_index(self, key, index):
# write the level
if is_extension_type(lev):
raise NotImplementedError(
- "Saving a MultiIndex with an " "extension dtype is not supported."
+ "Saving a MultiIndex with an extension dtype is not supported."
)
level_key = "{key}_level{idx}".format(key=key, idx=i)
conv_level = _convert_index(
@@ -2901,7 +2893,12 @@ def read_index_node(self, node, start=None, stop=None):
kwargs["freq"] = node._v_attrs["freq"]
if "tz" in node._v_attrs:
- kwargs["tz"] = node._v_attrs["tz"]
+ if isinstance(node._v_attrs["tz"], bytes):
+ # created by python2
+ kwargs["tz"] = node._v_attrs["tz"].decode("utf-8")
+ else:
+ # created by python3
+ kwargs["tz"] = node._v_attrs["tz"]
if kind in ("date", "datetime"):
index = factory(
@@ -3072,83 +3069,6 @@ def write(self, obj, **kwargs):
self.attrs.name = obj.name
-class SparseFixed(GenericFixed):
- def validate_read(self, kwargs):
- """
- we don't support start, stop kwds in Sparse
- """
- kwargs = super().validate_read(kwargs)
- if "start" in kwargs or "stop" in kwargs:
- raise NotImplementedError(
- "start and/or stop are not supported " "in fixed Sparse reading"
- )
- return kwargs
-
-
-class SparseSeriesFixed(SparseFixed):
- pandas_kind = "sparse_series"
- attributes = ["name", "fill_value", "kind"]
-
- def read(self, **kwargs):
- kwargs = self.validate_read(kwargs)
- index = self.read_index("index")
- sp_values = self.read_array("sp_values")
- sp_index = self.read_index("sp_index")
- return SparseSeries(
- sp_values,
- index=index,
- sparse_index=sp_index,
- kind=self.kind or "block",
- fill_value=self.fill_value,
- name=self.name,
- )
-
- def write(self, obj, **kwargs):
- super().write(obj, **kwargs)
- self.write_index("index", obj.index)
- self.write_index("sp_index", obj.sp_index)
- self.write_array("sp_values", obj.sp_values)
- self.attrs.name = obj.name
- self.attrs.fill_value = obj.fill_value
- self.attrs.kind = obj.kind
-
-
-class SparseFrameFixed(SparseFixed):
- pandas_kind = "sparse_frame"
- attributes = ["default_kind", "default_fill_value"]
-
- def read(self, **kwargs):
- kwargs = self.validate_read(kwargs)
- columns = self.read_index("columns")
- sdict = {}
- for c in columns:
- key = "sparse_series_{columns}".format(columns=c)
- s = SparseSeriesFixed(self.parent, getattr(self.group, key))
- s.infer_axes()
- sdict[c] = s.read()
- return SparseDataFrame(
- sdict,
- columns=columns,
- default_kind=self.default_kind,
- default_fill_value=self.default_fill_value,
- )
-
- def write(self, obj, **kwargs):
- """ write it as a collection of individual sparse series """
- super().write(obj, **kwargs)
- for name, ss in obj.items():
- key = "sparse_series_{name}".format(name=name)
- if key not in self.group._v_children:
- node = self._handle.create_group(self.group, key)
- else:
- node = getattr(self.group, key)
- s = SparseSeriesFixed(self.parent, node)
- s.write(ss)
- self.attrs.default_fill_value = obj.default_fill_value
- self.attrs.default_kind = obj.default_kind
- self.write_index("columns", obj.columns)
-
-
class BlockManagerFixed(GenericFixed):
attributes = ["ndim", "nblocks"]
is_shape_reversed = False
@@ -3205,7 +3125,9 @@ def read(self, start=None, stop=None, **kwargs):
values = self.read_array(
"block{idx}_values".format(idx=i), start=_start, stop=_stop
)
- blk = make_block(values, placement=items.get_indexer(blk_items))
+ blk = make_block(
+ values, placement=items.get_indexer(blk_items), ndim=len(axes)
+ )
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
@@ -3240,7 +3162,6 @@ class FrameFixed(BlockManagerFixed):
class Table(Fixed):
-
""" represent a table:
facilitate read/write of various types of tables
@@ -3377,7 +3298,7 @@ def validate_multiindex(self, obj):
return obj.reset_index(), levels
except ValueError:
raise ValueError(
- "duplicate names/columns in the multi-index when " "storing as a table"
+ "duplicate names/columns in the multi-index when storing as a table"
)
@property
@@ -3993,7 +3914,7 @@ def process_filter(field, filt):
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
- return obj.loc._getitem_axis(takers, axis=axis_number)
+ return obj.loc(axis=axis_number)[takers]
# this might be the name of a file IN an axis
elif field in axis_values:
@@ -4006,7 +3927,7 @@ def process_filter(field, filt):
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
- return obj.loc._getitem_axis(takers, axis=axis_number)
+ return obj.loc(axis=axis_number)[takers]
raise ValueError(
"cannot find the field [{field}] for "
@@ -4082,7 +4003,7 @@ def read_column(self, column, where=None, start=None, stop=None):
return False
if where is not None:
- raise TypeError("read_column does not currently accept a where " "clause")
+ raise TypeError("read_column does not currently accept a where clause")
# find the axes
for a in self.axes:
@@ -4115,7 +4036,6 @@ def read_column(self, column, where=None, start=None, stop=None):
class WORMTable(Table):
-
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
@@ -4137,7 +4057,6 @@ def write(self, **kwargs):
class LegacyTable(Table):
-
""" an appendable table: allow append/query/delete operations to a
(possibly) already existing appendable table this table ALLOWS
append (but doesn't require them), and stores the data in a format
@@ -4465,7 +4384,7 @@ def read(self, where=None, columns=None, **kwargs):
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
- block = make_block(values, placement=np.arange(len(cols_)))
+ block = make_block(values, placement=np.arange(len(cols_)), ndim=2)
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
@@ -4591,7 +4510,6 @@ def write(self, **kwargs):
class AppendableMultiFrameTable(AppendableFrameTable):
-
""" a frame with a multi-index """
table_type = "appendable_multiframe"
@@ -4950,7 +4868,6 @@ def _need_convert(kind):
class Selection:
-
"""
Carries out a selection operation on a tables.Table object.
@@ -4991,7 +4908,7 @@ def __init__(self, table, where=None, start=None, stop=None):
self.stop is not None and (where >= self.stop).any()
):
raise ValueError(
- "where must have index locations >= start and " "< stop"
+ "where must have index locations >= start and < stop"
)
self.coordinates = where
diff --git a/pandas/io/s3.py b/pandas/io/s3.py
index 0a7c082fec51c..7e0a37e8cba20 100644
--- a/pandas/io/s3.py
+++ b/pandas/io/s3.py
@@ -1,8 +1,11 @@
""" s3 support for remote file interactivity """
+from typing import IO, Any, Optional, Tuple
from urllib.parse import urlparse as parse_url
from pandas.compat._optional import import_optional_dependency
+from pandas._typing import FilePathOrBuffer
+
s3fs = import_optional_dependency(
"s3fs", extra="The s3fs package is required to handle s3 files."
)
@@ -14,9 +17,9 @@ def _strip_schema(url):
return result.netloc + result.path
-def get_filepath_or_buffer(
- filepath_or_buffer, encoding=None, compression=None, mode=None
-):
+def get_file_and_filesystem(
+ filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None
+) -> Tuple[IO, Any]:
from botocore.exceptions import NoCredentialsError
if mode is None:
@@ -24,7 +27,7 @@ def get_filepath_or_buffer(
fs = s3fs.S3FileSystem(anon=False)
try:
- filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode)
+ file = fs.open(_strip_schema(filepath_or_buffer), mode)
except (FileNotFoundError, NoCredentialsError):
# boto3 has troubles when trying to access a public file
# when credentialed...
@@ -33,5 +36,15 @@ def get_filepath_or_buffer(
# A NoCredentialsError is raised if you don't have creds
# for that bucket.
fs = s3fs.S3FileSystem(anon=True)
- filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode)
- return filepath_or_buffer, None, compression, True
+ file = fs.open(_strip_schema(filepath_or_buffer), mode)
+ return file, fs
+
+
+def get_filepath_or_buffer(
+ filepath_or_buffer: FilePathOrBuffer,
+ encoding: Optional[str] = None,
+ compression: Optional[str] = None,
+ mode: Optional[str] = None,
+) -> Tuple[IO, Optional[str], Optional[str], bool]:
+ file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode)
+ return file, None, compression, True
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 34b93d72d0e29..ea26a9b8efdbf 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -26,7 +26,7 @@
"000000000000000000000000000000 "
)
_correct_header1 = (
- "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!" "000000000000000001600000000"
+ "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
)
_correct_header2 = (
"HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 571c544d48b29..6bd3532d538c7 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -29,12 +29,12 @@ def read_sas(
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
- format : string {'xport', 'sas7bdat'} or None
+ format : str {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
- encoding : string, default is None
+ encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
diff --git a/pandas/io/spss.py b/pandas/io/spss.py
index 983ac1c818c42..4f13349a819c3 100644
--- a/pandas/io/spss.py
+++ b/pandas/io/spss.py
@@ -15,7 +15,7 @@ def read_spss(
"""
Load an SPSS file from the file path, returning a DataFrame.
- .. versionadded 0.25.0
+ .. versionadded:: 0.25.0
Parameters
----------
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 6fe34e4e9705a..b0683fb8b0dfb 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -233,7 +233,7 @@ def read_sql_table(
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
- "read_sql_table only supported for " "SQLAlchemy connectable."
+ "read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
@@ -269,7 +269,8 @@ def read_sql_query(
parse_dates=None,
chunksize=None,
):
- """Read SQL query into a DataFrame.
+ """
+ Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
@@ -455,14 +456,14 @@ def to_sql(
Parameters
----------
frame : DataFrame, Series
- name : string
+ name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- schema : string, default None
+ schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
@@ -471,18 +472,19 @@ def to_sql(
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
- index_label : string or sequence, default None
+ index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
- chunksize : int, default None
- If not None, then rows will be written in batches of this size at a
- time. If None, all rows will be written at once.
- dtype : single SQLtype or dict of column name to SQL type, default None
- Optional specifying the datatype for columns. The SQL type should
- be a SQLAlchemy type, or a string for sqlite3 fallback connection.
- If all columns are of the same type, one single value can be used.
- method : {None, 'multi', callable}, default None
+ chunksize : int, optional
+ Specify the number of rows in each batch to be written at a time.
+ By default, all rows will be written at once.
+ dtype : dict or scalar, optional
+ Specifying the datatype for columns. If a dictionary is used, the
+ keys should be the column names and the values should be the
+ SQLAlchemy types or strings for the sqlite3 fallback mode. If a
+ scalar is provided, it will be applied to all columns.
+ method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
@@ -503,7 +505,7 @@ def to_sql(
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
- "'frame' argument should be either a " "Series or a DataFrame"
+ "'frame' argument should be either a Series or a DataFrame"
)
pandas_sql.to_sql(
@@ -1589,10 +1591,7 @@ def execute(self, *args, **kwargs):
else:
cur = self.con.cursor()
try:
- if kwargs:
- cur.execute(*args, **kwargs)
- else:
- cur.execute(*args)
+ cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
@@ -1756,7 +1755,7 @@ def has_table(self, name, schema=None):
wld = "?"
query = (
- "SELECT name FROM sqlite_master " "WHERE type='table' AND name={wld};"
+ "SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
).format(wld=wld)
return len(self.execute(query, [name]).fetchall()) > 0
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 8dbcee829ee1e..0b674b556b2ee 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -138,7 +138,7 @@
_iterator_params,
)
-_data_method_doc = """\
+_data_method_doc = """
Read observations from Stata file, converting them into a dataframe
.. deprecated::
@@ -367,7 +367,7 @@ def convert_delta_safe(base, deltas, unit):
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
- warnings.warn("Encountered %tC format. Leaving in Stata " "Internal Format.")
+ warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = NaT
@@ -856,7 +856,7 @@ def __init__(self, value):
string = property(
lambda self: self._str,
- doc="The Stata representation of the missing value: " "'.', '.a'..'.z'",
+ doc="The Stata representation of the missing value: '.', '.a'..'.z'",
)
value = property(
lambda self: self._value, doc="The binary representation of the missing value."
@@ -1139,13 +1139,17 @@ def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
- if self.format_version not in [117, 118]:
+ if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error)
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
- self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
+ nvar_type = "H" if self.format_version <= 118 else "I"
+ nvar_size = 2 if self.format_version <= 118 else 4
+ self.nvar = struct.unpack(
+ self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
+ )[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
@@ -1207,7 +1211,7 @@ def _read_new_header(self, first_char):
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
- # Get data type information, works for versions 117-118.
+ # Get data type information, works for versions 117-119.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
@@ -1241,14 +1245,14 @@ def f(typ):
def _get_varlist(self):
if self.format_version == 117:
b = 33
- elif self.format_version == 118:
+ elif self.format_version >= 118:
b = 129
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
@@ -1270,7 +1274,7 @@ def _get_lbllist(self):
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
def _get_variable_labels(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)
]
@@ -1285,13 +1289,13 @@ def _get_variable_labels(self):
return vlblist
def _get_nobs(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
@@ -1303,7 +1307,7 @@ def _get_data_label(self):
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
@@ -1321,7 +1325,7 @@ def _get_seek_variable_labels(self):
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
- elif self.format_version == 118:
+ elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
@@ -1519,10 +1523,12 @@ def _read_strls(self):
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
+ v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
- buf = buf[0:2] + buf[4:10]
+ buf = buf[0:v_size] + buf[4 : 12 - v_size]
else:
- buf = buf[0:2] + buf[6:]
+ # This path may not be correct, impossible to test
+ buf = buf[0:v_size] + buf[4 + v_size :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
@@ -1959,7 +1965,7 @@ def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
- raise ValueError("convert_dates key must be a " "column or an integer")
+ raise ValueError("convert_dates key must be a column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
@@ -2096,8 +2102,6 @@ class StataWriter(StataParser):
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
- .. versionadded:: 0.19.0
-
Returns
-------
writer : StataWriter instance
@@ -2384,16 +2388,16 @@ def write_file(self):
self._write_map()
except Exception as exc:
self._close()
- try:
- if self._own_file:
+ if self._own_file:
+ try:
os.unlink(self._fname)
- except Exception:
- warnings.warn(
- "This save was not successful but {0} could not "
- "be deleted. This file is not "
- "valid.".format(self._fname),
- ResourceWarning,
- )
+ except OSError:
+ warnings.warn(
+ "This save was not successful but {0} could not "
+ "be deleted. This file is not "
+ "valid.".format(self._fname),
+ ResourceWarning,
+ )
raise exc
else:
self._close()
@@ -2535,9 +2539,7 @@ def _write_variable_labels(self):
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
- raise ValueError(
- "Variable labels must be 80 characters " "or fewer"
- )
+ raise ValueError("Variable labels must be 80 characters or fewer")
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
@@ -3095,9 +3097,7 @@ def _write_variable_labels(self):
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
- raise ValueError(
- "Variable labels must be 80 characters " "or fewer"
- )
+ raise ValueError("Variable labels must be 80 characters or fewer")
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 0610780edb28d..8724382d9ec55 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1,22 +1,20 @@
import importlib
-from typing import List, Type # noqa
import warnings
+from pandas._config import get_option
+
+from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-import pandas
from pandas.core.base import PandasObject
# Trigger matplotlib import, which implicitly registers our
# converts. Implicit registration is deprecated, and when enforced
# we can lazily import matplotlib.
-try:
- import pandas.plotting._matplotlib # noqa
-except ImportError:
- pass
+import_optional_dependency("pandas.plotting._matplotlib", raise_on_missing=False)
def hist_series(
@@ -30,7 +28,7 @@ def hist_series(
yrot=None,
figsize=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Draw histogram of the input series using matplotlib.
@@ -53,12 +51,12 @@ def hist_series(
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
- bins : integer or sequence, default 10
+ bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- `**kwds` : keywords
+ **kwargs
To be passed to the actual plotting function
Returns
@@ -82,7 +80,7 @@ def hist_series(
yrot=yrot,
figsize=figsize,
bins=bins,
- **kwds
+ **kwargs
)
@@ -101,7 +99,7 @@ def hist_frame(
figsize=None,
layout=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Make a histogram of the DataFrame's.
@@ -116,7 +114,7 @@ def hist_frame(
----------
data : DataFrame
The pandas object holding the data.
- column : string or sequence
+ column : str or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
@@ -148,12 +146,12 @@ def hist_frame(
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
- bins : integer or sequence, default 10
+ bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
@@ -177,7 +175,7 @@ def hist_frame(
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
- ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
+ ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
plot_backend = _get_plot_backend()
@@ -196,7 +194,7 @@ def hist_frame(
figsize=figsize,
layout=layout,
bins=bins,
- **kwds
+ **kwargs
)
@@ -211,7 +209,7 @@ def boxplot(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
"""
Make a box plot from DataFrame columns.
@@ -262,7 +260,7 @@ def boxplot(
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
@@ -370,8 +368,8 @@ def boxplot(
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
- >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
- ... return_type=None)
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+ ... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
@@ -387,7 +385,7 @@ def boxplot(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -403,7 +401,7 @@ def boxplot_frame(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
plot_backend = _get_plot_backend()
return plot_backend.boxplot_frame(
@@ -417,7 +415,7 @@ def boxplot_frame(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -433,7 +431,7 @@ def boxplot_frame_groupby(
layout=None,
sharex=False,
sharey=True,
- **kwds
+ **kwargs
):
"""
Make box plots from DataFrameGroupBy data.
@@ -446,7 +444,7 @@ def boxplot_frame_groupby(
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
- fontsize : int or string
+ fontsize : int or str
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
@@ -461,7 +459,7 @@ def boxplot_frame_groupby(
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
- `**kwds` : Keyword Arguments
+ **kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
@@ -497,7 +495,7 @@ def boxplot_frame_groupby(
layout=layout,
sharex=sharex,
sharey=sharey,
- **kwds
+ **kwargs
)
@@ -530,7 +528,7 @@ class PlotAccessor(PandasObject):
figsize : a tuple (width, height) in inches
use_index : bool, default True
Use index as ticks for x axis
- title : string or list
+ title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
@@ -553,16 +551,16 @@ class PlotAccessor(PandasObject):
.. versionchanged:: 0.25.0
xticks : sequence
- Values to use for the xticks
+ Values to use for the xticks.
yticks : sequence
- Values to use for the yticks
+ Values to use for the yticks.
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots)
fontsize : int, default None
- Font size for xticks and yticks
+ Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
@@ -586,8 +584,10 @@ class PlotAccessor(PandasObject):
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
- `**kwds` : keywords
- Options to pass to matplotlib plotting method
+ include_bool : bool, default is False
+ If True, boolean values can be plotted.
+ **kwargs
+ Options to pass to matplotlib plotting method.
Returns
-------
@@ -730,7 +730,7 @@ def __call__(self, *args, **kwargs):
# `x` parameter, and return a Series with the parameter `y` as values.
data = self._parent.copy()
- if isinstance(data, pandas.core.dtypes.generic.ABCSeries):
+ if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
@@ -810,7 +810,7 @@ def line(self, x=None, y=None, **kwargs):
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -880,7 +880,7 @@ def bar(self, x=None, y=None, **kwargs):
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -963,7 +963,7 @@ def barh(self, x=None, y=None, **kwargs):
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -983,7 +983,7 @@ def barh(self, x=None, y=None, **kwargs):
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
+ >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
@@ -1047,9 +1047,9 @@ def box(self, by=None, **kwargs):
Parameters
----------
- by : string or sequence
+ by : str or sequence
Column in the DataFrame to group by.
- **kwds : optional
+ **kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
@@ -1092,7 +1092,7 @@ def hist(self, by=None, bins=10, **kwargs):
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1143,12 +1143,12 @@ def kde(self, bw_method=None, ind=None, **kwargs):
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
- ind : NumPy array or integer, optional
+ ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
@@ -1250,7 +1250,7 @@ def area(self, x=None, y=None, **kwargs):
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1322,7 +1322,7 @@ def pie(self, **kwargs):
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1404,7 +1404,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs):
- A column name or position whose values will be used to color the
marker points according to a colormap.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1476,7 +1476,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1533,6 +1533,61 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
+_backends = {}
+
+
+def _find_backend(backend: str):
+ """
+ Find a pandas plotting backend.
+
+ Parameters
+ ----------
+ backend : str
+ The identifier for the backend. Either an entrypoint item registered
+ with pkg_resources, or a module name.
+
+ Notes
+ -----
+ Modifies _backends with imported backends as a side effect.
+
+ Returns
+ -------
+ types.ModuleType
+ The imported backend.
+ """
+ import pkg_resources # Delay import for performance.
+
+ for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"):
+ if entry_point.name == "matplotlib":
+ # matplotlib is an optional dependency. When
+ # missing, this would raise.
+ continue
+ _backends[entry_point.name] = entry_point.load()
+
+ try:
+ return _backends[backend]
+ except KeyError:
+ # Fall back to unregistered, module name approach.
+ try:
+ module = importlib.import_module(backend)
+ except ImportError:
+ # We re-raise later on.
+ pass
+ else:
+ if hasattr(module, "plot"):
+ # Validate that the interface is implemented when the option
+ # is set, rather than at plot time.
+ _backends[backend] = module
+ return module
+
+ msg = (
+ "Could not find plotting backend '{name}'. Ensure that you've installed the "
+ "package providing the '{name}' entrypoint, or that the package has a "
+ "top-level `.plot` method."
+ )
+ raise ValueError(msg.format(name=backend))
+
+
def _get_plot_backend(backend=None):
"""
Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).
@@ -1546,7 +1601,24 @@ def _get_plot_backend(backend=None):
The backend is imported lazily, as matplotlib is a soft dependency, and
pandas can be used without it being installed.
"""
- backend_str = backend or pandas.get_option("plotting.backend")
- if backend_str == "matplotlib":
- backend_str = "pandas.plotting._matplotlib"
- return importlib.import_module(backend_str)
+ backend = backend or get_option("plotting.backend")
+
+ if backend == "matplotlib":
+ # Because matplotlib is an optional dependency and first-party backend,
+ # we need to attempt an import here to raise an ImportError if needed.
+ try:
+ import pandas.plotting._matplotlib as module
+ except ImportError:
+ raise ImportError(
+ "matplotlib is required for plotting when the "
+ 'default backend "matplotlib" is selected.'
+ ) from None
+
+ _backends["matplotlib"] = module
+
+ if backend in _backends:
+ return _backends[backend]
+
+ module = _find_backend(backend)
+ _backends[backend] = module
+ return module
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index 8ff7441df5354..99035013092cc 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -4,6 +4,7 @@
from matplotlib.artist import setp
import numpy as np
+from pandas.core.dtypes.common import is_dict_like
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import remove_na_arraylike
@@ -250,13 +251,38 @@ def boxplot(
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
- return _get_standard_colors(color=kwds.get("color"), num_colors=3)
+ # if colors not provided, use same defaults as DataFrame.plot.box
+ result = _get_standard_colors(num_colors=3)
+ result = np.take(result, [0, 0, 2])
+ result = np.append(result, "k")
+
+ colors = kwds.pop("color", None)
+ if colors:
+ if is_dict_like(colors):
+ # replace colors in result array with user-specified colors
+ # taken from the colors dict parameter
+ # "boxes" value placed in position 0, "whiskers" in 1, etc.
+ valid_keys = ["boxes", "whiskers", "medians", "caps"]
+ key_to_index = dict(zip(valid_keys, range(4)))
+ for key, value in colors.items():
+ if key in valid_keys:
+ result[key_to_index[key]] = value
+ else:
+ raise ValueError(
+ "color dict contains invalid "
+ "key '{0}'. "
+ "The key must be either {1}".format(key, valid_keys)
+ )
+ else:
+ result.fill(colors)
+
+ return result
def maybe_color_bp(bp):
- if "color" not in kwds:
- setp(bp["boxes"], color=colors[0], alpha=1)
- setp(bp["whiskers"], color=colors[0], alpha=1)
- setp(bp["medians"], color=colors[2], alpha=1)
+ setp(bp["boxes"], color=colors[0], alpha=1)
+ setp(bp["whiskers"], color=colors[1], alpha=1)
+ setp(bp["medians"], color=colors[2], alpha=1)
+ setp(bp["caps"], color=colors[3], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 15648d59c8f98..446350cb5d915 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -64,11 +64,12 @@ def register(explicit=True):
pairs = get_pairs()
for type_, cls in pairs:
- converter = cls()
- if type_ in units.registry:
+ # Cache previous converter if present
+ if type_ in units.registry and not isinstance(units.registry[type_], cls):
previous = units.registry[type_]
_mpl_units[type_] = previous
- units.registry[type_] = converter
+ # Replace with pandas converter
+ units.registry[type_] = cls()
def deregister():
@@ -328,7 +329,7 @@ def __init__(self, locator, tz=None, defaultfmt="%Y-%m-%d"):
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
- "Pick the best locator based on a distance."
+ """Pick the best locator based on a distance."""
_check_implicitly_registered()
delta = relativedelta(dmax, dmin)
@@ -381,6 +382,7 @@ def __call__(self):
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
+ # FIXME: don't leave commented-out
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
@@ -447,6 +449,7 @@ def autoscale(self):
# We need to cap at the endpoints of valid datetime
+ # FIXME: don't leave commented-out
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 519465802085b..82c5ba7f0317d 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1,5 +1,5 @@
import re
-from typing import Optional # noqa
+from typing import Optional
import warnings
import numpy as np
@@ -106,6 +106,7 @@ def __init__(
colormap=None,
table=False,
layout=None,
+ include_bool=False,
**kwds
):
@@ -191,6 +192,7 @@ def __init__(
self.colormap = colormap
self.table = table
+ self.include_bool = include_bool
self.kwds = kwds
@@ -400,12 +402,23 @@ def _compute_plot_data(self):
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
- numeric_data = data.select_dtypes(
- include=[np.number, "datetime", "datetimetz", "timedelta"]
- )
+ include_type = [np.number, "datetime", "datetimetz", "timedelta"]
+
+ # GH23719, allow plotting boolean
+ if self.include_bool is True:
+ include_type.append(np.bool_)
+
+ # GH22799, exclude datetime-like type for boxplot
+ exclude_type = None
+ if self._kind == "box":
+ # TODO: change after solving issue 27881
+ include_type = [np.number]
+ exclude_type = ["timedelta"]
+
+ numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)
try:
- is_empty = numeric_data.empty
+ is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not len(numeric_data)
@@ -549,7 +562,7 @@ def _add_legend_handle(self, handle, label, index=None):
self.legend_labels.append(label)
def _make_legend(self):
- ax, leg = self._get_ax_legend(self.axes[0])
+ ax, leg, handle = self._get_ax_legend_handle(self.axes[0])
handles = []
labels = []
@@ -558,7 +571,8 @@ def _make_legend(self):
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
- handles = leg.legendHandles
+ # Replace leg.LegendHandles because it misses marker info
+ handles.extend(handle)
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
@@ -568,6 +582,7 @@ def _make_legend(self):
handles += self.legend_handles
labels += self.legend_labels
+
if self.legend_title is not None:
title = self.legend_title
@@ -579,8 +594,14 @@ def _make_legend(self):
if ax.get_visible():
ax.legend(loc="best")
- def _get_ax_legend(self, ax):
+ def _get_ax_legend_handle(self, ax):
+ """
+ Take in axes and return ax, legend and handle under different scenarios.
+ """
leg = ax.get_legend()
+
+ # Get handle from axes
+ handle, _ = ax.get_legend_handles_labels()
other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
@@ -588,7 +609,7 @@ def _get_ax_legend(self, ax):
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
- return ax, leg
+ return ax, leg, handle
@cache_readonly
def plt(self):
@@ -654,7 +675,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
def _get_index_name(self):
if isinstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
- if com._any_not_none(*name):
+ if com.any_not_none(*name):
name = ",".join(pprint_thing(x) for x in name)
else:
name = None
@@ -1054,7 +1075,7 @@ def _make_plot(self):
it = self._iter_data()
stacking_id = self._get_stacking_id()
- is_errorbar = com._any_not_none(*self.errors.values())
+ is_errorbar = com.any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
@@ -1080,9 +1101,13 @@ def _make_plot(self):
)
self._add_legend_handle(newlines[0], label, index=i)
- lines = _get_all_lines(ax)
- left, right = _get_xlim(lines)
- ax.set_xlim(left, right)
+ if self._is_ts_plot():
+
+ # reset of xlim should be used for ts data
+ # TODO: GH28021, should find a way to change view limit on xaxis
+ lines = _get_all_lines(ax)
+ left, right = _get_xlim(lines)
+ ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds):
@@ -1410,8 +1435,13 @@ def _post_plot_logic(self, ax, data):
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
- ax.set_xticks(self.tick_pos)
- ax.set_xticklabels(ticklabels)
+
+ if self.xticks is not None:
+ ax.set_xticks(np.array(self.xticks))
+ else:
+ ax.set_xticks(self.tick_pos)
+ ax.set_xticklabels(ticklabels)
+
if name is not None and self.use_index:
ax.set_xlabel(name)
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index f3fcb090e9883..f160e50d8d99b 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -304,23 +304,6 @@ def _maybe_convert_index(ax, data):
# Do we need the rest for convenience?
-def format_timedelta_ticks(x, pos, n_decimals):
- """
- Convert seconds to 'D days HH:MM:SS.F'
- """
- s, ns = divmod(x, 1e9)
- m, s = divmod(s, 60)
- h, m = divmod(m, 60)
- d, h = divmod(h, 24)
- decimals = int(ns * 10 ** (n_decimals - 9))
- s = r"{:02d}:{:02d}:{:02d}".format(int(h), int(m), int(s))
- if n_decimals > 0:
- s += ".{{:0{:0d}d}}".format(n_decimals).format(decimals)
- if d != 0:
- s = "{:d} days ".format(int(d)) + s
- return s
-
-
def _format_coord(freq, t, y):
return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 8472eb3a3d887..1c9bd01b16739 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -281,17 +281,15 @@ def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
- try:
- # set_visible will not be effective if
- # minor axis has NullLocator and NullFormattor (default)
- if isinstance(axis.get_minor_locator(), ticker.NullLocator):
- axis.set_minor_locator(ticker.AutoLocator())
- if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
- axis.set_minor_formatter(ticker.FormatStrFormatter(""))
- for t in axis.get_minorticklabels():
- t.set_visible(False)
- except Exception: # pragma no cover
- raise
+ # set_visible will not be effective if
+ # minor axis has NullLocator and NullFormatter (default)
+ if isinstance(axis.get_minor_locator(), ticker.NullLocator):
+ axis.set_minor_locator(ticker.AutoLocator())
+ if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
+ axis.set_minor_formatter(ticker.FormatStrFormatter(""))
+ for t in axis.get_minorticklabels():
+ t.set_visible(False)
+
axis.get_label().set_visible(False)
@@ -343,6 +341,21 @@ def _flatten(axes):
return np.array(axes)
+def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
+ import matplotlib.pyplot as plt
+
+ for ax in _flatten(axes):
+ if xlabelsize is not None:
+ plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
+ if xrot is not None:
+ plt.setp(ax.get_xticklabels(), rotation=xrot)
+ if ylabelsize is not None:
+ plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
+ if yrot is not None:
+ plt.setp(ax.get_yticklabels(), rotation=yrot)
+ return axes
+
+
def _get_all_lines(ax):
lines = ax.get_lines()
@@ -362,18 +375,3 @@ def _get_xlim(lines):
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
-
-
-def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
- import matplotlib.pyplot as plt
-
- for ax in _flatten(axes):
- if xlabelsize is not None:
- plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
- if xrot is not None:
- plt.setp(ax.get_xticklabels(), rotation=xrot)
- if ylabelsize is not None:
- plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
- if yrot is not None:
- plt.setp(ax.get_yticklabels(), rotation=yrot)
- return axes
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index efe88d6b19b10..a8e86d9dfa997 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -46,7 +46,7 @@ def register(explicit=True):
See Also
--------
- deregister_matplotlib_converter
+ deregister_matplotlib_converters
"""
plot_backend = _get_plot_backend("matplotlib")
plot_backend.register(explicit=explicit)
@@ -65,7 +65,7 @@ def deregister():
See Also
--------
- deregister_matplotlib_converters
+ register_matplotlib_converters
"""
plot_backend = _get_plot_backend("matplotlib")
plot_backend.deregister()
@@ -329,7 +329,8 @@ def parallel_coordinates(
sort_labels=False,
**kwds
):
- """Parallel coordinates plotting.
+ """
+ Parallel coordinates plotting.
Parameters
----------
@@ -392,7 +393,8 @@ def parallel_coordinates(
def lag_plot(series, lag=1, ax=None, **kwds):
- """Lag plot for time series.
+ """
+ Lag plot for time series.
Parameters
----------
@@ -415,8 +417,8 @@ def autocorrelation_plot(series, ax=None, **kwds):
Parameters
----------
- series: Time series
- ax: Matplotlib axis object, optional
+ series : Time series
+ ax : Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 326bef7f4b480..2f24bbd6f0c85 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -67,9 +67,7 @@ class TestPDApi(Base):
"UInt64Index",
"Series",
"SparseArray",
- "SparseDataFrame",
"SparseDtype",
- "SparseSeries",
"Timedelta",
"TimedeltaIndex",
"Timestamp",
@@ -90,7 +88,7 @@ class TestPDApi(Base):
"NamedAgg",
]
if not compat.PY37:
- classes.append("Panel")
+ classes.extend(["Panel", "SparseSeries", "SparseDataFrame"])
# these are already deprecated; awaiting removal
deprecated_classes = []
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index f047154f2c636..774ff14398bdb 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -190,7 +190,12 @@ def box(request):
@pytest.fixture(
- params=[pd.Index, pd.Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail)],
+ params=[
+ pd.Index,
+ pd.Series,
+ pytest.param(pd.DataFrame, marks=pytest.mark.xfail),
+ tm.to_array,
+ ],
ids=id_func,
)
def box_df_fail(request):
@@ -206,6 +211,7 @@ def box_df_fail(request):
(pd.Series, False),
(pd.DataFrame, False),
pytest.param((pd.DataFrame, True), marks=pytest.mark.xfail),
+ (tm.to_array, False),
],
ids=id_func,
)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 6037273450a1c..6f7222f523579 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -30,15 +30,52 @@
import pandas.util.testing as tm
-def assert_all(obj):
+def assert_invalid_comparison(left, right, box):
"""
- Test helper to call call obj.all() the appropriate number of times on
- a Series or DataFrame.
+ Assert that comparison operations with mismatched types behave correctly.
+
+ Parameters
+ ----------
+ left : np.ndarray, ExtensionArray, Index, or Series
+ right : object
+ box : {pd.DataFrame, pd.Series, pd.Index, tm.to_array}
"""
- if isinstance(obj, pd.DataFrame):
- assert obj.all().all()
- else:
- assert obj.all()
+ # Not for tznaive-tzaware comparison
+
+ # Note: not quite the same as how we do this for tm.box_expected
+ xbox = box if box is not pd.Index else np.array
+
+ result = left == right
+ expected = xbox(np.zeros(result.shape, dtype=np.bool_))
+
+ tm.assert_equal(result, expected)
+
+ result = right == left
+ tm.assert_equal(result, expected)
+
+ result = left != right
+ tm.assert_equal(result, ~expected)
+
+ result = right != left
+ tm.assert_equal(result, ~expected)
+
+ msg = "Invalid comparison between"
+ with pytest.raises(TypeError, match=msg):
+ left < right
+ with pytest.raises(TypeError, match=msg):
+ left <= right
+ with pytest.raises(TypeError, match=msg):
+ left > right
+ with pytest.raises(TypeError, match=msg):
+ left >= right
+ with pytest.raises(TypeError, match=msg):
+ right < left
+ with pytest.raises(TypeError, match=msg):
+ right <= left
+ with pytest.raises(TypeError, match=msg):
+ right > left
+ with pytest.raises(TypeError, match=msg):
+ right >= left
# ------------------------------------------------------------------
@@ -47,7 +84,7 @@ def assert_all(obj):
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
- # DataFrame/Series/DatetimeIndex/DateteimeArray. Ideally all comparison
+ # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
@@ -59,36 +96,61 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
other = np.array(dti.to_numpy()[0])
- # FIXME: ValueError with transpose on tzaware
- dtarr = tm.box_expected(dti, box, transpose=False)
+ dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
- expected = tm.box_expected(expected, xbox, transpose=False)
+ expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
+ def test_dt64arr_cmp_date_invalid(self, tz_naive_fixture, box_with_array):
+ # GH#19800, GH#19301 datetime.date comparison raises to
+ # match DatetimeIndex/Timestamp. This also matches the behavior
+ # of stdlib datetime.datetime
+ tz = tz_naive_fixture
-class TestDatetime64DataFrameComparison:
- @pytest.mark.parametrize(
- "timestamps",
- [
- [pd.Timestamp("2012-01-01 13:00:00+00:00")] * 2,
- [pd.Timestamp("2012-01-01 13:00:00")] * 2,
- ],
- )
- def test_tz_aware_scalar_comparison(self, timestamps):
- # GH#15966
- df = pd.DataFrame({"test": timestamps})
- expected = pd.DataFrame({"test": [False, False]})
- tm.assert_frame_equal(df == -1, expected)
+ dti = pd.date_range("20010101", periods=10, tz=tz)
+ date = dti[0].to_pydatetime().date()
- def test_dt64_nat_comparison(self):
+ dtarr = tm.box_expected(dti, box_with_array)
+ assert_invalid_comparison(dtarr, date, box_with_array)
+
+ @pytest.mark.parametrize("other", ["foo", -1, 99, 4.0, object(), timedelta(days=2)])
+ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
+ # GH#22074, GH#15966
+ tz = tz_naive_fixture
+
+ rng = date_range("1/1/2000", periods=10, tz=tz)
+ dtarr = tm.box_expected(rng, box_with_array)
+ assert_invalid_comparison(dtarr, other, box_with_array)
+
+ @pytest.mark.parametrize("other", [None, np.nan])
+ def test_dt64arr_cmp_na_scalar_invalid(
+ self, other, tz_naive_fixture, box_with_array
+ ):
+ # GH#19301
+ tz = tz_naive_fixture
+ dti = pd.date_range("2016-01-01", periods=2, tz=tz)
+ dtarr = tm.box_expected(dti, box_with_array)
+ assert_invalid_comparison(dtarr, other, box_with_array)
+
+ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
- ts = pd.Timestamp.now()
- df = pd.DataFrame([ts, pd.NaT])
- expected = pd.DataFrame([True, False])
+ tz = tz_naive_fixture
+ box = box_with_array
+ xbox = box if box is not pd.Index else np.ndarray
- result = df == ts
- tm.assert_frame_equal(result, expected)
+ ts = pd.Timestamp.now(tz)
+ ser = pd.Series([ts, pd.NaT])
+
+ # FIXME: Can't transpose because that loses the tz dtype on
+ # the NaT column
+ obj = tm.box_expected(ser, box, transpose=False)
+
+ expected = pd.Series([True, False], dtype=np.bool_)
+ expected = tm.box_expected(expected, xbox, transpose=False)
+
+ result = obj == ts
+ tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
@@ -142,35 +204,17 @@ def test_nat_comparisons(self, dtype, box, reverse, pair):
expected = Series([False, False, True])
tm.assert_series_equal(left <= right, expected)
- def test_comparison_invalid(self, box_with_array):
+ def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
# invalid date/int comparisons
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
-
+ tz = tz_naive_fixture
ser = Series(range(5))
- ser2 = Series(pd.date_range("20010101", periods=5))
+ ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
- for (x, y) in [(ser, ser2), (ser2, ser)]:
-
- result = x == y
- expected = tm.box_expected([False] * 5, xbox)
- tm.assert_equal(result, expected)
-
- result = x != y
- expected = tm.box_expected([True] * 5, xbox)
- tm.assert_equal(result, expected)
- msg = "Invalid comparison between"
- with pytest.raises(TypeError, match=msg):
- x >= y
- with pytest.raises(TypeError, match=msg):
- x > y
- with pytest.raises(TypeError, match=msg):
- x < y
- with pytest.raises(TypeError, match=msg):
- x <= y
+ assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
@@ -227,65 +271,6 @@ def test_series_comparison_scalars(self):
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
- def test_dt64_ser_cmp_date_warning(self):
- # https://github.com/pandas-dev/pandas/issues/21359
- # Remove this test and enble invalid test below
- ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
- date = ser.iloc[0].to_pydatetime().date()
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser == date
- expected = pd.Series([True] + [False] * 9, name="dates")
- tm.assert_series_equal(result, expected)
- assert "Comparing Series of datetimes " in str(m[0].message)
- assert "will not compare equal" in str(m[0].message)
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser != date
- tm.assert_series_equal(result, ~expected)
- assert "will not compare equal" in str(m[0].message)
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser <= date
- tm.assert_series_equal(result, expected)
- assert "a TypeError will be raised" in str(m[0].message)
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser < date
- tm.assert_series_equal(result, pd.Series([False] * 10, name="dates"))
- assert "a TypeError will be raised" in str(m[0].message)
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser >= date
- tm.assert_series_equal(result, pd.Series([True] * 10, name="dates"))
- assert "a TypeError will be raised" in str(m[0].message)
-
- with tm.assert_produces_warning(FutureWarning) as m:
- result = ser > date
- tm.assert_series_equal(result, pd.Series([False] + [True] * 9, name="dates"))
- assert "a TypeError will be raised" in str(m[0].message)
-
- @pytest.mark.skip(reason="GH#21359")
- def test_dt64ser_cmp_date_invalid(self, box_with_array):
- # GH#19800 datetime.date comparison raises to
- # match DatetimeIndex/Timestamp. This also matches the behavior
- # of stdlib datetime.datetime
-
- ser = pd.date_range("20010101", periods=10)
- date = ser.iloc[0].to_pydatetime().date()
-
- ser = tm.box_expected(ser, box_with_array)
- assert not (ser == date).any()
- assert (ser != date).all()
- with pytest.raises(TypeError):
- ser > date
- with pytest.raises(TypeError):
- ser < date
- with pytest.raises(TypeError):
- ser >= date
- with pytest.raises(TypeError):
- ser <= date
-
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
@@ -352,28 +337,6 @@ def test_dt64arr_timestamp_equality(self, box_with_array):
expected = tm.box_expected([False, False], xbox)
tm.assert_equal(result, expected)
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
- )
- def test_comparison_tzawareness_compat(self, op):
- # GH#18162
- dr = pd.date_range("2016-01-01", periods=6)
- dz = dr.tz_localize("US/Pacific")
-
- # Check that there isn't a problem aware-aware and naive-naive do not
- # raise
- naive_series = Series(dr)
- aware_series = Series(dz)
- msg = "Cannot compare tz-naive and tz-aware"
- with pytest.raises(TypeError, match=msg):
- op(dz, naive_series)
- with pytest.raises(TypeError, match=msg):
- op(dr, aware_series)
-
- # TODO: implement _assert_tzawareness_compat for the reverse
- # comparison with the Series on the left-hand side
-
class TestDatetimeIndexComparisons:
@@ -427,57 +390,6 @@ def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
- def dt64arr_cmp_non_datetime(self, tz_naive_fixture, box_with_array):
- # GH#19301 by convention datetime.date is not considered comparable
- # to Timestamp or DatetimeIndex. This may change in the future.
- tz = tz_naive_fixture
- dti = pd.date_range("2016-01-01", periods=2, tz=tz)
- dtarr = tm.box_expected(dti, box_with_array)
-
- other = datetime(2016, 1, 1).date()
- assert not (dtarr == other).any()
- assert (dtarr != other).all()
- with pytest.raises(TypeError):
- dtarr < other
- with pytest.raises(TypeError):
- dtarr <= other
- with pytest.raises(TypeError):
- dtarr > other
- with pytest.raises(TypeError):
- dtarr >= other
-
- @pytest.mark.parametrize("other", [None, np.nan, pd.NaT])
- def test_dti_eq_null_scalar(self, other, tz_naive_fixture):
- # GH#19301
- tz = tz_naive_fixture
- dti = pd.date_range("2016-01-01", periods=2, tz=tz)
- assert not (dti == other).any()
-
- @pytest.mark.parametrize("other", [None, np.nan, pd.NaT])
- def test_dti_ne_null_scalar(self, other, tz_naive_fixture):
- # GH#19301
- tz = tz_naive_fixture
- dti = pd.date_range("2016-01-01", periods=2, tz=tz)
- assert (dti != other).all()
-
- @pytest.mark.parametrize("other", [None, np.nan])
- def test_dti_cmp_null_scalar_inequality(
- self, tz_naive_fixture, other, box_with_array
- ):
- # GH#19301
- tz = tz_naive_fixture
- dti = pd.date_range("2016-01-01", periods=2, tz=tz)
- dtarr = tm.box_expected(dti, box_with_array)
- msg = "Invalid comparison between"
- with pytest.raises(TypeError, match=msg):
- dtarr < other
- with pytest.raises(TypeError, match=msg):
- dtarr <= other
- with pytest.raises(TypeError, match=msg):
- dtarr > other
- with pytest.raises(TypeError, match=msg):
- dtarr >= other
-
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
if box_with_array is tm.to_array and dtype is object:
@@ -654,15 +566,18 @@ def test_comparison_tzawareness_compat(self, op, box_df_fail):
with pytest.raises(TypeError, match=msg):
op(dz, np.array(list(dr), dtype=object))
- # Check that there isn't a problem aware-aware and naive-naive do not
- # raise
- assert_all(dr == dr)
- assert_all(dz == dz)
+ # The aware==aware and naive==naive comparisons should *not* raise
+ assert np.all(dr == dr)
+ assert np.all(dr == list(dr))
+ assert np.all(list(dr) == dr)
+ assert np.all(np.array(list(dr), dtype=object) == dr)
+ assert np.all(dr == np.array(list(dr), dtype=object))
- # FIXME: DataFrame case fails to raise for == and !=, wrong
- # message for inequalities
- assert (dr == list(dr)).all()
- assert (dz == list(dz)).all()
+ assert np.all(dz == dz)
+ assert np.all(dz == list(dz))
+ assert np.all(list(dz) == dz)
+ assert np.all(np.array(list(dz), dtype=object) == dz)
+ assert np.all(dz == np.array(list(dz), dtype=object))
@pytest.mark.parametrize(
"op",
@@ -680,12 +595,12 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
ts = pd.Timestamp("2000-03-14 01:59")
ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
- assert_all(dr > ts)
+ assert np.all(dr > ts)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
- assert_all(dz > ts_tz)
+ assert np.all(dz > ts_tz)
with pytest.raises(TypeError, match=msg):
op(dz, ts)
@@ -705,6 +620,7 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
+ @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, op, other, tz_aware_fixture, box_with_array
):
@@ -766,34 +682,6 @@ def test_dti_cmp_str(self, tz_naive_fixture):
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
- @pytest.mark.parametrize("other", ["foo", 99, 4.0, object(), timedelta(days=2)])
- def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
- # GH#22074
- tz = tz_naive_fixture
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
-
- rng = date_range("1/1/2000", periods=10, tz=tz)
- rng = tm.box_expected(rng, box_with_array)
-
- result = rng == other
- expected = np.array([False] * 10)
- expected = tm.box_expected(expected, xbox)
- tm.assert_equal(result, expected)
-
- result = rng != other
- expected = np.array([True] * 10)
- expected = tm.box_expected(expected, xbox)
- tm.assert_equal(result, expected)
- msg = "Invalid comparison between"
- with pytest.raises(TypeError, match=msg):
- rng < other
- with pytest.raises(TypeError, match=msg):
- rng <= other
- with pytest.raises(TypeError, match=msg):
- rng > other
- with pytest.raises(TypeError, match=msg):
- rng >= other
-
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
@@ -926,6 +814,7 @@ def test_dt64arr_isub_timedeltalike_scalar(
rng -= two_hours
tm.assert_equal(rng, expected)
+ # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
@@ -1135,7 +1024,13 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array):
def test_dt64arr_add_sub_float(self, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
dtarr = tm.box_expected(dti, box_with_array)
- msg = "|".join(["unsupported operand type", "cannot (add|subtract)"])
+ msg = "|".join(
+ [
+ "unsupported operand type",
+ "cannot (add|subtract)",
+ "ufunc '?(add|subtract)'? cannot use operands with types",
+ ]
+ )
with pytest.raises(TypeError, match=msg):
dtarr + other
with pytest.raises(TypeError, match=msg):
@@ -1785,14 +1680,12 @@ def test_operators_datetimelike(self):
dt1 - dt2
dt2 - dt1
- # ## datetime64 with timetimedelta ###
+ # datetime64 with timetimedelta
dt1 + td1
td1 + dt1
dt1 - td1
- # TODO: Decide if this ought to work.
- # td1 - dt1
- # ## timetimedelta with datetime64 ###
+ # timetimedelta with datetime64
td1 + dt1
dt1 + td1
@@ -1990,7 +1883,7 @@ def test_dt64_series_add_intlike(self, tz, op):
with pytest.raises(TypeError, match=msg):
method(other)
with pytest.raises(TypeError, match=msg):
- method(other.values)
+ method(np.array(other))
with pytest.raises(TypeError, match=msg):
method(pd.Index(other))
@@ -2288,6 +2181,23 @@ def test_add_datetimelike_and_dti(self, addend, tz):
# -------------------------------------------------------------
+ def test_dta_add_sub_index(self, tz_naive_fixture):
+ # Check that DatetimeArray defers to Index classes
+ dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
+ dta = dti.array
+ result = dta - dti
+ expected = dti - dti
+ tm.assert_index_equal(result, expected)
+
+ tdi = result
+ result = dta + tdi
+ expected = dti + tdi
+ tm.assert_index_equal(result, expected)
+
+ result = dta - tdi
+ expected = dti - tdi
+ tm.assert_index_equal(result, expected)
+
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
@@ -2439,34 +2349,34 @@ def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
+ exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
- exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
+ exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
- exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
+ exp = DatetimeIndex(
+ ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
+ )
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
- exp = DatetimeIndex(
- ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
- )
tm.assert_index_equal(result, exp)
assert result.freq == "3D"
+ exp = DatetimeIndex(
+ ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
+ )
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
- exp = DatetimeIndex(
- ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
- )
tm.assert_index_equal(result, exp)
assert result.freq == "D"
@@ -2591,23 +2501,3 @@ def test_shift_months(years, months):
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
-
-
-class SubDatetime(datetime):
- pass
-
-
-@pytest.mark.parametrize(
- "lh,rh",
- [
- (SubDatetime(2000, 1, 1), Timedelta(hours=1)),
- (Timedelta(hours=1), SubDatetime(2000, 1, 1)),
- ],
-)
-def test_dt_subclass_add_timedelta(lh, rh):
- # GH 25851
- # ensure that subclassed datetime works for
- # Timedelta operations
- result = lh + rh
- expected = SubDatetime(2000, 1, 1, 1)
- assert result == expected
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 2b23790e4ccd3..584e22f8488f5 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -73,10 +73,10 @@ def test_compare_invalid(self):
# ------------------------------------------------------------------
-# Numeric dtypes Arithmetic with Timedelta Scalar
+# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar
-class TestNumericArraylikeArithmeticWithTimedeltaLike:
+class TestNumericArraylikeArithmeticWithDatetimeLike:
# TODO: also check name retentention
@pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series])
@@ -235,6 +235,30 @@ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
with pytest.raises(TypeError):
other - left
+ @pytest.mark.parametrize(
+ "other",
+ [
+ pd.Timestamp.now().to_pydatetime(),
+ pd.Timestamp.now(tz="UTC").to_pydatetime(),
+ pd.Timestamp.now().to_datetime64(),
+ pd.NaT,
+ ],
+ )
+ @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
+ def test_add_sub_datetimelike_invalid(self, numeric_idx, other, box):
+ # GH#28080 numeric+datetime64 should raise; Timestamp raises
+ # NullFrequencyError instead of TypeError so is excluded.
+ left = tm.box_expected(numeric_idx, box)
+
+ with pytest.raises(TypeError):
+ left + other
+ with pytest.raises(TypeError):
+ other + left
+ with pytest.raises(TypeError):
+ left - other
+ with pytest.raises(TypeError):
+ other - left
+
# ------------------------------------------------------------------
# Arithmetic
@@ -561,9 +585,9 @@ def test_div_int(self, numeric_idx):
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv])
- def test_mul_int_identity(self, op, numeric_idx, box):
+ def test_mul_int_identity(self, op, numeric_idx, box_with_array):
idx = numeric_idx
- idx = tm.box_expected(idx, box)
+ idx = tm.box_expected(idx, box_with_array)
result = op(idx, 1)
tm.assert_equal(result, idx)
@@ -615,8 +639,9 @@ def test_mul_size_mismatch_raises(self, numeric_idx):
idx * np.array([1, 2])
@pytest.mark.parametrize("op", [operator.pow, ops.rpow])
- def test_pow_float(self, op, numeric_idx, box):
+ def test_pow_float(self, op, numeric_idx, box_with_array):
# test power calculations both ways, GH#14973
+ box = box_with_array
idx = numeric_idx
expected = pd.Float64Index(op(idx.values, 2.0))
@@ -626,8 +651,9 @@ def test_pow_float(self, op, numeric_idx, box):
result = op(idx, 2.0)
tm.assert_equal(result, expected)
- def test_modulo(self, numeric_idx, box):
+ def test_modulo(self, numeric_idx, box_with_array):
# GH#9244
+ box = box_with_array
idx = numeric_idx
expected = Index(idx.values % 2)
@@ -1041,7 +1067,8 @@ class TestObjectDtypeEquivalence:
# Tests that arithmetic operations match operations executed elementwise
@pytest.mark.parametrize("dtype", [None, object])
- def test_numarr_with_dtype_add_nan(self, dtype, box):
+ def test_numarr_with_dtype_add_nan(self, dtype, box_with_array):
+ box = box_with_array
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
@@ -1055,7 +1082,8 @@ def test_numarr_with_dtype_add_nan(self, dtype, box):
tm.assert_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
- def test_numarr_with_dtype_add_int(self, dtype, box):
+ def test_numarr_with_dtype_add_int(self, dtype, box_with_array):
+ box = box_with_array
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([2, 3, 4], dtype=dtype)
@@ -1227,3 +1255,36 @@ def test_addsub_arithmetic(self, dtype, delta):
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
+
+
+def test_fill_value_inf_masking():
+ # GH #27464 make sure we mask 0/1 with Inf and not NaN
+ df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]})
+
+ other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3])
+
+ result = df.rfloordiv(other, fill_value=1)
+
+ expected = pd.DataFrame(
+ {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]}
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dataframe_div_silenced():
+ # GH#26793
+ pdf1 = pd.DataFrame(
+ {
+ "A": np.arange(10),
+ "B": [np.nan, 1, 2, 3, 4] * 2,
+ "C": [np.nan] * 10,
+ "D": np.arange(10),
+ },
+ index=list("abcdefghij"),
+ columns=list("ABCD"),
+ )
+ pdf2 = pd.DataFrame(
+ np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
+ )
+ with tm.assert_produces_warning(None):
+ pdf1.div(pdf2, fill_value=0)
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index fd9db80671360..f9c1de115b3a4 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -89,7 +89,7 @@ def test_pow_ops_object(self):
@pytest.mark.parametrize("op", [operator.add, ops.radd])
@pytest.mark.parametrize("other", ["category", "Int64"])
- def test_add_extension_scalar(self, other, box, op):
+ def test_add_extension_scalar(self, other, box_with_array, op):
# GH#22378
# Check that scalars satisfying is_extension_array_dtype(obj)
# do not incorrectly try to dispatch to an ExtensionArray operation
@@ -97,8 +97,8 @@ def test_add_extension_scalar(self, other, box, op):
arr = pd.Series(["a", "b", "c"])
expected = pd.Series([op(x, other) for x in arr])
- arr = tm.box_expected(arr, box)
- expected = tm.box_expected(expected, box)
+ arr = tm.box_expected(arr, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
result = op(arr, other)
tm.assert_equal(result, expected)
@@ -133,16 +133,17 @@ def test_objarr_radd_str(self, box):
],
)
@pytest.mark.parametrize("dtype", [None, object])
- def test_objarr_radd_str_invalid(self, dtype, data, box):
+ def test_objarr_radd_str_invalid(self, dtype, data, box_with_array):
ser = Series(data, dtype=dtype)
- ser = tm.box_expected(ser, box)
+ ser = tm.box_expected(ser, box_with_array)
with pytest.raises(TypeError):
"foo_" + ser
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
- def test_objarr_add_invalid(self, op, box):
+ def test_objarr_add_invalid(self, op, box_with_array):
# invalid ops
+ box = box_with_array
obj_ser = tm.makeObjectSeries()
obj_ser.name = "objects"
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index e54c16c7a27a4..ed693d873efb8 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -12,6 +12,7 @@
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
+from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
@@ -572,12 +573,19 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array):
@pytest.mark.parametrize(
"other",
[
+ # datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
+ # datetime-like arrays
+ pd.date_range("2016-01-01", periods=3, freq="H"),
+ pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
+ pd.date_range("2016-01-01", periods=3, freq="S")._data,
+ pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
+ # Miscellaneous invalid types
],
)
- def test_parr_add_sub_datetime_scalar(self, other, box_with_array):
+ def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
@@ -594,23 +602,6 @@ def test_parr_add_sub_datetime_scalar(self, other, box_with_array):
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
- def test_parr_add_sub_dt64_array_raises(self, box_with_array):
- rng = pd.period_range("1/1/2000", freq="D", periods=3)
- dti = pd.date_range("2016-01-01", periods=3)
- dtarr = dti.values
-
- rng = tm.box_expected(rng, box_with_array)
-
- with pytest.raises(TypeError):
- rng + dtarr
- with pytest.raises(TypeError):
- dtarr + rng
-
- with pytest.raises(TypeError):
- rng - dtarr
- with pytest.raises(TypeError):
- dtarr - rng
-
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
@@ -1013,6 +1004,45 @@ def test_parr_add_sub_td64_nat(self, box_transpose_fail):
with pytest.raises(TypeError):
other - obj
+ @pytest.mark.parametrize(
+ "other",
+ [
+ np.array(["NaT"] * 9, dtype="m8[ns]"),
+ TimedeltaArray._from_sequence(["NaT"] * 9),
+ ],
+ )
+ def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
+ # FIXME: DataFrame fails because when when operating column-wise
+ # timedelta64 entries become NaT and are treated like datetimes
+ box = box_df_fail
+
+ pi = pd.period_range("1994-04-01", periods=9, freq="19D")
+ expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
+
+ obj = tm.box_expected(pi, box)
+ expected = tm.box_expected(expected, box)
+
+ result = obj + other
+ tm.assert_equal(result, expected)
+ result = other + obj
+ tm.assert_equal(result, expected)
+ result = obj - other
+ tm.assert_equal(result, expected)
+ with pytest.raises(TypeError):
+ other - obj
+
+ # ---------------------------------------------------------------
+ # Unsorted
+
+ def test_parr_add_sub_index(self):
+ # Check that PeriodArray defers to Index on arithmetic ops
+ pi = pd.period_range("2000-12-31", periods=3)
+ parr = pi.array
+
+ result = parr - pi
+ expected = pi - pi
+ tm.assert_index_equal(result, expected)
+
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 326c565308124..d480b26e30fff 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -18,6 +18,7 @@
Timestamp,
timedelta_range,
)
+from pandas.tests.arithmetic.test_datetime64 import assert_invalid_comparison
import pandas.util.testing as tm
@@ -61,42 +62,33 @@ def test_compare_timedelta64_zerodim(self, box_with_array):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
-
-class TestTimedelta64ArrayComparisons:
- # TODO: All of these need to be parametrized over box
-
- def test_compare_timedelta_series(self):
+ @pytest.mark.parametrize(
+ "td_scalar",
+ [timedelta(days=1), Timedelta(days=1), Timedelta(days=1).to_timedelta64()],
+ )
+ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
# regression test for GH#5963
- s = pd.Series([timedelta(days=1), timedelta(days=2)])
- actual = s > timedelta(days=1)
+ box = box_with_array
+ xbox = box if box is not pd.Index else np.ndarray
+ ser = pd.Series([timedelta(days=1), timedelta(days=2)])
+ ser = tm.box_expected(ser, box)
+ actual = ser > td_scalar
expected = pd.Series([False, True])
- tm.assert_series_equal(actual, expected)
+ expected = tm.box_expected(expected, xbox)
+ tm.assert_equal(actual, expected)
- def test_tdi_cmp_str_invalid(self, box_with_array):
- # GH#13624
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
- tdi = TimedeltaIndex(["1 day", "2 days"])
- tdarr = tm.box_expected(tdi, box_with_array)
+ @pytest.mark.parametrize("invalid", [345600000000000, "a"])
+ def test_td64_comparisons_invalid(self, box_with_array, invalid):
+ # GH#13624 for str
+ box = box_with_array
+ rng = timedelta_range("1 days", periods=10)
+ obj = tm.box_expected(rng, box)
- for left, right in [(tdarr, "a"), ("a", tdarr)]:
- with pytest.raises(TypeError):
- left > right
- with pytest.raises(TypeError):
- left >= right
- with pytest.raises(TypeError):
- left < right
- with pytest.raises(TypeError):
- left <= right
-
- result = left == right
- expected = np.array([False, False], dtype=bool)
- expected = tm.box_expected(expected, xbox)
- tm.assert_equal(result, expected)
+ assert_invalid_comparison(obj, invalid, box)
- result = left != right
- expected = np.array([True, True], dtype=bool)
- expected = tm.box_expected(expected, xbox)
- tm.assert_equal(result, expected)
+
+class TestTimedelta64ArrayComparisons:
+ # TODO: All of these need to be parametrized over box
@pytest.mark.parametrize("dtype", [None, object])
def test_comp_nat(self, dtype):
@@ -191,10 +183,6 @@ def test_comparisons_coverage(self):
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
- # raise TypeError for now
- with pytest.raises(TypeError):
- rng < rng[3].value
-
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
@@ -253,10 +241,7 @@ def test_subtraction_ops(self):
with pytest.raises(TypeError, match=msg):
tdi - dti
- msg = (
- r"descriptor '__sub__' requires a 'datetime\.datetime' object"
- " but received a 'Timedelta'"
- )
+ msg = r"unsupported operand type\(s\) for -"
with pytest.raises(TypeError, match=msg):
td - dt
@@ -480,6 +465,25 @@ def test_timedelta(self, freq):
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
+ def test_tda_add_sub_index(self):
+ # Check that TimedeltaArray defers to Index on arithmetic ops
+ tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
+ tda = tdi.array
+
+ dti = pd.date_range("1999-12-31", periods=3, freq="D")
+
+ result = tda + dti
+ expected = tdi + dti
+ tm.assert_index_equal(result, expected)
+
+ result = tda + tdi
+ expected = tdi + tdi
+ tm.assert_index_equal(result, expected)
+
+ result = tda - tdi
+ expected = tdi - tdi
+ tm.assert_index_equal(result, expected)
+
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
@@ -816,19 +820,10 @@ def test_timedelta64_ops_nat(self):
# -------------------------------------------------------------
# Invalid Operations
- def test_td64arr_add_str_invalid(self, box_with_array):
- # GH#13624
+ @pytest.mark.parametrize("other", ["a", 3.14, np.array([2.0, 3.0])])
+ def test_td64arr_add_sub_invalid(self, box_with_array, other):
+ # GH#13624 for str
tdi = TimedeltaIndex(["1 day", "2 days"])
- tdi = tm.box_expected(tdi, box_with_array)
-
- with pytest.raises(TypeError):
- tdi + "a"
- with pytest.raises(TypeError):
- "a" + tdi
-
- @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
- def test_td64arr_add_sub_float(self, box_with_array, other):
- tdi = TimedeltaIndex(["-1 days", "-1 days"])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
@@ -970,71 +965,37 @@ def test_td64arr_add_datetime64_nat(self, box_with_array):
# ------------------------------------------------------------------
# Operations with int-like others
- def test_td64arr_add_int_series_invalid(self, box):
- tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
- tdser = tm.box_expected(tdser, box)
- err = TypeError if box is not pd.Index else NullFrequencyError
- int_ser = Series([2, 3, 4])
-
- with pytest.raises(err):
- tdser + int_ser
- with pytest.raises(err):
- int_ser + tdser
- with pytest.raises(err):
- tdser - int_ser
- with pytest.raises(err):
- int_ser - tdser
-
- def test_td64arr_add_intlike(self, box_with_array):
- # GH#19123
- tdi = TimedeltaIndex(["59 days", "59 days", "NaT"])
- ser = tm.box_expected(tdi, box_with_array)
-
- err = TypeError
- if box_with_array in [pd.Index, tm.to_array]:
- err = NullFrequencyError
-
- other = Series([20, 30, 40], dtype="uint8")
-
- # TODO: separate/parametrize
- with pytest.raises(err):
- ser + 1
- with pytest.raises(err):
- ser - 1
-
- with pytest.raises(err):
- ser + other
- with pytest.raises(err):
- ser - other
-
- with pytest.raises(err):
- ser + np.array(other)
- with pytest.raises(err):
- ser - np.array(other)
-
- with pytest.raises(err):
- ser + pd.Index(other)
- with pytest.raises(err):
- ser - pd.Index(other)
-
- @pytest.mark.parametrize("scalar", [1, 1.5, np.array(2)])
- def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar):
+ @pytest.mark.parametrize(
+ "other",
+ [
+ # GH#19123
+ 1,
+ Series([20, 30, 40], dtype="uint8"),
+ np.array([20, 30, 40], dtype="uint8"),
+ pd.UInt64Index([20, 30, 40]),
+ pd.Int64Index([20, 30, 40]),
+ Series([2, 3, 4]),
+ 1.5,
+ np.array(2),
+ ],
+ )
+ def test_td64arr_addsub_numeric_invalid(self, box_with_array, other):
box = box_with_array
-
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
+
err = TypeError
- if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
+ if box in [pd.Index, tm.to_array] and not isinstance(other, float):
err = NullFrequencyError
with pytest.raises(err):
- tdser + scalar
+ tdser + other
with pytest.raises(err):
- scalar + tdser
+ other + tdser
with pytest.raises(err):
- tdser - scalar
+ tdser - other
with pytest.raises(err):
- scalar - tdser
+ other - tdser
@pytest.mark.parametrize(
"dtype",
@@ -1061,11 +1022,12 @@ def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar):
],
ids=lambda x: type(x).__name__,
)
- def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
+ def test_td64arr_add_sub_numeric_arr_invalid(self, box_with_array, vec, dtype):
+ box = box_with_array
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError
- if box is pd.Index and not dtype.startswith("float"):
+ if box in [pd.Index, tm.to_array] and not dtype.startswith("float"):
err = NullFrequencyError
vector = vec.astype(dtype)
@@ -1082,14 +1044,6 @@ def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
- @pytest.mark.parametrize(
- "scalar_td",
- [
- timedelta(minutes=5, seconds=4),
- Timedelta(minutes=5, seconds=4),
- Timedelta("5m4s").to_timedelta64(),
- ],
- )
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
@@ -1143,7 +1097,8 @@ def test_timedelta64_operations_with_timedeltas(self):
# roundtrip
tm.assert_series_equal(result + td2, td1)
- def test_td64arr_add_td64_array(self, box):
+ def test_td64arr_add_td64_array(self, box_with_array):
+ box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
@@ -1157,7 +1112,8 @@ def test_td64arr_add_td64_array(self, box):
result = tdarr + tdi
tm.assert_equal(result, expected)
- def test_td64arr_sub_td64_array(self, box):
+ def test_td64arr_sub_td64_array(self, box_with_array):
+ box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
@@ -1231,8 +1187,9 @@ def test_td64arr_add_sub_tdi(self, box, names):
else:
assert result.dtypes[0] == "timedelta64[ns]"
- def test_td64arr_add_sub_td64_nat(self, box):
+ def test_td64arr_add_sub_td64_nat(self, box_with_array):
# GH#23320 special handling for timedelta64("NaT")
+ box = box_with_array
tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
@@ -1249,8 +1206,9 @@ def test_td64arr_add_sub_td64_nat(self, box):
result = other - obj
tm.assert_equal(result, expected)
- def test_td64arr_sub_NaT(self, box):
+ def test_td64arr_sub_NaT(self, box_with_array):
# GH#18808
+ box = box_with_array
ser = Series([NaT, Timedelta("1s")])
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
@@ -1260,8 +1218,9 @@ def test_td64arr_sub_NaT(self, box):
res = ser - pd.NaT
tm.assert_equal(res, expected)
- def test_td64arr_add_timedeltalike(self, two_hours, box):
+ def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
+ box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
@@ -1270,8 +1229,9 @@ def test_td64arr_add_timedeltalike(self, two_hours, box):
result = rng + two_hours
tm.assert_equal(result, expected)
- def test_td64arr_sub_timedeltalike(self, two_hours, box):
+ def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
+ box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
@@ -1354,8 +1314,9 @@ def test_td64arr_add_offset_index(self, names, box):
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
- def test_td64arr_add_offset_array(self, box):
+ def test_td64arr_add_offset_array(self, box_with_array):
# GH#18849
+ box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
@@ -1380,8 +1341,12 @@ def test_td64arr_add_offset_array(self, box):
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
- def test_td64arr_sub_offset_index(self, names, box):
+ def test_td64arr_sub_offset_index(self, names, box_with_array):
# GH#18824, GH#19744
+ box = box_with_array
+ xbox = box if box is not tm.to_array else pd.Index
+ exname = names[2] if box is not tm.to_array else names[1]
+
if box is pd.DataFrame and names[1] == "bar":
pytest.skip(
"Name propagation for DataFrame does not behave like "
@@ -1392,11 +1357,11 @@ def test_td64arr_sub_offset_index(self, names, box):
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
- [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=names[2]
+ [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
)
tdi = tm.box_expected(tdi, box)
- expected = tm.box_expected(expected, box)
+ expected = tm.box_expected(expected, xbox)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
@@ -1431,13 +1396,12 @@ def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
+ exname = names[2] if box is not tm.to_array else names[1]
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
- expected_add = Series(
- [tdi[n] + other[n] for n in range(len(tdi))], name=names[2]
- )
+ expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
@@ -1450,9 +1414,7 @@ def test_td64arr_with_offset_series(self, names, box_df_fail):
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
- expected_sub = Series(
- [tdi[n] - other[n] for n in range(len(tdi))], name=names[2]
- )
+ expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
@@ -1612,7 +1574,7 @@ def test_td64arr_div_nat_invalid(self, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
- with pytest.raises(TypeError, match="'?true_divide'? cannot use operands"):
+ with pytest.raises(TypeError, match="unsupported operand type"):
rng / pd.NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
pd.NaT / rng
@@ -2053,6 +2015,8 @@ def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
def test_td64arr_mul_int_series(self, box_df_fail, names):
# GH#19042 test for correct name attachment
box = box_df_fail # broadcasts along wrong axis, but doesn't raise
+ exname = names[2] if box is not tm.to_array else names[1]
+
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
@@ -2062,11 +2026,11 @@ def test_td64arr_mul_int_series(self, box_df_fail, names):
expected = Series(
["0days", "1day", "4days", "9days", "16days"],
dtype="timedelta64[ns]",
- name=names[2],
+ name=exname,
)
tdi = tm.box_expected(tdi, box)
- box = Series if (box is pd.Index and type(ser) is Series) else box
+ box = Series if (box is pd.Index or box is tm.to_array) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
@@ -2117,7 +2081,11 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
tm.assert_equal(result, expected)
-class TestTimedeltaArraylikeInvalidArithmeticOps:
+class TestTimedelta64ArrayLikeArithmetic:
+ # Arithmetic tests for timedelta64[ns] vectors fully parametrized over
+ # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic
+ # tests will eventually end up here.
+
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 1b62479530d24..3037ac79cd592 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -5,7 +5,7 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas import Categorical, Index, isna
+from pandas import Categorical, Index, Series, isna
import pandas.util.testing as tm
@@ -59,11 +59,13 @@ def test_set_item_nan(self):
),
(dict(), "Must specify a fill 'value' or 'method'."),
(dict(method="bad"), "Invalid fill method. Expecting .* bad"),
+ (dict(value=Series([1, 2, 3, 4, "a"])), "fill value must be in categories"),
],
)
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
- cat = Categorical([1, 2, 3])
+ # https://github.com/pandas-dev/pandas/issues/13628
+ cat = Categorical([1, 2, 3, None, None])
with pytest.raises(ValueError, match=msg):
cat.fillna(**fillna_kwargs)
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 9a09ea8422b1f..22c1d5373372a 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -349,7 +349,9 @@ def test_numeric_like_ops(self):
("__mul__", r"\*"),
("__truediv__", "/"),
]:
- msg = r"Series cannot perform the operation {}".format(str_rep)
+ msg = r"Series cannot perform the operation {}|unsupported operand".format(
+ str_rep
+ )
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
@@ -375,7 +377,9 @@ def test_numeric_like_ops(self):
("__mul__", r"\*"),
("__truediv__", "/"),
]:
- msg = r"Series cannot perform the operation {}".format(str_rep)
+ msg = r"Series cannot perform the operation {}|unsupported operand".format(
+ str_rep
+ )
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 82409df5b46f7..655a6e717119b 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -42,10 +42,9 @@ class TestAttributes:
(0, 1),
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timestamp("2018-01-02")),
- pytest.param(
+ (
Timestamp("2018-01-01", tz="US/Eastern"),
Timestamp("2018-01-02", tz="US/Eastern"),
- marks=pytest.mark.xfail(strict=True, reason="GH 27011"),
),
],
)
@@ -94,8 +93,13 @@ def test_set_na(self, left_right_dtypes):
tm.assert_extension_array_equal(result, expected)
-def test_repr_matches():
- idx = IntervalIndex.from_breaks([1, 2, 3])
- a = repr(idx)
- b = repr(idx.values)
- assert a.replace("Index", "Array") == b
+def test_repr():
+ # GH 25022
+ arr = IntervalArray.from_tuples([(0, 1), (1, 2)])
+ result = repr(arr)
+ expected = (
+ "<IntervalArray>\n"
+ "[(0, 1], (1, 2]]\n"
+ "Length: 2, closed: right, dtype: interval[int64]"
+ )
+ assert result == expected
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 57e5a35d99e48..f1d2803ce5505 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -5,7 +5,7 @@
import pandas as pd
from pandas.core import ops
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
import pandas.util.testing as tm
@@ -21,8 +21,6 @@ def mix(request):
return request.param
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseArrayArithmetics:
_base = np.array
@@ -391,48 +389,6 @@ def test_mixed_array_comparison(self, kind):
self._check_comparison_ops(a, b, values, rvalues)
-class TestSparseSeriesArithmetic(TestSparseArrayArithmetics):
-
- _base = pd.Series
- _klass = pd.SparseSeries
-
- def _assert(self, a, b):
- tm.assert_series_equal(a, b)
-
- def test_alignment(self, mix, all_arithmetic_functions):
- op = all_arithmetic_functions
-
- da = pd.Series(np.arange(4))
- db = pd.Series(np.arange(4), index=[1, 2, 3, 4])
-
- sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
- sb = pd.SparseSeries(
- np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=0
- )
- self._check_numeric_ops(sa, sb, da, db, mix, op)
-
- sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
- sb = pd.SparseSeries(
- np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=np.nan
- )
- self._check_numeric_ops(sa, sb, da, db, mix, op)
-
- da = pd.Series(np.arange(4))
- db = pd.Series(np.arange(4), index=[10, 11, 12, 13])
-
- sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
- sb = pd.SparseSeries(
- np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=0
- )
- self._check_numeric_ops(sa, sb, da, db, mix, op)
-
- sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
- sb = pd.SparseSeries(
- np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=np.nan
- )
- self._check_numeric_ops(sa, sb, da, db, mix, op)
-
-
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
arr = pd.SparseArray([0, 1], fill_value=0)
@@ -441,6 +397,23 @@ def test_with_list(op):
tm.assert_sp_array_equal(result, expected)
+def test_with_dataframe():
+ # GH#27910
+ arr = pd.SparseArray([0, 1], fill_value=0)
+ df = pd.DataFrame([[1, 2], [3, 4]])
+ result = arr.__add__(df)
+ assert result is NotImplemented
+
+
+def test_with_zerodim_ndarray():
+ # GH#27910
+ arr = pd.SparseArray([0, 1], fill_value=0)
+
+ result = arr * np.array(2)
+ expected = arr * 2
+ tm.assert_sp_array_equal(result, expected)
+
+
@pytest.mark.parametrize("ufunc", [np.abs, np.exp])
@pytest.mark.parametrize(
"arr", [pd.SparseArray([0, 0, -1, 1]), pd.SparseArray([None, None, -1, 1])]
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index b94e2a16d217a..c02d8ae4e7429 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import isna
-from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@@ -221,36 +221,6 @@ def test_scalar_with_index_infer_dtype(self, scalar, dtype):
assert arr.dtype == dtype
assert exp.dtype == dtype
- @pytest.mark.parametrize("fill", [1, np.nan, 0])
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- def test_sparse_series_round_trip(self, kind, fill):
- # see gh-13999
- arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind, fill_value=fill)
- res = SparseArray(SparseSeries(arr))
- tm.assert_sp_array_equal(arr, res)
-
- arr = SparseArray(
- [0, 0, 0, 1, 1, 2], dtype=np.int64, kind=kind, fill_value=fill
- )
- res = SparseArray(SparseSeries(arr), dtype=np.int64)
- tm.assert_sp_array_equal(arr, res)
-
- res = SparseArray(SparseSeries(arr))
- tm.assert_sp_array_equal(arr, res)
-
- @pytest.mark.parametrize("fill", [True, False, np.nan])
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- def test_sparse_series_round_trip2(self, kind, fill):
- # see gh-13999
- arr = SparseArray(
- [True, False, True, True], dtype=np.bool, kind=kind, fill_value=fill
- )
- res = SparseArray(SparseSeries(arr))
- tm.assert_sp_array_equal(arr, res)
-
- res = SparseArray(SparseSeries(arr))
- tm.assert_sp_array_equal(arr, res)
-
def test_get_item(self):
assert np.isnan(self.arr[1])
@@ -1142,7 +1112,6 @@ def test_npoints(self):
assert arr.npoints == 1
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"])
def test_get_attributes(self, attr):
diff --git a/pandas/tests/arrays/sparse/test_combine_concat.py b/pandas/tests/arrays/sparse/test_combine_concat.py
new file mode 100644
index 0000000000000..4ad1aa60e7b4f
--- /dev/null
+++ b/pandas/tests/arrays/sparse/test_combine_concat.py
@@ -0,0 +1,31 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.util.testing as tm
+
+
+class TestSparseArrayConcat:
+ @pytest.mark.parametrize("kind", ["integer", "block"])
+ def test_basic(self, kind):
+ a = pd.SparseArray([1, 0, 0, 2], kind=kind)
+ b = pd.SparseArray([1, 0, 2, 2], kind=kind)
+
+ result = pd.SparseArray._concat_same_type([a, b])
+ # Can't make any assertions about the sparse index itself
+ # since we aren't don't merge sparse blocs across arrays
+ # in to_concat
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
+ tm.assert_numpy_array_equal(result.sp_values, expected)
+ assert result.kind == kind
+
+ @pytest.mark.parametrize("kind", ["integer", "block"])
+ def test_uses_first_kind(self, kind):
+ other = "integer" if kind == "block" else "block"
+ a = pd.SparseArray([1, 0, 0, 2], kind=kind)
+ b = pd.SparseArray([1, 0, 2, 2], kind=other)
+
+ result = pd.SparseArray._concat_same_type([a, b])
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
+ tm.assert_numpy_array_equal(result.sp_values, expected)
+ assert result.kind == kind
diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py
index db8f62962f0b0..aa8d2afca11e6 100644
--- a/pandas/tests/arrays/sparse/test_dtype.py
+++ b/pandas/tests/arrays/sparse/test_dtype.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
@pytest.mark.parametrize(
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index d9646feaf661e..7c482664bca48 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas._libs import OutOfBoundsDatetime
+
import pandas as pd
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
import pandas.util.testing as tm
@@ -462,6 +464,13 @@ def test_concat_same_type_different_freq(self):
tm.assert_datetime_array_equal(result, expected)
+ def test_strftime(self, datetime_index):
+ arr = DatetimeArray(datetime_index)
+
+ result = arr.strftime("%Y %b")
+ expected = np.array(datetime_index.strftime("%Y %b"))
+ tm.assert_numpy_array_equal(result, expected)
+
class TestTimedeltaArray(SharedTests):
index_cls = pd.TimedeltaIndex
@@ -608,6 +617,15 @@ def test_to_timestamp(self, how, period_index):
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
+ def test_to_timestamp_out_of_bounds(self):
+ # GH#19643 previously overflowed silently
+ pi = pd.period_range("1500", freq="Y", periods=3)
+ with pytest.raises(OutOfBoundsDatetime):
+ pi.to_timestamp()
+
+ with pytest.raises(OutOfBoundsDatetime):
+ pi._data.to_timestamp()
+
@pytest.mark.parametrize("propname", PeriodArray._bool_ops)
def test_bool_properties(self, period_index, propname):
# in this case _bool_ops is just `is_leap_year`
@@ -652,6 +670,13 @@ def test_array_interface(self, period_index):
expected = np.asarray(arr).astype("S20")
tm.assert_numpy_array_equal(result, expected)
+ def test_strftime(self, period_index):
+ arr = PeriodArray(period_index)
+
+ result = arr.strftime("%Y")
+ expected = np.array(period_index.strftime("%Y"))
+ tm.assert_numpy_array_equal(result, expected)
+
@pytest.mark.parametrize(
"array,casting_nats",
@@ -682,15 +707,15 @@ def test_casting_nat_setitem_array(array, casting_nats):
[
(
pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
- (np.datetime64("NaT", "ns"),),
+ (np.datetime64("NaT", "ns"), pd.NaT.value),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
- (np.timedelta64("NaT", "ns"),),
+ (np.timedelta64("NaT", "ns"), pd.NaT.value),
),
(
pd.period_range("2000-01-01", periods=3, freq="D")._data,
- (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns")),
+ (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), pd.NaT.value),
),
],
ids=lambda x: type(x).__name__,
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 58c2f3fc65bb2..c3cda22497ecb 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -15,6 +15,11 @@
class TestDatetimeArrayConstructor:
+ def test_from_sequence_invalid_type(self):
+ mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
+ with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
+ DatetimeArray._from_sequence(mi)
+
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
@@ -179,6 +184,22 @@ def test_setitem_clears_freq(self):
a[0] = pd.Timestamp("2000", tz="US/Central")
assert a.freq is None
+ @pytest.mark.parametrize(
+ "obj",
+ [
+ pd.Timestamp.now(),
+ pd.Timestamp.now().to_datetime64(),
+ pd.Timestamp.now().to_pydatetime(),
+ ],
+ )
+ def test_setitem_objects(self, obj):
+ # make sure we accept datetime64 and datetime in addition to Timestamp
+ dti = pd.date_range("2000", periods=2, freq="D")
+ arr = dti._data
+
+ arr[0] = obj
+ assert arr[0] == obj
+
def test_repeat_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti)
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 8fbfb4c12f4b2..55e25caafc4ee 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
@@ -280,7 +282,7 @@ def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
other = 0.01
self._check_op(s, op, other)
- @pytest.mark.parametrize("other", [1.0, 1.0, np.array(1.0), np.array([1.0])])
+ @pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
@@ -290,6 +292,15 @@ def test_arithmetic_conversion(self, all_arithmetic_operators, other):
result = op(s, other)
assert result.dtype is np.dtype("float")
+ def test_arith_len_mismatch(self, all_arithmetic_operators):
+ # operating with a list-like with non-matching length raises
+ op = self.get_op_from_name(all_arithmetic_operators)
+ other = np.array([1.0])
+
+ s = pd.Series([1, 2, 3], dtype="Int64")
+ with pytest.raises(ValueError, match="Lengths must match"):
+ op(s, other)
+
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
@@ -322,8 +333,9 @@ def test_error(self, data, all_arithmetic_operators):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
- with pytest.raises(NotImplementedError):
- opa(pd.DataFrame({"A": s}))
+ result = opa(pd.DataFrame({"A": s}))
+ assert result is NotImplemented
+
with pytest.raises(NotImplementedError):
opa(np.arange(len(s)).reshape(-1, len(s)))
@@ -379,8 +391,6 @@ def test_compare_array(self, data, all_compare_operators):
class TestCasting:
- pass
-
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
@@ -809,6 +819,16 @@ def test_ufunc_reduce_raises(values):
np.add.reduce(a)
+@td.skip_if_no("pyarrow", min_version="0.14.1.dev")
+def test_arrow_array(data):
+ # protocol added in 0.15.0
+ import pyarrow as pa
+
+ arr = pa.array(data)
+ expected = pa.array(list(data), type=data.dtype.name.lower(), from_pandas=True)
+ assert arr.equals(expected)
+
+
# TODO(jreback) - these need testing / are broken
# shift
diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py
index c4c1696ede6e6..7a150c35fea09 100644
--- a/pandas/tests/arrays/test_numpy.py
+++ b/pandas/tests/arrays/test_numpy.py
@@ -211,3 +211,18 @@ def test_basic_binop():
result = x + x
expected = PandasArray(np.array([2, 4, 6]))
tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", [None, object])
+def test_setitem_object_typecode(dtype):
+ arr = PandasArray(np.array(["a", "b", "c"], dtype=dtype))
+ arr[0] = "t"
+ expected = PandasArray(np.array(["t", "b", "c"], dtype=dtype))
+ tm.assert_extension_array_equal(arr, expected)
+
+
+def test_setitem_no_coercion():
+ # https://github.com/pandas-dev/pandas/issues/28150
+ arr = PandasArray(np.array([1, 2, 3]))
+ with pytest.raises(ValueError, match="int"):
+ arr[0] = "a"
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 5825f9f150eb8..540c3343b2a1b 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -125,6 +125,22 @@ def test_setitem_clears_freq(self):
a[0] = pd.Timedelta("1H")
assert a.freq is None
+ @pytest.mark.parametrize(
+ "obj",
+ [
+ pd.Timedelta(seconds=1),
+ pd.Timedelta(seconds=1).to_timedelta64(),
+ pd.Timedelta(seconds=1).to_pytimedelta(),
+ ],
+ )
+ def test_setitem_objects(self, obj):
+ # make sure we accept timedelta64 and timedelta in addition to Timedelta
+ tdi = pd.timedelta_range("2 Days", periods=4, freq="H")
+ arr = TimedeltaArray(tdi, freq=tdi.freq)
+
+ arr[0] = obj
+ assert arr[0] == pd.Timedelta(seconds=1)
+
class TestReductions:
def test_min_max(self):
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 8c0930c044838..b6ffd8a83e409 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -14,7 +14,7 @@
from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar
import pandas as pd
-from pandas import DataFrame, Series, date_range
+from pandas import DataFrame, Series, compat, date_range
from pandas.core.computation import pytables
from pandas.core.computation.check import _NUMEXPR_VERSION
from pandas.core.computation.engines import NumExprClobberingError, _engines
@@ -1267,7 +1267,10 @@ def test_assignment_column(self):
msg = "left hand side of an assignment must be a single name"
with pytest.raises(SyntaxError, match=msg):
df.eval("d,c = a + b")
- msg = "can't assign to function call"
+ if compat.PY38:
+ msg = "cannot assign to function call"
+ else:
+ msg = "can't assign to function call"
with pytest.raises(SyntaxError, match=msg):
df.eval('Timestamp("20131001") = a + b')
@@ -1789,9 +1792,10 @@ def test_result_types(self):
self.check_result_type(np.float32, np.float32)
self.check_result_type(np.float64, np.float64)
- def test_result_types2(self):
+ @td.skip_if_windows
+ def test_result_complex128(self):
# xref https://github.com/pandas-dev/pandas/issues/12293
- pytest.skip("unreliable tests on complex128")
+ # this fails on Windows, apparently a floating point precision issue
# Did not test complex64 because DataFrame is converting it to
# complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
@@ -1966,6 +1970,26 @@ def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
pd.eval(ex, engine=engine, parser=parser)
+@pytest.mark.parametrize(
+ "other",
+ [
+ "'x'",
+ pytest.param(
+ "...", marks=pytest.mark.xfail(not compat.PY38, reason="GH-28116")
+ ),
+ ],
+)
+def test_equals_various(other):
+ df = DataFrame({"A": ["a", "b", "c"]})
+ result = df.eval("A == {}".format(other))
+ expected = Series([False, False, False], name="A")
+ if _USE_NUMEXPR:
+ # https://github.com/pandas-dev/pandas/issues/10239
+ # lose name with numexpr engine. Remove when that's fixed.
+ expected.name = None
+ tm.assert_series_equal(result, expected)
+
+
def test_inf(engine, parser):
s = "inf + 1"
expected = np.inf
diff --git a/pandas/tests/config/test_config.py b/pandas/tests/config/test_config.py
index 3f12d1d7a292d..51640641c78e6 100644
--- a/pandas/tests/config/test_config.py
+++ b/pandas/tests/config/test_config.py
@@ -208,13 +208,17 @@ def test_set_option_multiple(self):
def test_validation(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
+ self.cf.register_option("d", 1, "doc", validator=self.cf.is_nonnegative_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_text)
+
msg = "Value must have type '<class 'int'>'"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("a.b.c.d2", "NO", "doc", validator=self.cf.is_int)
self.cf.set_option("a", 2) # int is_int
self.cf.set_option("b.c", "wurld") # str is_str
+ self.cf.set_option("d", 2)
+ self.cf.set_option("d", None) # non-negative int can be None
# None not is_int
with pytest.raises(ValueError, match=msg):
@@ -222,6 +226,16 @@ def test_validation(self):
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a", "ab")
+ msg = "Value must be a nonnegative integer or None"
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option(
+ "a.b.c.d3", "NO", "doc", validator=self.cf.is_nonnegative_int
+ )
+ with pytest.raises(ValueError, match=msg):
+ self.cf.register_option(
+ "a.b.c.d3", -2, "doc", validator=self.cf.is_nonnegative_int
+ )
+
msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
with pytest.raises(ValueError, match=msg):
self.cf.set_option("b.c", 1)
diff --git a/pandas/tests/dtypes/cast/test_convert_objects.py b/pandas/tests/dtypes/cast/test_convert_objects.py
index 45980dbd82736..a28d554acd312 100644
--- a/pandas/tests/dtypes/cast/test_convert_objects.py
+++ b/pandas/tests/dtypes/cast/test_convert_objects.py
@@ -5,9 +5,8 @@
@pytest.mark.parametrize("data", [[1, 2], ["apply", "banana"]])
-@pytest.mark.parametrize("copy", [True, False])
-def test_maybe_convert_objects_copy(data, copy):
+def test_maybe_convert_objects_copy(data):
arr = np.array(data)
- out = maybe_convert_objects(arr, copy=copy)
+ out = maybe_convert_objects(arr)
- assert (arr is out) is (not copy)
+ assert arr is not out
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 36548f3515a48..266f7ac50c663 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -21,11 +21,8 @@
UNSIGNED_EA_INT_DTYPES,
UNSIGNED_INT_DTYPES,
)
-from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
-ignore_sparse_warning = pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-
# EA & Actual Dtypes
def to_ea_dtypes(dtypes):
@@ -179,10 +176,8 @@ def test_is_object():
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
)
-@ignore_sparse_warning
def test_is_sparse(check_scipy):
assert com.is_sparse(pd.SparseArray([1, 2, 3]))
- assert com.is_sparse(pd.SparseSeries([1, 2, 3]))
assert not com.is_sparse(np.array([1, 2, 3]))
@@ -193,14 +188,12 @@ def test_is_sparse(check_scipy):
@td.skip_if_no_scipy
-@ignore_sparse_warning
def test_is_scipy_sparse():
from scipy.sparse import bsr_matrix
assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3]))
- assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
def test_is_categorical():
@@ -586,7 +579,6 @@ def test_is_bool_dtype():
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
)
-@ignore_sparse_warning
def test_is_extension_type(check_scipy):
assert not com.is_extension_type([1, 2, 3])
assert not com.is_extension_type(np.array([1, 2, 3]))
@@ -596,7 +588,6 @@ def test_is_extension_type(check_scipy):
assert com.is_extension_type(cat)
assert com.is_extension_type(pd.Series(cat))
assert com.is_extension_type(pd.SparseArray([1, 2, 3]))
- assert com.is_extension_type(pd.SparseSeries([1, 2, 3]))
assert com.is_extension_type(pd.DatetimeIndex(["2000"], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
@@ -664,21 +655,13 @@ def test__get_dtype(input_param, result):
assert com._get_dtype(input_param) == result
-@ignore_sparse_warning
-def test__get_dtype_sparse():
- ser = pd.SparseSeries([1, 2], dtype="int32")
- expected = SparseDtype("int32")
- assert com._get_dtype(ser) == expected
- assert com._get_dtype(ser.dtype) == expected
-
-
@pytest.mark.parametrize(
"input_param,expected_error_message",
[
(None, "Cannot deduce dtype from null object"),
(1, "data type not understood"),
(1.2, "data type not understood"),
- ("random string", "data type 'random string' not understood"),
+ ("random string", 'data type "random string" not understood'),
(pd.DataFrame([1, 2]), "data type not understood"),
],
)
@@ -723,11 +706,3 @@ def test__get_dtype_fails(input_param, expected_error_message):
)
def test__is_dtype_type(input_param, result):
assert com._is_dtype_type(input_param, lambda tipo: tipo == result)
-
-
-@ignore_sparse_warning
-def test__is_dtype_type_sparse():
- ser = pd.SparseSeries([1, 2], dtype="int32")
- result = np.dtype("int32")
- assert com._is_dtype_type(ser, lambda tipo: tipo == result)
- assert com._is_dtype_type(ser.dtype, lambda tipo: tipo == result)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index d3f0d7c43ee6b..f4bf4c1fc83d9 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -30,7 +30,7 @@
import pandas as pd
from pandas import Categorical, CategoricalIndex, IntervalIndex, Series, date_range
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
import pandas.util.testing as tm
@@ -248,9 +248,19 @@ def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]")
- with pytest.raises(TypeError, match="^Could not construct DatetimeTZDtype$"):
+ msg = "^Could not construct DatetimeTZDtype"
+ with pytest.raises(TypeError, match=msg):
+ # list instead of string
DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
+ with pytest.raises(TypeError, match=msg):
+ # non-nano unit
+ DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]")
+
+ with pytest.raises(TypeError, match=msg):
+ # dateutil str that returns None from gettz
+ DatetimeTZDtype.construct_from_string("datetime64[ns, dateutil/invalid]")
+
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
@@ -960,9 +970,8 @@ def test_is_bool_dtype(dtype, expected):
assert result is expected
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_is_bool_dtype_sparse():
- result = is_bool_dtype(pd.SparseSeries([True, False]))
+ result = is_bool_dtype(pd.Series(pd.SparseArray([True, False])))
assert result is True
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index b42822a03ebcd..471fd06a29ae9 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -1,4 +1,4 @@
-from warnings import catch_warnings, simplefilter
+from warnings import catch_warnings
import numpy as np
@@ -17,11 +17,6 @@ class TestABCClasses:
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
- with catch_warnings():
- simplefilter("ignore", FutureWarning)
- sparse_series = pd.Series([1, 2, 3]).to_sparse()
- sparse_frame = pd.SparseDataFrame({"a": [1, -1, None]})
-
sparse_array = pd.SparseArray(np.random.randn(10))
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
@@ -40,9 +35,7 @@ def test_abc_types(self):
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
- assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
- assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCPeriod)
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 4d688976cd50b..cfa6304909bb7 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -379,9 +379,12 @@ def test_isinf_scalar(self):
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar("a")
- def test_maybe_convert_numeric_infinities(self):
+ @pytest.mark.parametrize("maybe_int", [True, False])
+ @pytest.mark.parametrize(
+ "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
+ )
+ def test_maybe_convert_numeric_infinities(self, infinity, maybe_int):
# see gh-13274
- infinities = ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
na_values = {"", "NULL", "nan"}
pos = np.array(["inf"], dtype=np.float64)
@@ -389,35 +392,31 @@ def test_maybe_convert_numeric_infinities(self):
msg = "Unable to parse string"
- for infinity in infinities:
- for maybe_int in (True, False):
- out = lib.maybe_convert_numeric(
- np.array([infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- out = lib.maybe_convert_numeric(
- np.array(["-" + infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, neg)
-
- out = lib.maybe_convert_numeric(
- np.array([infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- out = lib.maybe_convert_numeric(
- np.array(["+" + infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- # too many characters
- with pytest.raises(ValueError, match=msg):
- lib.maybe_convert_numeric(
- np.array(["foo_" + infinity], dtype=object),
- na_values,
- maybe_int,
- )
+ out = lib.maybe_convert_numeric(
+ np.array([infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ out = lib.maybe_convert_numeric(
+ np.array(["-" + infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, neg)
+
+ out = lib.maybe_convert_numeric(
+ np.array([infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ out = lib.maybe_convert_numeric(
+ np.array(["+" + infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ # too many characters
+ with pytest.raises(ValueError, match=msg):
+ lib.maybe_convert_numeric(
+ np.array(["foo_" + infinity], dtype=object), na_values, maybe_int
+ )
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
@@ -531,6 +530,25 @@ def test_maybe_convert_objects_uint64(self):
exp = np.array([2 ** 63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
+ def test_maybe_convert_objects_datetime(self):
+ # GH27438
+ arr = np.array(
+ [np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object
+ )
+ exp = arr.copy()
+ out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
+ tm.assert_numpy_array_equal(out, exp)
+
+ arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)
+ exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")
+ out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
+ tm.assert_numpy_array_equal(out, exp)
+
+ arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)
+ exp = arr.copy()
+ out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1)
+ tm.assert_numpy_array_equal(out, exp)
+
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
@@ -577,6 +595,21 @@ def test_integers(self):
result = lib.infer_dtype(arr, skipna=True)
assert result == "integer"
+ @pytest.mark.parametrize(
+ "arr, skipna",
+ [
+ (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False),
+ (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True),
+ (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False),
+ (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True),
+ ],
+ )
+ def test_integer_na(self, arr, skipna):
+ # GH 27392
+ result = lib.infer_dtype(arr, skipna=skipna)
+ expected = "integer" if skipna else "integer-na"
+ assert result == expected
+
def test_deprecation(self):
# GH 24050
arr = np.array([1, 2, 3], dtype=object)
@@ -1134,6 +1167,17 @@ def test_categorical(self):
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == "categorical"
+ def test_interval(self):
+ idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
+ inferred = lib.infer_dtype(idx, skipna=False)
+ assert inferred == "interval"
+
+ inferred = lib.infer_dtype(idx._data, skipna=False)
+ assert inferred == "interval"
+
+ inferred = lib.infer_dtype(pd.Series(idx), skipna=False)
+ assert inferred == "interval"
+
class TestNumberScalar:
def test_is_number(self):
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index a688dec50bc95..25b447e1df7d4 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -25,6 +25,9 @@
from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range
from pandas.util import testing as tm
+now = pd.Timestamp.now()
+utcnow = pd.Timestamp.now("UTC")
+
@pytest.mark.parametrize("notna_f", [notna, notnull])
def test_notna_notnull(notna_f):
@@ -86,6 +89,10 @@ def test_isna_isnull(self, isna_f):
assert not isna_f(np.inf)
assert not isna_f(-np.inf)
+ # type
+ assert not isna_f(type(pd.Series()))
+ assert not isna_f(type(pd.DataFrame()))
+
# series
for s in [
tm.makeFloatSeries(),
@@ -328,6 +335,29 @@ def test_array_equivalent():
assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
+@pytest.mark.parametrize(
+ "lvalue, rvalue",
+ [
+ # There are 3 variants for each of lvalue and rvalue. We include all
+        # three for the tz-naive `now` and exclude the datetime64 variant
+ # for utcnow because it drops tzinfo.
+ (now, utcnow),
+ (now.to_datetime64(), utcnow),
+ (now.to_pydatetime(), utcnow),
+        (now, utcnow.to_pydatetime()),
+ (now.to_datetime64(), utcnow.to_pydatetime()),
+ (now.to_pydatetime(), utcnow.to_pydatetime()),
+ ],
+)
+def test_array_equivalent_tzawareness(lvalue, rvalue):
+ # we shouldn't raise if comparing tzaware and tznaive datetimes
+ left = np.array([lvalue], dtype=object)
+ right = np.array([rvalue], dtype=object)
+
+ assert not array_equivalent(left, right, strict_nan=True)
+ assert not array_equivalent(left, right, strict_nan=False)
+
+
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
@@ -356,6 +386,20 @@ def test_array_equivalent_str():
)
+def test_array_equivalent_nested():
+ # reached in groupby aggregations, make sure we use np.any when checking
+ # if the comparison is truthy
+ left = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
+ right = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
+
+ assert array_equivalent(left, right, strict_nan=True)
+ assert not array_equivalent(left, right[::-1], strict_nan=True)
+
+ left = np.array([np.array([50, 50, 50]), np.array([40, 40, 40])], dtype=object)
+ right = np.array([50, 40])
+ assert not array_equivalent(left, right, strict_nan=True)
+
+
@pytest.mark.parametrize(
"dtype, na_value",
[
diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/arrays.py
similarity index 80%
rename from pandas/tests/extension/arrow/bool.py
rename to pandas/tests/extension/arrow/arrays.py
index eb75d6d968073..6a28f76e474cc 100644
--- a/pandas/tests/extension/arrow/bool.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -43,18 +43,27 @@ def _is_boolean(self):
return True
-class ArrowBoolArray(ExtensionArray):
- def __init__(self, values):
- if not isinstance(values, pa.ChunkedArray):
- raise ValueError
+@register_extension_dtype
+class ArrowStringDtype(ExtensionDtype):
- assert values.type == pa.bool_()
- self._data = values
- self._dtype = ArrowBoolDtype()
+ type = str
+ kind = "U"
+ name = "arrow_string"
+ na_value = pa.NULL
+
+ @classmethod
+ def construct_from_string(cls, string):
+ if string == cls.name:
+ return cls()
+ else:
+ raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+
+ @classmethod
+ def construct_array_type(cls):
+ return ArrowStringArray
- def __repr__(self):
- return "ArrowBoolArray({})".format(repr(self._data))
+class ArrowExtensionArray(ExtensionArray):
@classmethod
def from_scalars(cls, values):
arr = pa.chunked_array([pa.array(np.asarray(values))])
@@ -69,6 +78,9 @@ def from_array(cls, arr):
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls.from_scalars(scalars)
+ def __repr__(self):
+ return "{cls}({data})".format(cls=type(self).__name__, data=repr(self._data))
+
def __getitem__(self, item):
if pd.api.types.is_scalar(item):
return self._data.to_pandas()[item]
@@ -142,3 +154,23 @@ def any(self, axis=0, out=None):
def all(self, axis=0, out=None):
return self._data.to_pandas().all()
+
+
+class ArrowBoolArray(ArrowExtensionArray):
+ def __init__(self, values):
+ if not isinstance(values, pa.ChunkedArray):
+ raise ValueError
+
+ assert values.type == pa.bool_()
+ self._data = values
+ self._dtype = ArrowBoolDtype()
+
+
+class ArrowStringArray(ArrowExtensionArray):
+ def __init__(self, values):
+ if not isinstance(values, pa.ChunkedArray):
+ raise ValueError
+
+ assert values.type == pa.string()
+ self._data = values
+ self._dtype = ArrowStringDtype()
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index 205edf5da5b74..9c53210b75d6b 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -7,7 +7,7 @@
pytest.importorskip("pyarrow", minversion="0.10.0")
-from .bool import ArrowBoolArray, ArrowBoolDtype # isort:skip
+from .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip
@pytest.fixture
@@ -41,6 +41,10 @@ def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
+ def test_view(self, data):
+ # __setitem__ does not work, so we only have a smoke-test
+ data.view()
+
class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
def test_from_dtype(self, data):
diff --git a/pandas/tests/extension/arrow/test_string.py b/pandas/tests/extension/arrow/test_string.py
new file mode 100644
index 0000000000000..06f149aa4b75f
--- /dev/null
+++ b/pandas/tests/extension/arrow/test_string.py
@@ -0,0 +1,13 @@
+import pytest
+
+import pandas as pd
+
+pytest.importorskip("pyarrow", minversion="0.10.0")
+
+from .arrays import ArrowStringDtype # isort:skip
+
+
+def test_constructor_from_list():
+ # GH 27673
+ result = pd.Series(["E"], dtype=ArrowStringDtype())
+ assert isinstance(result.dtype, ArrowStringDtype)
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index e02586eacfea7..d56cc50f4739c 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -260,3 +260,9 @@ def test_reindex_non_na_fill_value(self, data_missing):
expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
self.assert_series_equal(result, expected)
+
+ def test_loc_len1(self, data):
+ # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim
+ df = pd.DataFrame({"A": data})
+ res = df.loc[[0], "A"]
+ assert res._data._block.ndim == 1
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index dee8021f5375f..a29f6deeffae6 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -75,3 +75,18 @@ def test_copy(self, data):
data[1] = data[0]
assert result[1] != result[0]
+
+ def test_view(self, data):
+ # view with no dtype should return a shallow copy, *not* the same
+ # object
+ assert data[1] != data[0]
+
+ result = data.view()
+ assert result is not data
+ assert type(result) == type(data)
+
+ result[1] = result[0]
+ assert data[1] == data[0]
+
+ # check specifically that the `dtype` kwarg is accepted
+ data.view(dtype=None)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index c28ff956a33a4..a1988744d76a1 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -137,11 +137,11 @@ def __setitem__(self, key, value):
value = decimal.Decimal(value)
self._data[key] = value
- def __len__(self):
+ def __len__(self) -> int:
return len(self._data)
@property
- def nbytes(self):
+ def nbytes(self) -> int:
n = len(self)
if n:
return n * sys.getsizeof(self[0])
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 9dec023f4073a..3ac9d37ccf4f3 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -392,17 +392,6 @@ def test_ufunc_fallback(data):
tm.assert_series_equal(result, expected)
-def test_formatting_values_deprecated():
- class DecimalArray2(DecimalArray):
- def _formatting_values(self):
- return np.array(self)
-
- ser = pd.Series(DecimalArray2([decimal.Decimal("1.0")]))
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- repr(ser)
-
-
def test_array_ufunc():
a = to_decimal([1, 2, 3])
result = np.exp(a)
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 21c4ac8f055a2..b64ddbd6ac84d 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -80,6 +80,9 @@ def __getitem__(self, item):
elif isinstance(item, abc.Iterable):
# fancy indexing
return type(self)([self.data[i] for i in item])
+ elif isinstance(item, slice) and item == slice(None):
+ # Make sure we get a view
+ return type(self)(self.data)
else:
# slice
return type(self)(self.data[item])
@@ -103,11 +106,11 @@ def __setitem__(self, key, value):
assert isinstance(v, self.dtype.type)
self.data[k] = v
- def __len__(self):
+ def __len__(self) -> int:
return len(self.data)
@property
- def nbytes(self):
+ def nbytes(self) -> int:
return sys.getsizeof(self.data)
def isna(self):
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index f7456d24ad6d3..c342777b0ebc4 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -19,7 +19,7 @@
import pytest
import pandas as pd
-from pandas import Categorical
+from pandas import Categorical, CategoricalIndex, Timestamp
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
import pandas.util.testing as tm
@@ -197,7 +197,15 @@ def test_searchsorted(self, data_for_sorting):
class TestCasting(base.BaseCastingTests):
- pass
+ @pytest.mark.parametrize("cls", [Categorical, CategoricalIndex])
+ @pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), pd.NaT]])
+ def test_cast_nan_to_int(self, cls, values):
+ # GH 28406
+ s = cls(values)
+
+ msg = "Cannot (cast|convert)"
+ with pytest.raises((ValueError, TypeError), match=msg):
+ s.astype(int)
class TestArithmeticOps(base.BaseArithmeticOpsTests):
@@ -211,7 +219,7 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
- with pytest.raises(TypeError, match="cannot perform"):
+ with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
ser + data
def test_divmod_series_array(self):
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 9a7a43cff0c27..a60607d586ada 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -142,16 +142,6 @@ def test_divmod_series_array(self):
# skipping because it is not implemented
pass
- @pytest.mark.xfail(reason="different implementation", strict=False)
- def test_direct_arith_with_series_returns_not_implemented(self, data):
- # Right now, we have trouble with this. Returning NotImplemented
- # fails other tests like
- # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic::
- # test_dt64_seris_add_intlike
- return super(
- TestArithmeticOps, self
- ).test_direct_arith_with_series_returns_not_implemented(data)
-
class TestCasting(BaseDatetimeTests, base.BaseCastingTests):
pass
@@ -163,12 +153,6 @@ def _compare_other(self, s, data, op_name, other):
# with (some) integers, depending on the value.
pass
- @pytest.mark.xfail(reason="different implementation", strict=False)
- def test_direct_arith_with_series_returns_not_implemented(self, data):
- return super(
- TestComparisonOps, self
- ).test_direct_arith_with_series_returns_not_implemented(data)
-
class TestMissing(BaseDatetimeTests, base.BaseMissingTests):
pass
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 1a4f84e2c0fd2..6311070cfe2bb 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.internals import BlockManager, SingleBlockManager
+from pandas.core.internals import BlockManager
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
@@ -10,9 +10,6 @@ class CustomBlock(NonConsolidatableMixIn, Block):
_holder = np.ndarray
- def formatting_values(self):
- return np.array(["Val: {}".format(i) for i in self.values])
-
def concat_same_type(self, to_concat, placement=None):
"""
Always concatenate disregarding self.ndim as the values are
@@ -35,22 +32,6 @@ def df():
return pd.DataFrame(block_manager)
-def test_custom_repr():
- values = np.arange(3, dtype="int64")
-
- # series
- block = CustomBlock(values, placement=slice(0, 3))
-
- s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3)))
- assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64"
-
- # dataframe
- block = CustomBlock(values, placement=slice(0, 1))
- blk_mgr = BlockManager([block], [["col"], range(3)])
- df = pd.DataFrame(blk_mgr)
- assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2"
-
-
def test_concat_series():
# GH17728
values = np.arange(3, dtype="int64")
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 1aab71286b4a6..4fdcf930d224f 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -95,7 +95,10 @@ class TestGrouping(BaseInterval, base.BaseGroupbyTests):
class TestInterface(BaseInterval, base.BaseInterfaceTests):
- pass
+ def test_view(self, data):
+ # __setitem__ incorrectly makes a copy (GH#27147), so we only
+ # have a smoke-test
+ data.view()
class TestReduce(base.BaseNoReduceTests):
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 84d59902d2aa7..6ebe71e173ec2 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -103,6 +103,10 @@ def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
+ def test_view(self, data):
+ # __setitem__ does not work, so we only have a smoke-test
+ data.view()
+
class TestConstructors(BaseSparseTests, base.BaseConstructorsTests):
pass
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index c57b2a6964f39..017cbea7ec723 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -27,7 +27,6 @@
import pandas.util.testing as tm
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
@@ -1083,7 +1082,7 @@ def test_reset_index_level(self):
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
- with pytest.raises(KeyError, match="Level E "):
+ with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
@@ -1452,7 +1451,6 @@ def test_droplevel(self):
tm.assert_frame_equal(result, expected)
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
@@ -1515,30 +1513,23 @@ def test_set_axis_inplace(self):
expected["columns"] = expected[1]
for axis in expected:
- # inplace=True
- # The FutureWarning comes from the fact that we would like to have
- # inplace default to False some day
- for inplace, warn in (None, FutureWarning), (True, None):
- kwargs = {"inplace": inplace}
-
- result = df.copy()
- with tm.assert_produces_warning(warn):
- result.set_axis(list("abc"), axis=axis, **kwargs)
- tm.assert_frame_equal(result, expected[axis])
+ result = df.copy()
+ result.set_axis(list("abc"), axis=axis, inplace=True)
+ tm.assert_frame_equal(result, expected[axis])
# inplace=False
- result = df.set_axis(list("abc"), axis=axis, inplace=False)
+ result = df.set_axis(list("abc"), axis=axis)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
- result = df.set_axis(list("abc"), inplace=False)
+ result = df.set_axis(list("abc"))
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, "foo":
with pytest.raises(ValueError, match="No axis named"):
- df.set_axis(list("abc"), axis=axis, inplace=False)
+ df.set_axis(list("abc"), axis=axis)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame(
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index d5c66f0c1dd64..e99208ac78e15 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1819,10 +1819,17 @@ def test_any_all_bool_only(self):
(np.any, {"A": pd.Series([0, 1], dtype="category")}, True),
(np.all, {"A": pd.Series([1, 2], dtype="category")}, True),
(np.any, {"A": pd.Series([1, 2], dtype="category")}, True),
- # # Mix
- # GH 21484
- # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
- # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
+ # Mix GH#21484
+ pytest.param(
+ np.all,
+ {
+ "A": pd.Series([10, 20], dtype="M8[ns]"),
+ "B": pd.Series([10, 20], dtype="m8[ns]"),
+ },
+ True,
+ # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here
+ marks=[td.skip_if_np_lt("1.15")],
+ ),
],
)
def test_any_all_np_func(self, func, data, expected):
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index b4b081cfe8d76..d53a3d81ab5f8 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -10,7 +10,6 @@
Categorical,
DataFrame,
Series,
- SparseDataFrame,
SparseDtype,
compat,
date_range,
@@ -67,8 +66,7 @@ def test_getitem_pop_assign_name(self, float_frame):
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = float_frame.get_value(idx, col)
+ result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
@@ -221,9 +219,6 @@ def test_iterrows(self, float_frame, float_string_frame):
def test_iterrows_iso8601(self):
# GH 19671
- if self.klass == SparseDataFrame:
- pytest.xfail(reason="SparseBlock datetime type not implemented.")
-
s = self.klass(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index 92912ff9ec093..0328232213588 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -116,6 +116,27 @@ def test_apply_with_reduce_empty(self):
# Ensure that x.append hasn't been called
assert x == []
+ @pytest.mark.parametrize("func", ["sum", "prod", "any", "all"])
+ def test_apply_funcs_over_empty(self, func):
+ # GH 28213
+ df = DataFrame(columns=["a", "b", "c"])
+
+ result = df.apply(getattr(np, func))
+ expected = getattr(df, func)()
+ assert_series_equal(result, expected)
+
+ def test_nunique_empty(self):
+ # GH 28213
+ df = DataFrame(columns=["a", "b", "c"])
+
+ result = df.nunique()
+ expected = Series(0, index=df.columns)
+ assert_series_equal(result, expected)
+
+ result = df.T.nunique()
+ expected = Series([], index=pd.Index([]))
+ assert_series_equal(result, expected)
+
def test_apply_deprecate_reduce(self):
empty_frame = DataFrame()
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 7c022106c9104..88bd5a4fedfae 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -235,21 +235,46 @@ def _test_seq(df, idx_ser, col_ser):
rs = df.le(df)
assert not rs.loc[0, 0]
+ def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
- rs = df.gt(df2)
- assert not rs.values.any()
+
+ msg = "|".join(
+ [
+ "'>' not supported between instances of '.*' and 'complex'",
+ r"unorderable types: .*complex\(\)", # PY35
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ # inequalities are not well-defined for complex numbers
+ df.gt(df2)
+ with pytest.raises(TypeError, match=msg):
+ # regression test that we get the same behavior for Series
+ df["a"].gt(df2["a"])
+ with pytest.raises(TypeError, match=msg):
+ # Check that we match numpy behavior here
+ df.values > df2.values
+
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
- rs = df3.gt(2j)
- assert not rs.values.any()
+ with pytest.raises(TypeError, match=msg):
+ # inequalities are not well-defined for complex numbers
+ df3.gt(2j)
+ with pytest.raises(TypeError, match=msg):
+ # regression test that we get the same behavior for Series
+ df3["a"].gt(2j)
+ with pytest.raises(TypeError, match=msg):
+ # Check that we match numpy behavior here
+ df3.values > 2j
+
+ def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
@@ -457,6 +482,16 @@ def test_arith_flex_zero_len_raises(self):
class TestFrameArithmetic:
+ def test_td64_op_nat_casting(self):
+ # Make sure we don't accidentally treat timedelta64(NaT) as datetime64
+ # when calling dispatch_to_series in DataFrame arithmetic
+ ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
+ df = pd.DataFrame([[1, 2], [3, 4]])
+
+ result = df * ser
+ expected = pd.DataFrame({0: ser, 1: ser})
+ tm.assert_frame_equal(result, expected)
+
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
@@ -642,3 +677,45 @@ def test_arith_non_pandas_object(self):
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
+
+ def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
+ # GH#27415
+ op = all_arithmetic_operators
+ ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
+ data = [1, 2]
+ df = pd.DataFrame([data], columns=ind)
+ num = 10
+ result = getattr(df, op)(num)
+ expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_frame_with_zero_len_series_corner_cases():
+ # GH#28600
+ # easy all-float case
+ df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
+ ser = pd.Series(dtype=np.float64)
+
+ result = df + ser
+ expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ result = df == ser
+ expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ # non-float case should not raise on comparison
+ df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
+ result = df2 == ser
+ expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_zero_len_frame_with_series_corner_cases():
+ # GH#28600
+ df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
+ ser = pd.Series([1, 2], index=["A", "B"])
+
+ result = df + ser
+ expected = df
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 29e46ac70c943..ebffeeaa3063e 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -329,10 +329,8 @@ def test_constructor_dict(self):
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert frame_none.get_value(0, "a") is None
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert frame_none_list.get_value(0, "a") is None
+ assert frame_none._get_value(0, "a") is None
+ assert frame_none_list._get_value(0, "a") is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
@@ -702,8 +700,7 @@ def test_nested_dict_frame_constructor(self):
data = {}
for col in df.columns:
for row in df.index:
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- data.setdefault(col, {})[row] = df.get_value(row, col)
+ data.setdefault(col, {})[row] = df._get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
@@ -711,8 +708,7 @@ def test_nested_dict_frame_constructor(self):
data = {}
for col in df.columns:
for row in df.index:
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- data.setdefault(row, {})[col] = df.get_value(row, col)
+ data.setdefault(row, {})[col] = df._get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/frame/test_explode.py b/pandas/tests/frame/test_explode.py
index b4330aadbfba3..c07de35f8bf34 100644
--- a/pandas/tests/frame/test_explode.py
+++ b/pandas/tests/frame/test_explode.py
@@ -118,3 +118,47 @@ def test_usecase():
index=[0, 0, 1, 1],
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "input_dict, input_index, expected_dict, expected_index",
+ [
+ (
+ {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
+ [0, 0],
+ {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
+ [0, 0, 0, 0],
+ ),
+ (
+ {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
+ pd.Index([0, 0], name="my_index"),
+ {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
+ pd.Index([0, 0, 0, 0], name="my_index"),
+ ),
+ (
+ {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
+ pd.MultiIndex.from_arrays(
+ [[0, 0], [1, 1]], names=["my_first_index", "my_second_index"]
+ ),
+ {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
+ pd.MultiIndex.from_arrays(
+ [[0, 0, 0, 0], [1, 1, 1, 1]],
+ names=["my_first_index", "my_second_index"],
+ ),
+ ),
+ (
+ {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]},
+ pd.MultiIndex.from_arrays([[0, 0], [1, 1]], names=["my_index", None]),
+ {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]},
+ pd.MultiIndex.from_arrays(
+ [[0, 0, 0, 0], [1, 1, 1, 1]], names=["my_index", None]
+ ),
+ ),
+ ],
+)
+def test_duplicate_index(input_dict, input_index, expected_dict, expected_index):
+ # GH 28005
+ df = pd.DataFrame(input_dict, index=input_index)
+ result = df.explode("col1")
+ expected = pd.DataFrame(expected_dict, index=expected_index, dtype=object)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 0cb7db0e47123..6b073c460ea08 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -269,7 +269,7 @@ def test_getitem_boolean(
subframe_obj = datetime_frame[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
- with pytest.raises(ValueError, match="boolean values only"):
+ with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
@@ -821,6 +821,14 @@ def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
df[df > df2] = 47
assert_frame_equal(df, df2)
+ def test_setitem_with_empty_listlike(self):
+ # GH #17101
+ index = pd.Index([], name="idx")
+ result = pd.DataFrame(columns=["A"], index=index)
+ result["A"] = []
+ expected = pd.DataFrame(columns=["A"], index=index)
+ tm.assert_index_equal(result.index, expected.index)
+
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
df = DataFrame()
@@ -1150,6 +1158,7 @@ def test_fancy_index_int_labels_exceptions(self, float_frame):
with pytest.raises(KeyError, match=msg):
float_frame.ix[:, ["E"]] = 1
+ # FIXME: don't leave commented-out
# partial setting now allows this GH2578
# pytest.raises(KeyError, float_frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
@@ -1676,9 +1685,11 @@ def test_setitem_single_column_mixed_datetime(self):
)
assert_series_equal(result, expected)
- # set an allowable datetime64 type
+ # GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
- assert isna(df.loc["b", "timestamp"])
+ assert not isna(df.loc["b", "timestamp"])
+ assert df["timestamp"].dtype == np.object_
+ assert df.loc["b", "timestamp"] == iNaT
# allow this syntax
df.loc["c", "timestamp"] = np.nan
@@ -1849,8 +1860,7 @@ def test_getitem_list_duplicates(self):
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = float_frame.get_value(idx, col)
+ result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
assert result == expected
@@ -1905,42 +1915,34 @@ def test_lookup_raises(self, float_frame):
def test_set_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- float_frame.set_value(idx, col, 1)
+ float_frame._set_value(idx, col, 1)
assert float_frame[col][idx] == 1
def test_set_value_resize(self, float_frame):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res = float_frame.set_value("foobar", "B", 0)
+ res = float_frame._set_value("foobar", "B", 0)
assert res is float_frame
assert res.index[-1] == "foobar"
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert res.get_value("foobar", "B") == 0
+ assert res._get_value("foobar", "B") == 0
float_frame.loc["foobar", "qux"] = 0
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert float_frame.get_value("foobar", "qux") == 0
+ assert float_frame._get_value("foobar", "qux") == 0
res = float_frame.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res3 = res.set_value("foobar", "baz", "sam")
+ res3 = res._set_value("foobar", "baz", "sam")
assert res3["baz"].dtype == np.object_
res = float_frame.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res3 = res.set_value("foobar", "baz", True)
+ res3 = res._set_value("foobar", "baz", True)
assert res3["baz"].dtype == np.object_
res = float_frame.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res3 = res.set_value("foobar", "baz", 5)
+ res3 = res._set_value("foobar", "baz", 5)
assert is_float_dtype(res3["baz"])
assert isna(res3["baz"].drop(["foobar"])).all()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- msg = "could not convert string to float: 'sam'"
- with pytest.raises(ValueError, match=msg):
- res3.set_value("foobar", "baz", "sam")
+ msg = "could not convert string to float: 'sam'"
+ with pytest.raises(ValueError, match=msg):
+ res3._set_value("foobar", "baz", "sam")
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC"))
@@ -1948,8 +1950,7 @@ def test_set_value_with_index_dtype_change(self):
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df.set_value("C", 2, 1.0)
+ df._set_value("C", 2, 1.0)
assert list(df.index) == list(df_orig.index) + ["C"]
# assert list(df.columns) == list(df_orig.columns) + [2]
@@ -1960,8 +1961,7 @@ def test_set_value_with_index_dtype_change(self):
# create both new
df = df_orig.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df.set_value("C", "D", 1.0)
+ df._set_value("C", "D", 1.0)
assert list(df.index) == list(df_orig.index) + ["C"]
assert list(df.columns) == list(df_orig.columns) + ["D"]
@@ -1974,9 +1974,8 @@ def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=range(4))
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- with pytest.raises(KeyError, match=r"^0$"):
- df.get_value(0, 1)
+ with pytest.raises(KeyError, match=r"^0$"):
+ df._get_value(0, 1)
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
@@ -2146,13 +2145,6 @@ def test_loc_duplicates(self):
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- def test_iloc_sparse_propegate_fill_value(self):
- from pandas.core.sparse.api import SparseDataFrame
-
- df = SparseDataFrame({"A": [999, 1]}, default_fill_value=999)
- assert len(df["A"].sp_values) == len(df.iloc[:, 0].sp_values)
-
def test_iat(self, float_frame):
for i, row in enumerate(float_frame.index):
@@ -2161,23 +2153,6 @@ def test_iat(self, float_frame):
expected = float_frame.at[row, col]
assert result == expected
- def test_nested_exception(self):
- # Ignore the strange way of triggering the problem
- # (which may get fixed), it's just a way to trigger
- # the issue or reraising an outer exception without
- # a named argument
- df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index(
- ["a", "b"]
- )
- index = list(df.index)
- index[0] = ["a", "b"]
- df.index = index
-
- try:
- repr(df)
- except Exception as e:
- assert type(e) != UnboundLocalError
-
@pytest.mark.parametrize(
"method,expected_values",
[
diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
index adace5e4784ae..220968d4b3d29 100644
--- a/pandas/tests/frame/test_join.py
+++ b/pandas/tests/frame/test_join.py
@@ -193,3 +193,32 @@ def test_join_left_sequence_non_unique_index():
)
tm.assert_frame_equal(joined, expected)
+
+
+@pytest.mark.parametrize("sort_kw", [True, False, None])
+def test_suppress_future_warning_with_sort_kw(sort_kw):
+ a = DataFrame({"col1": [1, 2]}, index=["c", "a"])
+
+ b = DataFrame({"col2": [4, 5]}, index=["b", "a"])
+
+ c = DataFrame({"col3": [7, 8]}, index=["a", "b"])
+
+ expected = DataFrame(
+ {
+ "col1": {"a": 2.0, "b": float("nan"), "c": 1.0},
+ "col2": {"a": 5.0, "b": 4.0, "c": float("nan")},
+ "col3": {"a": 7.0, "b": 8.0, "c": float("nan")},
+ }
+ )
+ if sort_kw is False:
+ expected = expected.reindex(index=["c", "a", "b"])
+
+ if sort_kw is None:
+ # only warn if not explicitly specified
+ ctx = tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
+ else:
+ ctx = tm.assert_produces_warning(None, check_stacklevel=False)
+
+ with ctx:
+ result = a.join([b, c], how="outer", sort=sort_kw)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 236cadf67735d..e5e881dece34a 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -439,7 +439,7 @@ def test_quantile_nat(self):
)
tm.assert_frame_equal(res, exp)
- def test_quantile_empty(self):
+ def test_quantile_empty_no_rows(self):
# floats
df = DataFrame(columns=["a", "b"], dtype="float64")
@@ -467,3 +467,17 @@ def test_quantile_empty(self):
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
+
+ def test_quantile_empty_no_columns(self):
+ # GH#23925 _get_numeric_data may drop all columns
+ df = pd.DataFrame(pd.date_range("1/1/18", periods=5))
+ df.columns.name = "captain tightpants"
+ result = df.quantile(0.5)
+ expected = pd.Series([], index=[], name=0.5)
+ expected.index.name = "captain tightpants"
+ tm.assert_series_equal(result, expected)
+
+ result = df.quantile([0.5])
+ expected = pd.DataFrame([], index=[0.5], columns=[])
+ expected.columns.name = "captain tightpants"
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index 2862615ef8585..b341ed6a52ca5 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -1069,18 +1069,24 @@ def test_replace_truthy(self):
e = df
assert_frame_equal(r, e)
- def test_replace_int_to_int_chain(self):
+ def test_nested_dict_overlapping_keys_replace_int(self):
+ # GH 27660 keep behaviour consistent for simple dictionary and
+ # nested dictionary replacement
df = DataFrame({"a": list(range(1, 5))})
- with pytest.raises(ValueError, match="Replacement not allowed .+"):
- df.replace({"a": dict(zip(range(1, 5), range(2, 6)))})
- def test_replace_str_to_str_chain(self):
+ result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))})
+ expected = df.replace(dict(zip(range(1, 5), range(2, 6))))
+ assert_frame_equal(result, expected)
+
+ def test_nested_dict_overlapping_keys_replace_str(self):
+ # GH 27660
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({"a": astr})
- with pytest.raises(ValueError, match="Replacement not allowed .+"):
- df.replace({"a": dict(zip(astr, bstr))})
+ result = df.replace(dict(zip(astr, bstr)))
+ expected = df.replace({"a": dict(zip(astr, bstr))})
+ assert_frame_equal(result, expected)
def test_replace_swapping_bug(self):
df = pd.DataFrame({"a": [True, False, True]})
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index f3452e9a85fb3..eb654be3f12e6 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -984,7 +984,7 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels):
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
- # `MutliIndex.from_product` preserves categorical dtype -
+ # `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
@@ -1002,6 +1002,27 @@ def test_stack_preserve_categorical_dtype_values(self):
)
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "index, columns",
+ [
+ ([0, 0, 1, 1], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
+ ([0, 0, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
+ ([0, 1, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])),
+ ],
+ )
+ def test_stack_multi_columns_non_unique_index(self, index, columns):
+ # GH-28301
+ df = pd.DataFrame(index=index, columns=columns).fillna(1)
+ stacked = df.stack()
+ new_index = pd.MultiIndex.from_tuples(stacked.index.to_numpy())
+ expected = pd.DataFrame(
+ stacked.to_numpy(), index=new_index, columns=stacked.columns
+ )
+ tm.assert_frame_equal(stacked, expected)
+ stacked_codes = np.asarray(stacked.index.codes)
+ expected_codes = np.asarray(new_index.codes)
+ tm.assert_numpy_array_equal(stacked_codes, expected_codes)
+
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = pd.MultiIndex.from_tuples(
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index c66a97c2b294b..649a78b785d21 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -190,38 +190,6 @@ def test_subclass_iterrows(self):
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- def test_subclass_sparse_slice(self):
- rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
- ssdf = tm.SubclassedSparseDataFrame(rows)
- ssdf.testattr = "testattr"
-
- tm.assert_sp_frame_equal(ssdf.loc[:2], tm.SubclassedSparseDataFrame(rows[:3]))
- tm.assert_sp_frame_equal(ssdf.iloc[:2], tm.SubclassedSparseDataFrame(rows[:2]))
- tm.assert_sp_frame_equal(ssdf[:2], tm.SubclassedSparseDataFrame(rows[:2]))
- assert ssdf.loc[:2].testattr == "testattr"
- assert ssdf.iloc[:2].testattr == "testattr"
- assert ssdf[:2].testattr == "testattr"
-
- tm.assert_sp_series_equal(
- ssdf.loc[1],
- tm.SubclassedSparseSeries(rows[1]),
- check_names=False,
- check_kind=False,
- )
- tm.assert_sp_series_equal(
- ssdf.iloc[1],
- tm.SubclassedSparseSeries(rows[1]),
- check_names=False,
- check_kind=False,
- )
-
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- def test_subclass_sparse_transpose(self):
- ossdf = tm.SubclassedSparseDataFrame([[1, 2, 3], [4, 5, 6]])
- essdf = tm.SubclassedSparseDataFrame([[1, 4], [2, 5], [3, 6]])
- tm.assert_sp_frame_equal(ossdf.T, essdf)
-
def test_subclass_stack(self):
# GH 15564
df = tm.SubclassedDataFrame(
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index 28051d9b7f3b9..8fb028a0f0326 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -655,7 +655,7 @@ def _make_frame(names=None):
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
- assert com._all_none(*result.columns.names)
+ assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
assert_frame_equal(df, result)
@@ -695,6 +695,20 @@ def _make_frame(names=None):
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
+ def test_to_csv_interval_index(self):
+ # GH 28210
+ df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
+
+ with ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
+ df.to_csv(path)
+ result = self.read_csv(path, index_col=0)
+
+ # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
+ expected = df.copy()
+ expected.index = expected.index.astype(str)
+
+ assert_frame_equal(result, expected)
+
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 7b9e50ebbf342..98c4c48e0292d 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -589,7 +589,7 @@ def test_pct_change(self, periods, fill_method, limit, exp):
class TestNDFrame:
# tests that don't fit elsewhere
- def test_sample(sel):
+ def test_sample(self):
# Fixes issue: 2419
# additional specific object based tests
@@ -800,6 +800,11 @@ def test_take_invalid_kwargs(self):
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
+ def test_take_deprecated_kwarg_is_copy(self):
+ df = DataFrame([1, 2])
+ with tm.assert_produces_warning(FutureWarning):
+ df.take([0, 1], is_copy=True)
+
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 52d4fa76bf879..aa80c461a00e7 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
from pandas.core.base import SpecificationError
-from pandas.core.groupby.generic import _maybe_mangle_lambdas
+from pandas.core.groupby.generic import _make_unique, _maybe_mangle_lambdas
from pandas.core.groupby.grouper import Grouping
import pandas.util.testing as tm
@@ -560,3 +560,150 @@ def test_with_kwargs(self):
result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
tm.assert_frame_equal(result, expected)
+
+ def test_agg_with_one_lambda(self):
+ # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
+ df = pd.DataFrame(
+ {
+ "kind": ["cat", "dog", "cat", "dog"],
+ "height": [9.1, 6.0, 9.5, 34.0],
+ "weight": [7.9, 7.5, 9.9, 198.0],
+ }
+ )
+
+        # sort for Python 3.5 and earlier (dicts not insertion-ordered)
+ columns = ["height_sqr_min", "height_max", "weight_max"]
+ if compat.PY35:
+ columns = ["height_max", "height_sqr_min", "weight_max"]
+ expected = pd.DataFrame(
+ {
+ "height_sqr_min": [82.81, 36.00],
+ "height_max": [9.5, 34.0],
+ "weight_max": [9.9, 198.0],
+ },
+ index=pd.Index(["cat", "dog"], name="kind"),
+ columns=columns,
+ )
+
+ # check pd.NameAgg case
+ result1 = df.groupby(by="kind").agg(
+ height_sqr_min=pd.NamedAgg(
+ column="height", aggfunc=lambda x: np.min(x ** 2)
+ ),
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
+ )
+ tm.assert_frame_equal(result1, expected)
+
+ # check agg(key=(col, aggfunc)) case
+ result2 = df.groupby(by="kind").agg(
+ height_sqr_min=("height", lambda x: np.min(x ** 2)),
+ height_max=("height", "max"),
+ weight_max=("weight", "max"),
+ )
+ tm.assert_frame_equal(result2, expected)
+
+ def test_agg_multiple_lambda(self):
+ # GH25719, test for DataFrameGroupby.agg with multiple lambdas
+ # with mixed aggfunc
+ df = pd.DataFrame(
+ {
+ "kind": ["cat", "dog", "cat", "dog"],
+ "height": [9.1, 6.0, 9.5, 34.0],
+ "weight": [7.9, 7.5, 9.9, 198.0],
+ }
+ )
+        # sort for Python 3.5 and earlier (dicts not insertion-ordered)
+ columns = [
+ "height_sqr_min",
+ "height_max",
+ "weight_max",
+ "height_max_2",
+ "weight_min",
+ ]
+ if compat.PY35:
+ columns = [
+ "height_max",
+ "height_max_2",
+ "height_sqr_min",
+ "weight_max",
+ "weight_min",
+ ]
+ expected = pd.DataFrame(
+ {
+ "height_sqr_min": [82.81, 36.00],
+ "height_max": [9.5, 34.0],
+ "weight_max": [9.9, 198.0],
+ "height_max_2": [9.5, 34.0],
+ "weight_min": [7.9, 7.5],
+ },
+ index=pd.Index(["cat", "dog"], name="kind"),
+ columns=columns,
+ )
+
+ # check agg(key=(col, aggfunc)) case
+ result1 = df.groupby(by="kind").agg(
+ height_sqr_min=("height", lambda x: np.min(x ** 2)),
+ height_max=("height", "max"),
+ weight_max=("weight", "max"),
+ height_max_2=("height", lambda x: np.max(x)),
+ weight_min=("weight", lambda x: np.min(x)),
+ )
+ tm.assert_frame_equal(result1, expected)
+
+ # check pd.NamedAgg case
+ result2 = df.groupby(by="kind").agg(
+ height_sqr_min=pd.NamedAgg(
+ column="height", aggfunc=lambda x: np.min(x ** 2)
+ ),
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
+ height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
+ weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
+ )
+ tm.assert_frame_equal(result2, expected)
+
+ @pytest.mark.parametrize(
+ "order, expected_reorder",
+ [
+ (
+ [
+ ("height", "<lambda>"),
+ ("height", "max"),
+ ("weight", "max"),
+ ("height", "<lambda>"),
+ ("weight", "<lambda>"),
+ ],
+ [
+ ("height", "<lambda>_0"),
+ ("height", "max"),
+ ("weight", "max"),
+ ("height", "<lambda>_1"),
+ ("weight", "<lambda>"),
+ ],
+ ),
+ (
+ [
+ ("col2", "min"),
+ ("col1", "<lambda>"),
+ ("col1", "<lambda>"),
+ ("col1", "<lambda>"),
+ ],
+ [
+ ("col2", "min"),
+ ("col1", "<lambda>_0"),
+ ("col1", "<lambda>_1"),
+ ("col1", "<lambda>_2"),
+ ],
+ ),
+ (
+ [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+ [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+ ),
+ ],
+ )
+ def test_make_unique(self, order, expected_reorder):
+ # GH 27519, test if make_unique function reorders correctly
+ result = _make_unique(order)
+
+ assert result == expected_reorder
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 103ebf514b702..7e3cbed09c6d7 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -385,10 +385,7 @@ def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
- try:
- return np.percentile(a.dropna(), q=1)
- except Exception:
- return np.nan
+ return np.percentile(a.dropna(), q=1)
df = DataFrame(
{
diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py
index bdf93756b7559..72e60c5099304 100644
--- a/pandas/tests/groupby/conftest.py
+++ b/pandas/tests/groupby/conftest.py
@@ -2,6 +2,7 @@
import pytest
from pandas import DataFrame, MultiIndex
+from pandas.core.groupby.base import reduction_kernels
from pandas.util import testing as tm
@@ -102,3 +103,10 @@ def three_group():
"F": np.random.randn(11),
}
)
+
+
+@pytest.fixture(params=sorted(reduction_kernels))
+def reduction_func(request):
+ """yields the string names of all groupby reduction functions, one at a time.
+ """
+ return request.param
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index b240876de92b1..b8f9ecd42bae3 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -2,25 +2,23 @@
from numpy import nan
import pytest
-from pandas._libs import groupby, lib, reduction
+from pandas._libs import groupby, lib, reduction as libreduction
from pandas.core.dtypes.common import ensure_int64
-from pandas import Index, isna
+from pandas import Index, Series, isna
from pandas.core.groupby.ops import generate_bins_generic
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
def test_series_grouper():
- from pandas import Series
-
obj = Series(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
- grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
+ grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
@@ -31,14 +29,12 @@ def test_series_grouper():
def test_series_bin_grouper():
- from pandas import Series
-
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
- grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
+ grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
@@ -123,30 +119,32 @@ class TestMoments:
class TestReducer:
def test_int_index(self):
- from pandas.core.series import Series
-
arr = np.random.randn(100, 4)
- result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4)))
+ result = libreduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
- result = reduction.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))
+ result = libreduction.compute_reduction(
+ arr, np.sum, axis=1, labels=Index(np.arange(100))
+ )
expected = arr.sum(1)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(100))
- result = reduction.reduce(arr, np.sum, dummy=dummy, labels=Index(np.arange(4)))
+ result = libreduction.compute_reduction(
+ arr, np.sum, dummy=dummy, labels=Index(np.arange(4))
+ )
expected = arr.sum(0)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(4))
- result = reduction.reduce(
+ result = libreduction.compute_reduction(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
expected = arr.sum(1)
assert_almost_equal(result, expected)
- result = reduction.reduce(
+ result = libreduction.compute_reduction(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
assert_almost_equal(result, expected)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 486b3b28b29a3..fcc0aa3b1c015 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas.compat import PY37
+from pandas.compat import PY37, is_platform_windows
import pandas as pd
from pandas import (
@@ -209,7 +209,10 @@ def test_level_get_group(observed):
assert_frame_equal(result, expected)
-@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636", strict=False)
+# GH#21636 previously flaky on py37
+@pytest.mark.xfail(
+ is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False
+)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
@@ -431,6 +434,21 @@ def test_observed_groups_with_nan(observed):
tm.assert_dict_equal(result, expected)
+def test_observed_nth():
+ # GH 26385
+ cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
+ ser = pd.Series([1, 2, 3])
+ df = pd.DataFrame({"cat": cat, "ser": ser})
+
+ result = df.groupby("cat", observed=False)["ser"].nth(0)
+
+ index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"])
+ expected = pd.Series([1, np.nan, np.nan], index=index, name="ser")
+ expected.index.name = "cat"
+
+ tm.assert_series_equal(result, expected)
+
+
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
@@ -508,7 +526,7 @@ def test_datetime():
desc_result = grouped.describe()
idx = cats.codes.argsort()
- ord_labels = cats.take_nd(idx)
+ ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
assert_frame_equal(desc_result, expected)
@@ -764,7 +782,7 @@ def test_categorical_no_compress():
def test_sort():
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
@@ -1165,3 +1183,13 @@ def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
lambda x: OrderedDict([("min", x.min()), ("max", x.max())])
)
assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
+def test_groupby_categorical_axis_1(code):
+ # GH 13420
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
+ cat = pd.Categorical.from_codes(code, categories=list("abc"))
+ result = df.groupby(cat, axis=1).mean()
+ expected = df.T.groupby(cat, axis=0).mean().T
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index efc3142b25b82..afb22a732691c 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1,4 +1,5 @@
import builtins
+import datetime as dt
from io import StringIO
from itertools import product
from string import ascii_lowercase
@@ -9,7 +10,16 @@
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ isna,
+)
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td, testing as tm
@@ -1015,6 +1025,42 @@ def test_nunique_with_timegrouper():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize(
+ "key, data, dropna, expected",
+ [
+ (
+ ["x", "x", "x"],
+ [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")],
+ True,
+ Series([1], index=pd.Index(["x"], name="key"), name="data"),
+ ),
+ (
+ ["x", "x", "x"],
+ [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+ True,
+ Series([1], index=pd.Index(["x"], name="key"), name="data"),
+ ),
+ (
+ ["x", "x", "x", "y", "y"],
+ [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+ False,
+ Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"),
+ ),
+ (
+ ["x", "x", "x", "x", "y"],
+ [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)],
+ False,
+ Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"),
+ ),
+ ],
+)
+def test_nunique_with_NaT(key, data, dropna, expected):
+ # GH 27951
+ df = pd.DataFrame({"key": key, "data": data})
+ result = df.groupby(["key"])["data"].nunique(dropna=dropna)
+ tm.assert_series_equal(result, expected)
+
+
def test_nunique_preserves_column_level_names():
# GH 23222
test = pd.DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0"))
@@ -1238,6 +1284,75 @@ def test_quantile(interpolation, a_vals, b_vals, q):
tm.assert_frame_equal(result, expected)
+def test_quantile_array():
+ # https://github.com/pandas-dev/pandas/issues/27526
+ df = pd.DataFrame({"A": [0, 1, 2, 3, 4]})
+ result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25])
+
+ index = pd.MultiIndex.from_product([[0, 1], [0.25]])
+ expected = pd.DataFrame({"A": [0.25, 2.50]}, index=index)
+ tm.assert_frame_equal(result, expected)
+
+ df = pd.DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
+ index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])
+
+ result = df.groupby([0, 0, 1, 1]).quantile([0.25, 0.75])
+ expected = pd.DataFrame(
+ {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array2():
+ # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
+ df = pd.DataFrame(
+ np.random.RandomState(0).randint(0, 5, size=(10, 3)), columns=list("ABC")
+ )
+ result = df.groupby("A").quantile([0.3, 0.7])
+ expected = pd.DataFrame(
+ {
+ "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0],
+ "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0],
+ },
+ index=pd.MultiIndex.from_product(
+ [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array_no_sort():
+ df = pd.DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})
+ result = df.groupby([1, 0, 1], sort=False).quantile([0.25, 0.5, 0.75])
+ expected = pd.DataFrame(
+ {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},
+ index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.groupby([1, 0, 1], sort=False).quantile([0.75, 0.25])
+ expected = pd.DataFrame(
+ {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},
+ index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_quantile_array_multiple_levels():
+ df = pd.DataFrame(
+ {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}
+ )
+ result = df.groupby(["c", "d"]).quantile([0.25, 0.75])
+ index = pd.MultiIndex.from_tuples(
+ [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],
+ names=["c", "d", None],
+ )
+ expected = pd.DataFrame(
+ {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_quantile_raises():
df = pd.DataFrame(
[["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"]
@@ -1247,6 +1362,17 @@ def test_quantile_raises():
df.groupby("key").quantile()
+def test_quantile_out_of_bounds_q_raises():
+ # https://github.com/pandas-dev/pandas/issues/27470
+ df = pd.DataFrame(dict(a=[0, 0, 0, 1, 1, 1], b=range(6)))
+ g = df.groupby([0, 0, 0, 1, 1, 1])
+ with pytest.raises(ValueError, match="Got '50.0' instead"):
+ g.quantile(50)
+
+ with pytest.raises(ValueError, match="Got '-1.0' instead"):
+ g.quantile(-1)
+
+
# pipe
# --------------------------------
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 2379d25ebe5aa..bec5cbc5fecb8 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1860,3 +1860,91 @@ def test_groupby_groups_in_BaseGrouper():
result = df.groupby(["beta", pd.Grouper(level="alpha")])
expected = df.groupby(["beta", "alpha"])
assert result.groups == expected.groups
+
+
+@pytest.mark.parametrize("group_name", ["x", ["x"]])
+def test_groupby_axis_1(group_name):
+ # GH 27614
+ df = pd.DataFrame(
+ np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
+ )
+ df.index.name = "y"
+ df.columns.name = "x"
+
+ results = df.groupby(group_name, axis=1).sum()
+ expected = df.T.groupby(group_name).sum().T
+ assert_frame_equal(results, expected)
+
+ # test on MI column
+ iterables = [["bar", "baz", "foo"], ["one", "two"]]
+ mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"])
+ df = pd.DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)
+ results = df.groupby(group_name, axis=1).sum()
+ expected = df.T.groupby(group_name).sum().T
+ assert_frame_equal(results, expected)
+
+
+@pytest.mark.parametrize(
+ "op, expected",
+ [
+ (
+ "shift",
+ {
+ "time": [
+ None,
+ None,
+ Timestamp("2019-01-01 12:00:00"),
+ Timestamp("2019-01-01 12:30:00"),
+ None,
+ None,
+ ]
+ },
+ ),
+ (
+ "bfill",
+ {
+ "time": [
+ Timestamp("2019-01-01 12:00:00"),
+ Timestamp("2019-01-01 12:30:00"),
+ Timestamp("2019-01-01 14:00:00"),
+ Timestamp("2019-01-01 14:30:00"),
+ Timestamp("2019-01-01 14:00:00"),
+ Timestamp("2019-01-01 14:30:00"),
+ ]
+ },
+ ),
+ (
+ "ffill",
+ {
+ "time": [
+ Timestamp("2019-01-01 12:00:00"),
+ Timestamp("2019-01-01 12:30:00"),
+ Timestamp("2019-01-01 12:00:00"),
+ Timestamp("2019-01-01 12:30:00"),
+ Timestamp("2019-01-01 14:00:00"),
+ Timestamp("2019-01-01 14:30:00"),
+ ]
+ },
+ ),
+ ],
+)
+def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
+ # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill
+ tz = tz_naive_fixture
+ data = {
+ "id": ["A", "B", "A", "B", "A", "B"],
+ "time": [
+ Timestamp("2019-01-01 12:00:00"),
+ Timestamp("2019-01-01 12:30:00"),
+ None,
+ None,
+ Timestamp("2019-01-01 14:00:00"),
+ Timestamp("2019-01-01 14:30:00"),
+ ],
+ }
+ df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))
+
+ grouped = df.groupby("id")
+ result = getattr(grouped, op)()
+ expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 1eab3ba253f4d..d3972e6ba9008 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -1001,3 +1001,76 @@ def test_ffill_not_in_axis(func, key, val):
expected = df
assert_frame_equal(result, expected)
+
+
+def test_transform_invalid_name_raises():
+ # GH#27486
+ df = DataFrame(dict(a=[0, 1, 1, 2]))
+ g = df.groupby(["a", "b", "b", "c"])
+ with pytest.raises(ValueError, match="not a valid function name"):
+ g.transform("some_arbitrary_name")
+
+ # method exists on the object, but is not a valid transformation/agg
+ assert hasattr(g, "aggregate") # make sure the method exists
+ with pytest.raises(ValueError, match="not a valid function name"):
+ g.transform("aggregate")
+
+ # Test SeriesGroupBy
+ g = df["a"].groupby(["a", "b", "b", "c"])
+ with pytest.raises(ValueError, match="not a valid function name"):
+ g.transform("some_arbitrary_name")
+
+
+@pytest.mark.parametrize(
+ "obj",
+ [
+ DataFrame(
+ dict(a=[0, 0, 0, 1, 1, 1], b=range(6)), index=["A", "B", "C", "D", "E", "F"]
+ ),
+ Series([0, 0, 0, 1, 1, 1], index=["A", "B", "C", "D", "E", "F"]),
+ ],
+)
+def test_transform_agg_by_name(reduction_func, obj):
+ func = reduction_func
+ g = obj.groupby(np.repeat([0, 1], 3))
+
+ if func == "ngroup": # GH#27468
+ pytest.xfail("TODO: g.transform('ngroup') doesn't work")
+ if func == "size": # GH#27469
+ pytest.xfail("TODO: g.transform('size') doesn't work")
+
+ args = {"nth": [0], "quantile": [0.5]}.get(func, [])
+
+ result = g.transform(func, *args)
+
+ # this is the *definition* of a transformation
+ tm.assert_index_equal(result.index, obj.index)
+ if hasattr(obj, "columns"):
+ tm.assert_index_equal(result.columns, obj.columns)
+
+ # verify that values were broadcasted across each group
+ assert len(set(DataFrame(result).iloc[-3:, -1])) == 1
+
+
+def test_transform_lambda_with_datetimetz():
+ # GH 27496
+ df = DataFrame(
+ {
+ "time": [
+ Timestamp("2010-07-15 03:14:45"),
+ Timestamp("2010-11-19 18:47:06"),
+ ],
+ "timezone": ["Etc/GMT+4", "US/Eastern"],
+ }
+ )
+ result = df.groupby(["timezone"])["time"].transform(
+ lambda x: x.dt.tz_localize(x.name)
+ )
+ expected = Series(
+ [
+ Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"),
+ Timestamp("2010-11-19 18:47:06", tz="US/Eastern"),
+ ],
+ name="time",
+ )
+ assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index c7b28822092a8..f8bd8843ab7e3 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -52,29 +52,30 @@ def seed_df(seed_nans, n, m):
@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
-def test_series_groupby_value_counts(df, keys, bins, n, m):
+@pytest.mark.parametrize("isort", [True, False])
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize("sort", [True, False])
+@pytest.mark.parametrize("ascending", [True, False])
+@pytest.mark.parametrize("dropna", [True, False])
+def test_series_groupby_value_counts(
+ df, keys, bins, n, m, isort, normalize, sort, ascending, dropna
+):
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
- for isort, normalize, sort, ascending, dropna in product((False, True), repeat=5):
-
- kwargs = dict(
- normalize=normalize,
- sort=sort,
- ascending=ascending,
- dropna=dropna,
- bins=bins,
- )
+ kwargs = dict(
+ normalize=normalize, sort=sort, ascending=ascending, dropna=dropna, bins=bins
+ )
- gr = df.groupby(keys, sort=isort)
- left = gr["3rd"].value_counts(**kwargs)
+ gr = df.groupby(keys, sort=isort)
+ left = gr["3rd"].value_counts(**kwargs)
- gr = df.groupby(keys, sort=isort)
- right = gr["3rd"].apply(Series.value_counts, **kwargs)
- right.index.names = right.index.names[:-1] + ["3rd"]
+ gr = df.groupby(keys, sort=isort)
+ right = gr["3rd"].apply(Series.value_counts, **kwargs)
+ right.index.names = right.index.names[:-1] + ["3rd"]
- # have to sort on index because of unstable sort on values
- left, right = map(rebuild_index, (left, right)) # xref GH9212
- tm.assert_series_equal(left.sort_index(), right.sort_index())
+ # have to sort on index because of unstable sort on values
+ left, right = map(rebuild_index, (left, right)) # xref GH9212
+ tm.assert_series_equal(left.sort_index(), right.sort_index())
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index ee380c6108c38..05d745ccc0e8e 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -9,6 +9,11 @@
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, date_range
+from pandas.core.groupby.base import (
+ groupby_other_methods,
+ reduction_kernels,
+ transformation_kernels,
+)
from pandas.util import testing as tm
AGG_FUNCTIONS = [
@@ -376,3 +381,49 @@ def test_groupby_selection_with_methods(df):
tm.assert_frame_equal(
g.filter(lambda x: len(x) == 3), g_exp.filter(lambda x: len(x) == 3)
)
+
+
+def test_all_methods_categorized(mframe):
+ grp = mframe.groupby(mframe.iloc[:, 0])
+ names = {_ for _ in dir(grp) if not _.startswith("_")} - set(mframe.columns)
+ new_names = set(names)
+ new_names -= reduction_kernels
+ new_names -= transformation_kernels
+ new_names -= groupby_other_methods
+
+ assert not (reduction_kernels & transformation_kernels)
+ assert not (reduction_kernels & groupby_other_methods)
+ assert not (transformation_kernels & groupby_other_methods)
+
+ # new public method?
+ if new_names:
+ msg = """
+There are uncatgeorized methods defined on the Grouper class:
+{names}.
+
+Was a new method recently added?
+
+Every public method On Grouper must appear in exactly one the
+following three lists defined in pandas.core.groupby.base:
+- `reduction_kernels`
+- `transformation_kernels`
+- `groupby_other_methods`
+see the comments in pandas/core/groupby/base.py for guidance on
+how to fix this test.
+ """
+ raise AssertionError(msg.format(names=names))
+
+ # removed a public method?
+ all_categorized = reduction_kernels | transformation_kernels | groupby_other_methods
+ print(names)
+ print(all_categorized)
+ if not (names == all_categorized):
+ msg = """
+Some methods which are supposed to be on the Grouper class
+are missing:
+{names}.
+
+They're still defined in one of the lists that live in pandas/core/groupby/base.py.
+If you removed a method, you should update them
+"""
+ raise AssertionError(msg.format(names=all_categorized - names))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 9459069f0ea2d..0e74c87388682 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1,3 +1,5 @@
+import gc
+
import numpy as np
import pytest
@@ -908,3 +910,10 @@ def test_is_unique(self):
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
+
+ def test_engine_reference_cycle(self):
+ # GH27585
+ index = self.create_index()
+ nrefs_pre = len(gc.get_referrers(index))
+ index._engine
+ assert len(gc.get_referrers(index)) == nrefs_pre
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index 6708feda7dd1e..88bc11c588673 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -759,6 +759,8 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype):
assert result == expected
# This is the desired future behavior
+ # Note: this xfail is not strict because the test passes with
+ # None or any of the UTC variants for tz_naive_fixture
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
@@ -766,7 +768,7 @@ def test_construction_int_rountrip(self, tz_naive_fixture):
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
- expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
+ expected = DatetimeIndex([result], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
@@ -822,6 +824,12 @@ def test_constructor_wrong_precision_raises(self):
with pytest.raises(ValueError):
pd.DatetimeIndex(["2000"], dtype="datetime64[us]")
+ def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self):
+ # GH 27011
+ result = Index(np.array([Timestamp("2019", tz="UTC"), np.nan], dtype=object))
+ expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT])
+ tm.assert_index_equal(result, expected)
+
class TestTimeSeries:
def test_dti_constructor_preserve_dti_freq(self):
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 4ea32359b8d4a..ab3107a0798e5 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -377,3 +377,11 @@ def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
tm.assert_index_equal(dti.nanosecond, pd.Index(np.arange(10, dtype=np.int64)))
+
+
+def test_iter_readonly():
+ # GH#28055 ints_to_pydatetime with readonly array
+ arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])
+ arr.setflags(write=False)
+ dti = pd.to_datetime(arr)
+ list(dti)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index d4dff2cbce89b..2ec267c66091b 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -393,6 +393,18 @@ def test_equals(self):
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
+ # check that we do not raise when comparing with OutOfBounds objects
+ oob = pd.Index([datetime(2500, 1, 1)] * 3, dtype=object)
+ assert not idx.equals(oob)
+ assert not idx2.equals(oob)
+ assert not idx3.equals(oob)
+
+ # check that we do not raise when comparing with OutOfBounds dt64
+ oob2 = oob.map(np.datetime64)
+ assert not idx.equals(oob2)
+ assert not idx2.equals(oob2)
+ assert not idx3.equals(oob2)
+
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index 3095bf9657277..5660fa5ffed80 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -468,3 +468,14 @@ def test_getitem_with_datestring_with_UTC_offset(self, start, end):
with pytest.raises(ValueError, match="The index must be timezone"):
df = df.tz_localize(None)
df[start:end]
+
+ def test_slice_reduce_to_series(self):
+ # GH 27516
+ df = pd.DataFrame(
+ {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M")
+ )
+ expected = pd.Series(
+ range(12), index=pd.date_range("2000", periods=12, freq="M"), name="A"
+ )
+ result = df.loc["2000", "A"]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 10d422e8aa52c..b6f25d45f136a 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -741,10 +741,7 @@ def test_to_datetime_tz_psycopg2(self, cache):
)
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize(
- "cache",
- [pytest.param(True, marks=pytest.mark.skipif(True, reason="GH 18111")), False],
- )
+ @pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
with pytest.raises(TypeError):
@@ -904,6 +901,13 @@ def test_to_datetime_coerce(self):
)
tm.assert_index_equal(result, expected)
+ def test_to_datetime_coerce_malformed(self):
+ # GH 28299
+ ts_strings = ["200622-12-31", "111111-24-11"]
+ result = to_datetime(ts_strings, errors="coerce")
+ expected = Index([NaT, NaT])
+ tm.assert_index_equal(result, expected)
+
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
@@ -1031,6 +1035,12 @@ def test_to_datetime_box_deprecated(self):
result = pd.to_datetime(expected).to_datetime64()
assert result == expected
+ @pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
+ def test_to_datetime_with_format_out_of_bounds(self, dt_str):
+ # GH 9107
+ with pytest.raises(OutOfBoundsDatetime):
+ pd.to_datetime(dt_str, format="%Y%m%d")
+
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
@@ -1623,6 +1633,18 @@ def test_dayfirst(self, cache):
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
+ @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
+ def test_to_datetime_dta_tz(self, klass):
+ # GH#27733
+ dti = date_range("2015-04-05", periods=3).rename("foo")
+ expected = dti.tz_localize("UTC")
+
+ obj = klass(dti)
+ expected = klass(expected)
+
+ result = to_datetime(obj, utc=True)
+ tm.assert_equal(result, expected)
+
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 91022fef16521..708cd8a4579e8 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -67,7 +67,7 @@ def test_astype_cannot_cast(self, index, dtype):
index.astype(dtype)
def test_astype_invalid_dtype(self, index):
- msg = "data type 'fake_dtype' not understood"
+ msg = 'data type "fake_dtype" not understood'
with pytest.raises(TypeError, match=msg):
index.astype("fake_dtype")
@@ -143,7 +143,7 @@ def test_subtype_integer(self, subtype):
tm.assert_index_equal(result, expected)
# raises with NA
- msg = "Cannot convert NA to integer"
+ msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
with pytest.raises(ValueError, match=msg):
index.insert(0, np.nan).astype(dtype)
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py
index e2abb4531525a..98c1f7c6c2a8a 100644
--- a/pandas/tests/indexes/interval/test_construction.py
+++ b/pandas/tests/indexes/interval/test_construction.py
@@ -164,7 +164,7 @@ def test_generic_errors(self, constructor):
constructor(dtype="int64", **filler)
# invalid dtype
- msg = "data type 'invalid' not understood"
+ msg = 'data type "invalid" not understood'
with pytest.raises(TypeError, match=msg):
constructor(dtype="invalid", **filler)
@@ -421,32 +421,3 @@ def test_index_mixed_closed(self):
result = Index(intervals)
expected = Index(intervals, dtype=object)
tm.assert_index_equal(result, expected)
-
-
-class TestFromIntervals(TestClassConstructors):
- """
- Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
- IntervalIndex constructor. Same tests as the IntervalIndex constructor,
- plus deprecation test. Should only need to delete this class when removed.
- """
-
- @pytest.fixture
- def constructor(self):
- def from_intervals_ignore_warnings(*args, **kwargs):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- return IntervalIndex.from_intervals(*args, **kwargs)
-
- return from_intervals_ignore_warnings
-
- def test_deprecated(self):
- ivs = [Interval(0, 1), Interval(1, 2)]
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- IntervalIndex.from_intervals(ivs)
-
- @pytest.mark.skip(reason="parent class test that is not applicable")
- def test_index_object_dtype(self):
- pass
-
- @pytest.mark.skip(reason="parent class test that is not applicable")
- def test_index_mixed_closed(self):
- pass
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index c61af1ce70aed..eeb0f43f4b900 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -417,6 +417,46 @@ def test_repr_missing(self, constructor, expected):
result = repr(obj)
assert result == expected
+ @pytest.mark.parametrize(
+ "tuples, closed, expected_data",
+ [
+ ([(0, 1), (1, 2), (2, 3)], "left", ["[0, 1)", "[1, 2)", "[2, 3)"]),
+ (
+ [(0.5, 1.0), np.nan, (2.0, 3.0)],
+ "right",
+ ["(0.5, 1.0]", "NaN", "(2.0, 3.0]"],
+ ),
+ (
+ [
+ (Timestamp("20180101"), Timestamp("20180102")),
+ np.nan,
+ ((Timestamp("20180102"), Timestamp("20180103"))),
+ ],
+ "both",
+ ["[2018-01-01, 2018-01-02]", "NaN", "[2018-01-02, 2018-01-03]"],
+ ),
+ (
+ [
+ (Timedelta("0 days"), Timedelta("1 days")),
+ (Timedelta("1 days"), Timedelta("2 days")),
+ np.nan,
+ ],
+ "neither",
+ [
+ "(0 days 00:00:00, 1 days 00:00:00)",
+ "(1 days 00:00:00, 2 days 00:00:00)",
+ "NaN",
+ ],
+ ),
+ ],
+ )
+ def test_to_native_types(self, tuples, closed, expected_data):
+ # GH 28210
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ result = index.to_native_types()
+ expected = np.array(expected_data)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
@@ -1095,3 +1135,10 @@ def test_is_all_dates(self):
)
year_2017_index = pd.IntervalIndex([year_2017])
assert not year_2017_index.is_all_dates
+
+
+def test_dir():
+ # GH#27571 dir(interval_index) should not raise
+ index = IntervalIndex.from_arrays([0, 1], [1, 2])
+ result = dir(index)
+ assert "str" not in result
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py
index 86c9ee3455d0b..9472d539537ba 100644
--- a/pandas/tests/indexes/multi/test_constructor.py
+++ b/pandas/tests/indexes/multi/test_constructor.py
@@ -348,6 +348,19 @@ def test_from_arrays_different_lengths(idx1, idx2):
MultiIndex.from_arrays([idx1, idx2])
+def test_from_arrays_respects_none_names():
+ # GH27292
+ a = pd.Series([1, 2, 3], name="foo")
+ b = pd.Series(["a", "b", "c"], name="bar")
+
+ result = MultiIndex.from_arrays([a, b], names=None)
+ expected = MultiIndex(
+ levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None
+ )
+
+ tm.assert_index_equal(result, expected)
+
+
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
@@ -539,6 +552,43 @@ def test_from_product_iterator():
MultiIndex.from_product(0)
+@pytest.mark.parametrize(
+ "a, b, expected_names",
+ [
+ (
+ pd.Series([1, 2, 3], name="foo"),
+ pd.Series(["a", "b"], name="bar"),
+ ["foo", "bar"],
+ ),
+ (pd.Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]),
+ ([1, 2, 3], ["a", "b"], None),
+ ],
+)
+def test_from_product_infer_names(a, b, expected_names):
+ # GH27292
+ result = MultiIndex.from_product([a, b])
+ expected = MultiIndex(
+ levels=[[1, 2, 3], ["a", "b"]],
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
+ names=expected_names,
+ )
+ tm.assert_index_equal(result, expected)
+
+
+def test_from_product_respects_none_names():
+ # GH27292
+ a = pd.Series([1, 2, 3], name="foo")
+ b = pd.Series(["a", "b"], name="bar")
+
+ result = MultiIndex.from_product([a, b], names=None)
+ expected = MultiIndex(
+ levels=[[1, 2, 3], ["a", "b"]],
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
+ names=None,
+ )
+ tm.assert_index_equal(result, expected)
+
+
def test_create_index_existing_name(idx):
# GH11193, when an existing index is passed, and a new name is not
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 8b3b66bd1ee6b..ee37be7ab4c14 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -354,6 +354,35 @@ def test_period_set_index_reindex(self):
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
+ @pytest.mark.parametrize(
+ "p_values, o_values, values, expected_values",
+ [
+ (
+ [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],
+ [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC"), "All"],
+ [1.0, 1.0],
+ [1.0, 1.0, np.nan],
+ ),
+ (
+ [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],
+ [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],
+ [1.0, 1.0],
+ [1.0, 1.0],
+ ),
+ ],
+ )
+ def test_period_reindex_with_object(
+ self, p_values, o_values, values, expected_values
+ ):
+ # GH 28337
+ period_index = PeriodIndex(p_values)
+ object_index = Index(o_values)
+
+ s = pd.Series(values, index=period_index)
+ result = s.reindex(object_index)
+ expected = pd.Series(expected_values, index=object_index)
+ tm.assert_series_equal(result, expected)
+
def test_factorize(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M"
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index e75d80bec1fdf..d1ed79118d2fa 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2004,7 +2004,7 @@ def test_isin_level_kwarg_bad_label_raises(self, label, indices):
msg = "'Level {} not found'"
else:
index = index.rename("foo")
- msg = r"'Level {} must be same as name \(foo\)'"
+ msg = r"Requested level \({}\) does not match index name \(foo\)"
with pytest.raises(KeyError, match=msg.format(label)):
index.isin([], level=label)
@@ -2805,3 +2805,17 @@ def test_deprecated_fastpath():
expected = pd.CategoricalIndex(["a", "b", "c"], name="test")
tm.assert_index_equal(idx, expected)
+
+
+def test_shape_of_invalid_index():
+ # Currently, it is possible to create "invalid" index objects backed by
+ # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125
+ # about this). However, as long as this is not solved in general,this test ensures
+ # that the returned shape is consistent with this underlying array for
+ # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
+ a = np.arange(8).reshape(2, 2, 2)
+ idx = pd.Index(a)
+ assert idx.shape == a.shape
+
+ idx = pd.Index([0, 1, 2, 3])
+ assert idx[:, None].shape == (4, 1)
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index e79991f652154..67bf9bd20e716 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -411,7 +411,7 @@ def test_append(self):
tm.assert_index_equal(result, expected, exact=True)
def test_append_to_another(self):
- # hits _concat_index_asobject
+ # hits Index._concat_same_dtype
fst = Index(["a", "b"])
snd = CategoricalIndex(["d", "e"])
result = fst.append(snd)
@@ -823,6 +823,11 @@ def test_equals_categorical(self):
msg = (
"categorical index comparisons must have the same categories"
" and ordered attributes"
+ "|"
+ "Categoricals can only be compared if 'categories' are the same. "
+ "Categories are different lengths"
+ "|"
+ "Categoricals can only be compared if 'ordered' is the same"
)
with pytest.raises(TypeError, match=msg):
ci1 == ci2
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 0e9aa07a4c05a..ae1a21e9b3980 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -35,7 +35,8 @@ def test_droplevel(self, indices):
for level in "wrong", ["wrong"]:
with pytest.raises(
- KeyError, match=re.escape("'Level wrong must be same as name (None)'")
+ KeyError,
+ match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
indices.droplevel(level)
@@ -200,7 +201,7 @@ def test_unique(self, indices):
with pytest.raises(IndexError, match=msg):
indices.unique(level=3)
- msg = r"Level wrong must be same as name \({}\)".format(
+ msg = r"Requested level \(wrong\) does not match index name \({}\)".format(
re.escape(indices.name.__repr__())
)
with pytest.raises(KeyError, match=msg):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index f246307e63e3b..8bc9783694492 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -242,10 +242,17 @@ def test_astype(self):
# GH 13149
for dtype in ["int16", "int32", "int64"]:
i = Float64Index([0, 1.1, np.NAN])
- msg = "Cannot convert NA to integer"
+ msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
with pytest.raises(ValueError, match=msg):
i.astype(dtype)
+ def test_cannot_cast_inf_to_int(self):
+ idx = pd.Float64Index([1, 2, np.inf])
+
+ msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
+ with pytest.raises(ValueError, match=msg):
+ idx.astype(int)
+
def test_type_coercion_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index f9ca1bca04165..645ad19ea4cc9 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -118,4 +118,7 @@ def test_elementwise_comparison_warning():
# this test.
idx = Index([1, 2])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- idx == "a"
+ result = idx == "a"
+
+ expected = np.array([False, False])
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index d7d8b10347861..54ed5058b5253 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -1,3 +1,5 @@
+from datetime import timedelta
+
import numpy as np
import pytest
@@ -266,6 +268,17 @@ def test_equals(self):
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
+ # Check that we dont raise OverflowError on comparisons outside the
+ # implementation range
+ oob = pd.Index([timedelta(days=10 ** 6)] * 3, dtype=object)
+ assert not idx.equals(oob)
+ assert not idx2.equals(oob)
+
+ # FIXME: oob.apply(np.timedelta64) incorrectly overflows
+ oob2 = pd.Index([np.timedelta64(x) for x in oob], dtype=object)
+ assert not idx.equals(oob2)
+ assert not idx2.equals(oob2)
+
@pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)])
def test_freq_setter(self, values, freq):
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 9ceeb06b6fd86..78764e6763e95 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -210,22 +210,23 @@ def _print(result, error=None):
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
- try:
- xp = self.get_result(obj, method2, k2, a)
- except Exception:
- result = "no comp"
- _print(result)
- return
+ with catch_warnings(record=True):
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
+ try:
+ xp = self.get_result(obj, method2, k2, a)
+ except (KeyError, IndexError):
+ # TODO: why is this allowed?
+ result = "no comp"
+ _print(result)
+ return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
assert rs == xp
- elif xp.ndim == 1:
- tm.assert_series_equal(rs, xp)
- elif xp.ndim == 2:
- tm.assert_frame_equal(rs, xp)
+ else:
+ tm.assert_equal(rs, xp)
result = "ok"
except AssertionError as e:
detail = str(e)
@@ -242,7 +243,7 @@ def _print(result, error=None):
except AssertionError:
raise
- except Exception as detail:
+ except (IndexError, TypeError, KeyError) as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index 7ae42782774db..bbce786fc07ba 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -112,3 +112,38 @@ def test_loc_getitem_frame(self):
# partial missing
with pytest.raises(KeyError, match="^$"):
df.loc[[10, 4]]
+
+
+class TestIntervalIndexInsideMultiIndex:
+ def test_mi_intervalindex_slicing_with_scalar(self):
+ # GH#27456
+ idx = pd.MultiIndex.from_arrays(
+ [
+ pd.Index(["FC", "FC", "FC", "FC", "OWNER", "OWNER", "OWNER", "OWNER"]),
+ pd.Index(
+ ["RID1", "RID1", "RID2", "RID2", "RID1", "RID1", "RID2", "RID2"]
+ ),
+ pd.IntervalIndex.from_arrays(
+ [0, 1, 10, 11, 0, 1, 10, 11], [1, 2, 11, 12, 1, 2, 11, 12]
+ ),
+ ]
+ )
+
+ idx.names = ["Item", "RID", "MP"]
+ df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]})
+ df.index = idx
+ query_df = pd.DataFrame(
+ {
+ "Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"],
+ "RID": ["RID1", "RID1", "RID1", "RID2", "RID2"],
+ "MP": [0.2, 1.5, 1.6, 11.1, 10.9],
+ }
+ )
+
+ query_df = query_df.sort_index()
+
+ idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP])
+ query_df.index = idx
+ result = df.value.loc[query_df.index]
+ expected = pd.Series([1, 6, 2, 8, 7], index=idx, name="value")
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py
index d43115d60c029..2e7a5a08a16f0 100644
--- a/pandas/tests/indexing/multiindex/test_ix.py
+++ b/pandas/tests/indexing/multiindex/test_ix.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings, simplefilter
-
import numpy as np
import pytest
@@ -9,9 +7,8 @@
from pandas.util import testing as tm
-@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
-class TestMultiIndexIx:
- def test_frame_setitem_ix(self, multiindex_dataframe_random_data):
+class TestMultiIndex:
+ def test_frame_setitem_loc(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
frame.loc[("bar", "two"), "B"] = 5
assert frame.loc[("bar", "two"), "B"] == 5
@@ -22,16 +19,7 @@ def test_frame_setitem_ix(self, multiindex_dataframe_random_data):
df.loc[("bar", "two"), 1] = 7
assert df.loc[("bar", "two"), 1] == 7
- with catch_warnings(record=True):
- simplefilter("ignore", FutureWarning)
- df = frame.copy()
- df.columns = list(range(3))
- df.ix[("bar", "two"), 1] = 7
- assert df.loc[("bar", "two"), 1] == 7
-
- def test_ix_general(self):
-
- # ix general issues
+ def test_loc_general(self):
# GH 2817
data = {
@@ -55,7 +43,7 @@ def test_ix_general(self):
expected = DataFrame({"amount": [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
- def test_ix_multiindex_missing_label_raises(self):
+ def test_loc_multiindex_missing_label_raises(self):
# GH 21593
df = DataFrame(
np.random.randn(3, 3),
@@ -64,12 +52,12 @@ def test_ix_multiindex_missing_label_raises(self):
)
with pytest.raises(KeyError, match=r"^2$"):
- df.ix[2]
+ df.loc[2]
- def test_series_ix_getitem_fancy(
+ def test_series_loc_getitem_fancy(
self, multiindex_year_month_day_dataframe_random_data
):
s = multiindex_year_month_day_dataframe_random_data["A"]
expected = s.reindex(s.index[49:51])
- result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
+ result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index a08b2b4c66af2..8b48c2bf7169f 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -390,3 +390,26 @@ def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
expected = 0
result = df.sort_index().loc[("bar", "three"), "B"]
assert result == expected
+
+
+def test_loc_setitem_single_column_slice():
+ # case from https://github.com/pandas-dev/pandas/issues/27841
+ df = DataFrame(
+ "string",
+ index=list("abcd"),
+ columns=MultiIndex.from_product([["Main"], ("another", "one")]),
+ )
+ df["labels"] = "a"
+ df.loc[:, "labels"] = df.index
+ tm.assert_numpy_array_equal(np.asarray(df["labels"]), np.asarray(df.index))
+
+ # test with non-object block
+ df = DataFrame(
+ np.nan,
+ index=range(4),
+ columns=MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")]),
+ )
+ expected = df.copy()
+ df.loc[:, "B"] = np.arange(4)
+ expected.iloc[:, 2] = np.arange(4)
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index b1519d82e1aa7..692b57ff98f94 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings, simplefilter
-
import numpy as np
import pytest
@@ -106,11 +104,6 @@ def test_getitem_partial_column_select(self):
expected = df.loc[("a", "y")][[1, 0]]
tm.assert_frame_equal(result, expected)
- with catch_warnings(record=True):
- simplefilter("ignore", FutureWarning)
- result = df.ix[("a", "y"), [1, 0]]
- tm.assert_frame_equal(result, expected)
-
with pytest.raises(KeyError, match=r"\('a', 'foo'\)"):
df.loc[("a", "foo"), :]
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index 261d2e9c04e77..c383c38958692 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings, simplefilter
-
import numpy as np
from numpy.random import randn
import pytest
@@ -10,133 +8,114 @@
from pandas.util import testing as tm
-@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexSetItem:
def test_setitem_multiindex(self):
- with catch_warnings(record=True):
-
- for index_fn in ("ix", "loc"):
-
- def assert_equal(a, b):
- assert a == b
-
- def check(target, indexers, value, compare_fn, expected=None):
- fn = getattr(target, index_fn)
- fn.__setitem__(indexers, value)
- result = fn.__getitem__(indexers)
- if expected is None:
- expected = value
- compare_fn(result, expected)
-
- # GH7190
- index = MultiIndex.from_product(
- [np.arange(0, 100), np.arange(0, 80)], names=["time", "firm"]
- )
- t, n = 0, 2
- df = DataFrame(
- np.nan,
- columns=["A", "w", "l", "a", "x", "X", "d", "profit"],
- index=index,
- )
- check(
- target=df, indexers=((t, n), "X"), value=0, compare_fn=assert_equal
- )
-
- df = DataFrame(
- -999,
- columns=["A", "w", "l", "a", "x", "X", "d", "profit"],
- index=index,
- )
- check(
- target=df, indexers=((t, n), "X"), value=1, compare_fn=assert_equal
- )
-
- df = DataFrame(
- columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
- )
- check(
- target=df, indexers=((t, n), "X"), value=2, compare_fn=assert_equal
- )
-
- # gh-7218: assigning with 0-dim arrays
- df = DataFrame(
- -999,
- columns=["A", "w", "l", "a", "x", "X", "d", "profit"],
- index=index,
- )
- check(
- target=df,
- indexers=((t, n), "X"),
- value=np.array(3),
- compare_fn=assert_equal,
- expected=3,
- )
-
- # GH5206
- df = DataFrame(
- np.arange(25).reshape(5, 5),
- columns="A,B,C,D,E".split(","),
- dtype=float,
- )
- df["F"] = 99
- row_selection = df["A"] % 2 == 0
- col_selection = ["B", "C"]
- with catch_warnings(record=True):
- df.ix[row_selection, col_selection] = df["F"]
- output = DataFrame(99.0, index=[0, 2, 4], columns=["B", "C"])
- with catch_warnings(record=True):
- tm.assert_frame_equal(df.ix[row_selection, col_selection], output)
- check(
- target=df,
- indexers=(row_selection, col_selection),
- value=df["F"],
- compare_fn=tm.assert_frame_equal,
- expected=output,
- )
-
- # GH11372
- idx = MultiIndex.from_product(
- [["A", "B", "C"], date_range("2015-01-01", "2015-04-01", freq="MS")]
- )
- cols = MultiIndex.from_product(
- [["foo", "bar"], date_range("2016-01-01", "2016-02-01", freq="MS")]
- )
-
- df = DataFrame(np.random.random((12, 4)), index=idx, columns=cols)
-
- subidx = MultiIndex.from_tuples(
- [("A", Timestamp("2015-01-01")), ("A", Timestamp("2015-02-01"))]
- )
- subcols = MultiIndex.from_tuples(
- [("foo", Timestamp("2016-01-01")), ("foo", Timestamp("2016-02-01"))]
- )
-
- vals = DataFrame(
- np.random.random((2, 2)), index=subidx, columns=subcols
- )
- check(
- target=df,
- indexers=(subidx, subcols),
- value=vals,
- compare_fn=tm.assert_frame_equal,
- )
- # set all columns
- vals = DataFrame(np.random.random((2, 4)), index=subidx, columns=cols)
- check(
- target=df,
- indexers=(subidx, slice(None, None, None)),
- value=vals,
- compare_fn=tm.assert_frame_equal,
- )
- # identity
- copy = df.copy()
- check(
- target=df,
- indexers=(df.index, df.columns),
- value=df,
- compare_fn=tm.assert_frame_equal,
- expected=copy,
- )
+ for index_fn in ("loc",):
+
+ def assert_equal(a, b):
+ assert a == b
+
+ def check(target, indexers, value, compare_fn, expected=None):
+ fn = getattr(target, index_fn)
+ fn.__setitem__(indexers, value)
+ result = fn.__getitem__(indexers)
+ if expected is None:
+ expected = value
+ compare_fn(result, expected)
+
+ # GH7190
+ index = MultiIndex.from_product(
+ [np.arange(0, 100), np.arange(0, 80)], names=["time", "firm"]
+ )
+ t, n = 0, 2
+ df = DataFrame(
+ np.nan,
+ columns=["A", "w", "l", "a", "x", "X", "d", "profit"],
+ index=index,
+ )
+ check(target=df, indexers=((t, n), "X"), value=0, compare_fn=assert_equal)
+
+ df = DataFrame(
+ -999, columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
+ )
+ check(target=df, indexers=((t, n), "X"), value=1, compare_fn=assert_equal)
+
+ df = DataFrame(
+ columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
+ )
+ check(target=df, indexers=((t, n), "X"), value=2, compare_fn=assert_equal)
+
+ # gh-7218: assigning with 0-dim arrays
+ df = DataFrame(
+ -999, columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
+ )
+ check(
+ target=df,
+ indexers=((t, n), "X"),
+ value=np.array(3),
+ compare_fn=assert_equal,
+ expected=3,
+ )
+
+ # GH5206
+ df = DataFrame(
+ np.arange(25).reshape(5, 5), columns="A,B,C,D,E".split(","), dtype=float
+ )
+ df["F"] = 99
+ row_selection = df["A"] % 2 == 0
+ col_selection = ["B", "C"]
+ df.loc[row_selection, col_selection] = df["F"]
+ output = DataFrame(99.0, index=[0, 2, 4], columns=["B", "C"])
+ tm.assert_frame_equal(df.loc[row_selection, col_selection], output)
+ check(
+ target=df,
+ indexers=(row_selection, col_selection),
+ value=df["F"],
+ compare_fn=tm.assert_frame_equal,
+ expected=output,
+ )
+
+ # GH11372
+ idx = MultiIndex.from_product(
+ [["A", "B", "C"], date_range("2015-01-01", "2015-04-01", freq="MS")]
+ )
+ cols = MultiIndex.from_product(
+ [["foo", "bar"], date_range("2016-01-01", "2016-02-01", freq="MS")]
+ )
+
+ df = DataFrame(np.random.random((12, 4)), index=idx, columns=cols)
+
+ subidx = MultiIndex.from_tuples(
+ [("A", Timestamp("2015-01-01")), ("A", Timestamp("2015-02-01"))]
+ )
+ subcols = MultiIndex.from_tuples(
+ [("foo", Timestamp("2016-01-01")), ("foo", Timestamp("2016-02-01"))]
+ )
+
+ vals = DataFrame(np.random.random((2, 2)), index=subidx, columns=subcols)
+ check(
+ target=df,
+ indexers=(subidx, subcols),
+ value=vals,
+ compare_fn=tm.assert_frame_equal,
+ )
+ # set all columns
+ vals = DataFrame(np.random.random((2, 4)), index=subidx, columns=cols)
+ check(
+ target=df,
+ indexers=(subidx, slice(None, None, None)),
+ value=vals,
+ compare_fn=tm.assert_frame_equal,
+ )
+ # identity
+ copy = df.copy()
+ check(
+ target=df,
+ indexers=(df.index, df.columns),
+ value=df,
+ compare_fn=tm.assert_frame_equal,
+ expected=copy,
+ )
def test_multiindex_setitem(self):
@@ -204,9 +183,8 @@ def test_multiindex_assignment(self):
df["d"] = np.nan
arr = np.array([0.0, 1.0])
- with catch_warnings(record=True):
- df.ix[4, "d"] = arr
- tm.assert_series_equal(df.ix[4, "d"], Series(arr, index=[8, 10], name="d"))
+ df.loc[4, "d"] = arr
+ tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d"))
# single dtype
df = DataFrame(
@@ -215,25 +193,21 @@ def test_multiindex_assignment(self):
index=[[4, 4, 8], [8, 10, 12]],
)
- with catch_warnings(record=True):
- df.ix[4, "c"] = arr
- exp = Series(arr, index=[8, 10], name="c", dtype="float64")
- tm.assert_series_equal(df.ix[4, "c"], exp)
+ df.loc[4, "c"] = arr
+ exp = Series(arr, index=[8, 10], name="c", dtype="float64")
+ tm.assert_series_equal(df.loc[4, "c"], exp)
# scalar ok
- with catch_warnings(record=True):
- df.ix[4, "c"] = 10
- exp = Series(10, index=[8, 10], name="c", dtype="float64")
- tm.assert_series_equal(df.ix[4, "c"], exp)
+ df.loc[4, "c"] = 10
+ exp = Series(10, index=[8, 10], name="c", dtype="float64")
+ tm.assert_series_equal(df.loc[4, "c"], exp)
# invalid assignments
with pytest.raises(ValueError):
- with catch_warnings(record=True):
- df.ix[4, "c"] = [0, 1, 2, 3]
+ df.loc[4, "c"] = [0, 1, 2, 3]
with pytest.raises(ValueError):
- with catch_warnings(record=True):
- df.ix[4, "c"] = [0]
+ df.loc[4, "c"] = [0]
# groupby example
NUM_ROWS = 100
@@ -264,8 +238,7 @@ def f(name, df2):
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
- with catch_warnings(record=True):
- df.ix[name, "new_col"] = new_vals
+ df.loc[name, "new_col"] = new_vals
def test_series_setitem(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
@@ -313,11 +286,6 @@ def test_frame_getitem_setitem_multislice(self):
result = df.loc[:, "value"]
tm.assert_series_equal(df["value"], result)
- with catch_warnings(record=True):
- simplefilter("ignore", FutureWarning)
- result = df.ix[:, "value"]
- tm.assert_series_equal(df["value"], result)
-
result = df.loc[df.index[1:3], "value"]
tm.assert_series_equal(df["value"][1:3], result)
@@ -412,7 +380,7 @@ def test_setitem_change_dtype(self, multiindex_dataframe_random_data):
reindexed = dft.reindex(columns=[("foo", "two")])
tm.assert_series_equal(reindexed["foo", "two"], s > s.median())
- def test_set_column_scalar_with_ix(self, multiindex_dataframe_random_data):
+ def test_set_column_scalar_with_loc(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
subset = frame.index[[1, 4, 5]]
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index dea1d5114f1b9..05b58b0eca9b8 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1029,22 +1029,16 @@ def test_replace_series(self, how, to_key, from_key):
tm.assert_series_equal(result, exp)
- # TODO(jbrockmendel) commented out to only have a single xfail printed
- @pytest.mark.xfail(
- reason="GH #18376, tzawareness-compat bug in BlockManager.replace_list"
+ @pytest.mark.parametrize("how", ["dict", "series"])
+ @pytest.mark.parametrize(
+ "to_key",
+ ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"],
)
- # @pytest.mark.parametrize('how', ['dict', 'series'])
- # @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object',
- # 'complex128', 'float64', 'int64'])
- # @pytest.mark.parametrize('from_key', ['datetime64[ns, UTC]',
- # 'datetime64[ns, US/Eastern]'])
- # def test_replace_series_datetime_tz(self, how, to_key, from_key):
- def test_replace_series_datetime_tz(self):
- how = "series"
- from_key = "datetime64[ns, US/Eastern]"
- to_key = "timedelta64[ns]"
-
- index = pd.Index([3, 4], name="xxx")
+ @pytest.mark.parametrize(
+ "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"]
+ )
+ def test_replace_series_datetime_tz(self, how, to_key, from_key):
+ index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
@@ -1061,24 +1055,17 @@ def test_replace_series_datetime_tz(self):
tm.assert_series_equal(result, exp)
- # TODO(jreback) commented out to only have a single xfail printed
- @pytest.mark.xfail(
- reason="different tz, currently mask_missing raises SystemError", strict=False
+ @pytest.mark.parametrize("how", ["dict", "series"])
+ @pytest.mark.parametrize(
+ "to_key",
+ ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
)
- # @pytest.mark.parametrize('how', ['dict', 'series'])
- # @pytest.mark.parametrize('to_key', [
- # 'datetime64[ns]', 'datetime64[ns, UTC]',
- # 'datetime64[ns, US/Eastern]'])
- # @pytest.mark.parametrize('from_key', [
- # 'datetime64[ns]', 'datetime64[ns, UTC]',
- # 'datetime64[ns, US/Eastern]'])
- # def test_replace_series_datetime_datetime(self, how, to_key, from_key):
- def test_replace_series_datetime_datetime(self):
- how = "dict"
- to_key = "datetime64[ns]"
- from_key = "datetime64[ns]"
-
- index = pd.Index([3, 4], name="xxx")
+ @pytest.mark.parametrize(
+ "from_key",
+ ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
+ )
+ def test_replace_series_datetime_datetime(self, how, to_key, from_key):
+ index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 31e9cff68445e..fb8f62d7a06c5 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -51,7 +51,7 @@ def test_indexing_with_datetime_tz(self):
# indexing
result = df.iloc[1]
expected = Series(
- [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), np.nan, np.nan],
+ [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
@@ -59,7 +59,7 @@ def test_indexing_with_datetime_tz(self):
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
- [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), np.nan, np.nan],
+ [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 78ff6580bb1e1..56a78081bc624 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -1,5 +1,3 @@
-from warnings import catch_warnings
-
import numpy as np
import pytest
@@ -7,8 +5,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
-ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
-
class TestFloatIndexers:
def check(self, result, original, indexer, getitem):
@@ -62,7 +58,6 @@ def test_scalar_error(self):
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
- @ignore_ix
def test_scalar_non_numeric(self):
# GH 4892
@@ -86,11 +81,7 @@ def test_scalar_non_numeric(self):
]:
# getting
- for idxr, getitem in [
- (lambda x: x.ix, False),
- (lambda x: x.iloc, False),
- (lambda x: x, True),
- ]:
+ for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
# gettitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
@@ -106,9 +97,8 @@ def test_scalar_non_numeric(self):
"Cannot index by location index with a"
" non-integer key".format(klass=type(i), kind=str(float))
)
- with catch_warnings(record=True):
- with pytest.raises(error, match=msg):
- idxr(s)[3.0]
+ with pytest.raises(error, match=msg):
+ idxr(s)[3.0]
# label based can be a TypeError or KeyError
if s.index.inferred_type in ["string", "unicode", "mixed"]:
@@ -158,10 +148,9 @@ def test_scalar_non_numeric(self):
s2.loc[3.0] = 10
assert s2.index.is_object()
- for idxr in [lambda x: x.ix, lambda x: x]:
+ for idxr in [lambda x: x]:
s2 = s.copy()
- with catch_warnings(record=True):
- idxr(s2)[3.0] = 0
+ idxr(s2)[3.0] = 0
assert s2.index.is_object()
# fallsback to position selection, series only
@@ -175,7 +164,6 @@ def test_scalar_non_numeric(self):
with pytest.raises(TypeError, match=msg):
s[3.0]
- @ignore_ix
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=["a", "b", "c"])
@@ -183,7 +171,7 @@ def test_scalar_with_mixed(self):
# lookup in a pure stringstr
# with an invalid indexer
- for idxr in [lambda x: x.ix, lambda x: x, lambda x: x.iloc]:
+ for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
r"cannot do label indexing"
@@ -193,9 +181,8 @@ def test_scalar_with_mixed(self):
klass=str(Index), kind=str(float)
)
)
- with catch_warnings(record=True):
- with pytest.raises(TypeError, match=msg):
- idxr(s2)[1.0]
+ with pytest.raises(TypeError, match=msg):
+ idxr(s2)[1.0]
with pytest.raises(KeyError, match=r"^1$"):
s2.loc[1.0]
@@ -220,23 +207,6 @@ def test_scalar_with_mixed(self):
expected = 2
assert result == expected
- # mixed index so we have label
- # indexing
- for idxr in [lambda x: x.ix]:
- with catch_warnings(record=True):
-
- msg = (
- r"cannot do label indexing"
- r" on {klass} with these indexers \[1\.0\] of"
- r" {kind}".format(klass=str(Index), kind=str(float))
- )
- with pytest.raises(TypeError, match=msg):
- idxr(s3)[1.0]
-
- result = idxr(s3)[1]
- expected = 2
- assert result == expected
-
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s3.iloc[1.0]
@@ -247,7 +217,6 @@ def test_scalar_with_mixed(self):
expected = 3
assert result == expected
- @ignore_ix
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
@@ -261,22 +230,13 @@ def test_scalar_integer(self):
]:
# coerce to equal int
- for idxr, getitem in [
- (lambda x: x.ix, False),
- (lambda x: x.loc, False),
- (lambda x: x, True),
- ]:
+ for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
- with catch_warnings(record=True):
- result = idxr(s)[3.0]
+ result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
- for idxr, getitem in [
- (lambda x: x.ix, False),
- (lambda x: x.loc, False),
- (lambda x: x, True),
- ]:
+ for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
if isinstance(s, Series):
@@ -292,20 +252,18 @@ def compare(x, y):
expected = Series(100.0, index=range(len(s)), name=3)
s2 = s.copy()
- with catch_warnings(record=True):
- idxr(s2)[3.0] = 100
+ idxr(s2)[3.0] = 100
- result = idxr(s2)[3.0]
- compare(result, expected)
+ result = idxr(s2)[3.0]
+ compare(result, expected)
- result = idxr(s2)[3]
- compare(result, expected)
+ result = idxr(s2)[3]
+ compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
- @ignore_ix
def test_scalar_float(self):
# scalar float indexers work on a float index
@@ -319,11 +277,7 @@ def test_scalar_float(self):
# assert all operations except for iloc are ok
indexer = index[3]
- for idxr, getitem in [
- (lambda x: x.ix, False),
- (lambda x: x.loc, False),
- (lambda x: x, True),
- ]:
+ for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
# getting
result = idxr(s)[indexer]
@@ -332,14 +286,12 @@ def test_scalar_float(self):
# setting
s2 = s.copy()
- with catch_warnings(record=True):
- result = idxr(s2)[indexer]
+ result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
- with catch_warnings(record=True):
- with pytest.raises(KeyError, match=r"^3\.5$"):
- idxr(s)[3.5]
+ with pytest.raises(KeyError, match=r"^3\.5$"):
+ idxr(s)[3.5]
# contains
assert 3.0 in s
@@ -365,7 +317,6 @@ def test_scalar_float(self):
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
- @ignore_ix
def test_slice_non_numeric(self):
# GH 4892
@@ -397,12 +348,7 @@ def test_slice_non_numeric(self):
with pytest.raises(TypeError, match=msg):
s.iloc[l]
- for idxr in [
- lambda x: x.ix,
- lambda x: x.loc,
- lambda x: x.iloc,
- lambda x: x,
- ]:
+ for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
@@ -414,9 +360,8 @@ def test_slice_non_numeric(self):
kind_int=str(int),
)
)
- with catch_warnings(record=True):
- with pytest.raises(TypeError, match=msg):
- idxr(s)[l]
+ with pytest.raises(TypeError, match=msg):
+ idxr(s)[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
@@ -429,12 +374,7 @@ def test_slice_non_numeric(self):
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
- for idxr in [
- lambda x: x.ix,
- lambda x: x.loc,
- lambda x: x.iloc,
- lambda x: x,
- ]:
+ for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers"
@@ -445,11 +385,9 @@ def test_slice_non_numeric(self):
kind_int=str(int),
)
)
- with catch_warnings(record=True):
- with pytest.raises(TypeError, match=msg):
- idxr(s)[l] = 0
+ with pytest.raises(TypeError, match=msg):
+ idxr(s)[l] = 0
- @ignore_ix
def test_slice_integer(self):
# same as above, but for Integer based indexes
@@ -468,10 +406,9 @@ def test_slice_integer(self):
# getitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
- for idxr in [lambda x: x.loc, lambda x: x.ix]:
+ for idxr in [lambda x: x.loc]:
- with catch_warnings(record=True):
- result = idxr(s)[l]
+ result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
@@ -494,9 +431,8 @@ def test_slice_integer(self):
# getitem out-of-bounds
for l in [slice(-6, 6), slice(-6.0, 6.0)]:
- for idxr in [lambda x: x.loc, lambda x: x.ix]:
- with catch_warnings(record=True):
- result = idxr(s)[l]
+ for idxr in [lambda x: x.loc]:
+ result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
@@ -523,10 +459,9 @@ def test_slice_integer(self):
(slice(2.5, 3.5), slice(3, 4)),
]:
- for idxr in [lambda x: x.loc, lambda x: x.ix]:
+ for idxr in [lambda x: x.loc]:
- with catch_warnings(record=True):
- result = idxr(s)[l]
+ result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
@@ -546,11 +481,10 @@ def test_slice_integer(self):
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
- for idxr in [lambda x: x.loc, lambda x: x.ix]:
+ for idxr in [lambda x: x.loc]:
sc = s.copy()
- with catch_warnings(record=True):
- idxr(sc)[l] = 0
- result = idxr(sc)[l].values.ravel()
+ idxr(sc)[l] = 0
+ result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
@@ -585,7 +519,6 @@ def test_integer_positional_indexing(self):
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
- @ignore_ix
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
@@ -663,10 +596,7 @@ def f(idxr):
s[l] = 0
f(lambda x: x.loc)
- with catch_warnings(record=True):
- f(lambda x: x.ix)
- @ignore_ix
def test_slice_float(self):
# same as above, but for floats
@@ -679,20 +609,18 @@ def test_slice_float(self):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
expected = s.iloc[3:4]
- for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x]:
+ for idxr in [lambda x: x.loc, lambda x: x]:
# getitem
- with catch_warnings(record=True):
- result = idxr(s)[l]
+ result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
- with catch_warnings(record=True):
- idxr(s2)[l] = 0
- result = idxr(s2)[l].values.ravel()
+ idxr(s2)[l] = 0
+ result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 85eab91af3c48..c3ba5c0545b8b 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -8,6 +8,7 @@
import pandas as pd
from pandas import DataFrame, Series, concat, date_range, isna
from pandas.api.types import is_scalar
+from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
@@ -722,7 +723,7 @@ def test_iloc_mask(self):
else:
accessor = df
ans = str(bin(accessor[mask]["nums"].sum()))
- except Exception as e:
+ except (ValueError, IndexingError, NotImplementedError) as e:
ans = str(e)
key = tuple([idx, method])
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 58c054fa27d76..d478fbfa1686d 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -2,13 +2,13 @@
from datetime import datetime
import re
-from warnings import catch_warnings, simplefilter
import weakref
import numpy as np
import pytest
from pandas.compat import PY36
+from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
@@ -20,8 +20,6 @@
from pandas.tests.indexing.common import Base, _mklbl
import pandas.util.testing as tm
-ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
-
# ------------------------------------------------------------------------
# Indexing test cases
@@ -75,7 +73,6 @@ def test_setitem_ndarray_1d(self):
(lambda x: x, "getitem"),
(lambda x: x.loc, "loc"),
(lambda x: x.iloc, "iloc"),
- pytest.param(lambda x: x.ix, "ix", marks=ignore_ix),
],
)
def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
@@ -141,7 +138,6 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
(lambda x: x, "setitem"),
(lambda x: x.loc, "loc"),
(lambda x: x.iloc, "iloc"),
- pytest.param(lambda x: x.ix, "ix", marks=ignore_ix),
],
)
def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
@@ -163,27 +159,20 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
r"^\[\[\[" # pandas.core.indexing.IndexingError
)
- if (
- (idxr_id == "iloc")
- or (
- (
- isinstance(obj, Series)
- and idxr_id == "setitem"
- and index.inferred_type
- in [
- "floating",
- "string",
- "datetime64",
- "period",
- "timedelta64",
- "boolean",
- "categorical",
- ]
- )
- )
- or (
- idxr_id == "ix"
- and index.inferred_type in ["string", "datetime64", "period", "boolean"]
+ if (idxr_id == "iloc") or (
+ (
+ isinstance(obj, Series)
+ and idxr_id == "setitem"
+ and index.inferred_type
+ in [
+ "floating",
+ "string",
+ "datetime64",
+ "period",
+ "timedelta64",
+ "boolean",
+ "categorical",
+ ]
)
):
idxr[nd3] = 0
@@ -427,10 +416,6 @@ def test_indexing_mixed_frame_bug(self):
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
- # if I look at df, then element [0,2] equals '_'. If instead I type
- # df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
- # get '_'.
-
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
@@ -592,21 +577,17 @@ def test_multi_assign(self):
def test_setitem_list(self):
# GH 6043
- # ix with a list
+ # iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- df.ix[1, 0] = [1, 2, 3]
- df.ix[1, 0] = [1, 2]
+ df.iloc[1, 0] = [1, 2, 3]
+ df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- result.ix[1, 0] = [1, 2]
+ result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
- # ix with an object
+ # iloc with an object
class TO:
def __init__(self, value):
self.value = value
@@ -623,24 +604,18 @@ def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- df.ix[1, 0] = TO(1)
- df.ix[1, 0] = TO(2)
+ df.iloc[1, 0] = TO(1)
+ df.iloc[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- result.ix[1, 0] = TO(2)
+ result.iloc[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- df.ix[1, 0] = TO(1)
- df.ix[1, 0] = np.nan
+ df.iloc[1, 0] = TO(1)
+ df.iloc[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
@@ -777,55 +752,52 @@ def test_contains_with_float_index(self):
def test_index_type_coercion(self):
- with catch_warnings(record=True):
- simplefilter("ignore")
+ # GH 11836
+ # if we have an index type and set it with something that looks
+ # to numpy like the same, but is actually, not
+ # (e.g. setting with a float or string '0')
+ # then we need to coerce to object
- # GH 11836
- # if we have an index type and set it with something that looks
- # to numpy like the same, but is actually, not
- # (e.g. setting with a float or string '0')
- # then we need to coerce to object
+ # integer indexes
+ for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
- # integer indexes
- for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
+ assert s.index.is_integer()
- assert s.index.is_integer()
+ for indexer in [lambda x: x.loc, lambda x: x]:
+ s2 = s.copy()
+ indexer(s2)[0.1] = 0
+ assert s2.index.is_floating()
+ assert indexer(s2)[0.1] == 0
- for indexer in [lambda x: x.ix, lambda x: x.loc, lambda x: x]:
- s2 = s.copy()
- indexer(s2)[0.1] = 0
- assert s2.index.is_floating()
- assert indexer(s2)[0.1] == 0
+ s2 = s.copy()
+ indexer(s2)[0.0] = 0
+ exp = s.index
+ if 0 not in s:
+ exp = Index(s.index.tolist() + [0])
+ tm.assert_index_equal(s2.index, exp)
- s2 = s.copy()
- indexer(s2)[0.0] = 0
- exp = s.index
- if 0 not in s:
- exp = Index(s.index.tolist() + [0])
- tm.assert_index_equal(s2.index, exp)
+ s2 = s.copy()
+ indexer(s2)["0"] = 0
+ assert s2.index.is_object()
- s2 = s.copy()
- indexer(s2)["0"] = 0
- assert s2.index.is_object()
+ for s in [Series(range(5), index=np.arange(5.0))]:
- for s in [Series(range(5), index=np.arange(5.0))]:
+ assert s.index.is_floating()
- assert s.index.is_floating()
+ for idxr in [lambda x: x.loc, lambda x: x]:
- for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x]:
+ s2 = s.copy()
+ idxr(s2)[0.1] = 0
+ assert s2.index.is_floating()
+ assert idxr(s2)[0.1] == 0
- s2 = s.copy()
- idxr(s2)[0.1] = 0
- assert s2.index.is_floating()
- assert idxr(s2)[0.1] == 0
+ s2 = s.copy()
+ idxr(s2)[0.0] = 0
+ tm.assert_index_equal(s2.index, s.index)
- s2 = s.copy()
- idxr(s2)[0.0] = 0
- tm.assert_index_equal(s2.index, s.index)
-
- s2 = s.copy()
- idxr(s2)["0"] = 0
- assert s2.index.is_object()
+ s2 = s.copy()
+ idxr(s2)["0"] = 0
+ assert s2.index.is_object()
class TestMisc(Base):
@@ -887,22 +859,7 @@ def run_tests(df, rhs, right):
tm.assert_frame_equal(left, right)
left = df.copy()
- with catch_warnings(record=True):
- # XXX: finer-filter here.
- simplefilter("ignore")
- left.ix[slice_one, slice_two] = rhs
- tm.assert_frame_equal(left, right)
-
- left = df.copy()
- with catch_warnings(record=True):
- simplefilter("ignore")
- left.ix[idx_one, idx_two] = rhs
- tm.assert_frame_equal(left, right)
-
- left = df.copy()
- with catch_warnings(record=True):
- simplefilter("ignore")
- left.ix[lbl_one, lbl_two] = rhs
+ left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
@@ -933,7 +890,7 @@ def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer:
- # For integer indices, ix and plain getitem are position-based.
+ # For integer indices, .loc and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
@@ -951,10 +908,6 @@ def test_slice_with_zero_step_raises(self):
s[::0]
with pytest.raises(ValueError, match="slice step cannot be zero"):
s.loc[::0]
- with catch_warnings(record=True):
- simplefilter("ignore")
- with pytest.raises(ValueError, match="slice step cannot be zero"):
- s.ix[::0]
def test_indexing_assignment_dict_already_exists(self):
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]}).set_index("z")
@@ -965,17 +918,12 @@ def test_indexing_assignment_dict_already_exists(self):
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
- # Check that .iloc and .ix return correct dtypes GH9983
+ # Check that .iloc returns correct dtypes GH9983
df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
- with catch_warnings(record=True):
- simplefilter("ignore")
- df2 = df.ix[[], :]
+ df2 = df.iloc[[], :]
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
- with catch_warnings(record=True):
- simplefilter("ignore")
- tm.assert_series_equal(df2.loc[:, "a"], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
@@ -1048,9 +996,6 @@ def test_no_reference_cycle(self):
df = DataFrame({"a": [0, 1], "b": [2, 3]})
for name in ("loc", "iloc", "at", "iat"):
getattr(df, name)
- with catch_warnings(record=True):
- simplefilter("ignore")
- getattr(df, "ix")
wr = weakref.ref(df)
del df
assert wr() is None
@@ -1224,7 +1169,7 @@ def test_extension_array_cross_section_converts():
@pytest.mark.parametrize(
"idxr, error, error_message",
[
- (lambda x: x, AttributeError, "'numpy.ndarray' object has no attribute 'get'"),
+ (lambda x: x, AbstractMethodError, None),
(
lambda x: x.loc,
AttributeError,
@@ -1235,12 +1180,6 @@ def test_extension_array_cross_section_converts():
AttributeError,
"type object 'NDFrame' has no attribute '_AXIS_ALIASES'",
),
- pytest.param(
- lambda x: x.ix,
- ValueError,
- "NDFrameIndexer does not support NDFrame objects with ndim > 2",
- marks=ignore_ix,
- ),
],
)
def test_ndframe_indexing_raises(idxr, error, error_message):
@@ -1263,3 +1202,12 @@ def test_readonly_indices():
result = df["data"].iloc[indices]
expected = df["data"].loc[[1, 3, 6]]
tm.assert_series_equal(result, expected)
+
+
+def test_1tuple_without_multiindex():
+ ser = pd.Series(range(5))
+ key = (slice(3),)
+
+ result = ser[key]
+ expected = ser[key[0]]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index 45ccd8d1b8fb3..6029db8ed66f6 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -343,3 +343,13 @@ def test_ix_duplicate_returns_series(self):
r = df.ix[0.2, "a"]
e = df.loc[0.2, "a"]
tm.assert_series_equal(r, e)
+
+ def test_ix_intervalindex(self):
+ # https://github.com/pandas-dev/pandas/issues/27865
+ df = DataFrame(
+ np.random.randn(5, 2),
+ index=pd.IntervalIndex.from_breaks([-np.inf, 0, 1, 2, 3, np.inf]),
+ )
+ result = df.ix[0:2, 0]
+ expected = df.iloc[0:2, 0]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 19c288a4b63ae..35291efecd1ac 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -690,6 +690,28 @@ def test_loc_setitem_consistency_slice_column_len(self):
)
tm.assert_series_equal(df[("Respondent", "Duration")], expected)
+ @pytest.mark.parametrize("unit", ["Y", "M", "D", "h", "m", "s", "ms", "us"])
+ def test_loc_assign_non_ns_datetime(self, unit):
+ # GH 27395, non-ns dtype assignment via .loc should work
+ # and return the same result when using simple assignment
+ df = DataFrame(
+ {
+ "timestamp": [
+ np.datetime64("2017-02-11 12:41:29"),
+ np.datetime64("1991-11-07 04:22:37"),
+ ]
+ }
+ )
+
+ df.loc[:, unit] = df.loc[:, "timestamp"].values.astype(
+ "datetime64[{unit}]".format(unit=unit)
+ )
+ df["expected"] = df.loc[:, "timestamp"].values.astype(
+ "datetime64[{unit}]".format(unit=unit)
+ )
+ expected = Series(df.loc[:, "expected"], name=unit)
+ tm.assert_series_equal(df.loc[:, unit], expected)
+
def test_loc_setitem_frame(self):
df = self.frame_labels
@@ -1070,6 +1092,16 @@ def test_series_indexing_zerodim_np_array(self):
result = s.loc[np.array(0)]
assert result == 1
+ def test_loc_reverse_assignment(self):
+ # GH26939
+ data = [1, 2, 3, 4, 5, 6] + [None] * 4
+ expected = Series(data, index=range(2010, 2020))
+
+ result = pd.Series(index=range(2010, 2020))
+ result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1]
+
+ tm.assert_series_equal(result, expected)
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
@@ -1081,3 +1113,21 @@ def test_series_loc_getitem_label_list_missing_values():
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = s.loc[key]
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "columns, column_key, expected_columns, check_column_type",
+ [
+ ([2011, 2012, 2013], [2011, 2012], [0, 1], True),
+ ([2011, 2012, "All"], [2011, 2012], [0, 1], False),
+ ([2011, 2012, "All"], [2011, "All"], [0, 2], True),
+ ],
+)
+def test_loc_getitem_label_list_integer_labels(
+ columns, column_key, expected_columns, check_column_type
+):
+ # gh-14836
+ df = DataFrame(np.random.rand(3, 3), columns=columns, index=list("ABC"))
+ expected = df.iloc[:, expected_columns]
+ result = df.loc[["A", "B", "C"], column_key]
+ tm.assert_frame_equal(result, expected, check_column_type=check_column_type)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 68e93f06e43dc..c4505231932c6 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -442,10 +442,10 @@ def test_partial_set_empty_frame(self):
# these work as they don't really change
# anything but the index
# GH5632
- expected = DataFrame(columns=["foo"], index=Index([], dtype="int64"))
+ expected = DataFrame(columns=["foo"], index=Index([], dtype="object"))
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="object"))
df["foo"] = Series([], dtype="object")
return df
@@ -469,22 +469,21 @@ def f():
expected["foo"] = expected["foo"].astype("float64")
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = Series(np.arange(len(df)), dtype="float64")
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
- tm.assert_index_equal(df.index, Index([], dtype="object"))
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = range(len(df))
return df
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 655e484bc34d1..5eb9a067b11e4 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -338,7 +338,7 @@ def test_try_coerce_arg(self):
vals = (np.datetime64("2010-10-10"), datetime(2010, 10, 10), date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(val)
- assert np.int64 == type(coerced)
+ assert np.datetime64 == type(coerced)
assert pd.Timestamp("2010-10-10") == pd.Timestamp(coerced)
@@ -528,32 +528,33 @@ def test_as_array_datetime_tz(self):
assert mgr.get("g").dtype == "datetime64[ns, CET]"
assert mgr.as_array().dtype == "object"
- def test_astype(self):
+ @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"])
+ def test_astype(self, t):
# coerce all
mgr = create_mgr("c: f4; d: f2; e: f8")
- for t in ["float16", "float32", "float64", "int32", "int64"]:
- t = np.dtype(t)
- tmgr = mgr.astype(t)
- assert tmgr.get("c").dtype.type == t
- assert tmgr.get("d").dtype.type == t
- assert tmgr.get("e").dtype.type == t
+
+ t = np.dtype(t)
+ tmgr = mgr.astype(t)
+ assert tmgr.get("c").dtype.type == t
+ assert tmgr.get("d").dtype.type == t
+ assert tmgr.get("e").dtype.type == t
# mixed
mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
- for t in ["float16", "float32", "float64", "int32", "int64"]:
- t = np.dtype(t)
- tmgr = mgr.astype(t, errors="ignore")
- assert tmgr.get("c").dtype.type == t
- assert tmgr.get("e").dtype.type == t
- assert tmgr.get("f").dtype.type == t
- assert tmgr.get("g").dtype.type == t
-
- assert tmgr.get("a").dtype.type == np.object_
- assert tmgr.get("b").dtype.type == np.object_
- if t != np.int64:
- assert tmgr.get("d").dtype.type == np.datetime64
- else:
- assert tmgr.get("d").dtype.type == t
+
+ t = np.dtype(t)
+ tmgr = mgr.astype(t, errors="ignore")
+ assert tmgr.get("c").dtype.type == t
+ assert tmgr.get("e").dtype.type == t
+ assert tmgr.get("f").dtype.type == t
+ assert tmgr.get("g").dtype.type == t
+
+ assert tmgr.get("a").dtype.type == np.object_
+ assert tmgr.get("b").dtype.type == np.object_
+ if t != np.int64:
+ assert tmgr.get("d").dtype.type == np.datetime64
+ else:
+ assert tmgr.get("d").dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
@@ -584,10 +585,6 @@ def _compare(old_mgr, new_mgr):
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
- mgr = create_mgr("a, b: object; f: i8; g: f8")
- new_mgr = mgr.convert()
- _compare(mgr, new_mgr)
-
# convert
mgr = create_mgr("a,b,foo: object; f: i8; g: f8")
mgr.set("a", np.array(["1"] * N, dtype=np.object_))
diff --git a/pandas/tests/io/data/categorical.0.25.0.pickle b/pandas/tests/io/data/categorical.0.25.0.pickle
new file mode 100644
index 0000000000000..b756060c83d94
Binary files /dev/null and b/pandas/tests/io/data/categorical.0.25.0.pickle differ
diff --git a/pandas/tests/io/data/categorical_0_14_1.pickle b/pandas/tests/io/data/categorical_0_14_1.pickle
deleted file mode 100644
index 94f882b2f3027..0000000000000
--- a/pandas/tests/io/data/categorical_0_14_1.pickle
+++ /dev/null
@@ -1,94 +0,0 @@
-ccopy_reg
-_reconstructor
-p0
-(cpandas.core.categorical
-Categorical
-p1
-c__builtin__
-object
-p2
-Ntp3
-Rp4
-(dp5
-S'_levels'
-p6
-cnumpy.core.multiarray
-_reconstruct
-p7
-(cpandas.core.index
-Index
-p8
-(I0
-tp9
-S'b'
-p10
-tp11
-Rp12
-((I1
-(I4
-tp13
-cnumpy
-dtype
-p14
-(S'O8'
-p15
-I0
-I1
-tp16
-Rp17
-(I3
-S'|'
-p18
-NNNI-1
-I-1
-I63
-tp19
-bI00
-(lp20
-S'a'
-p21
-ag10
-aS'c'
-p22
-aS'd'
-p23
-atp24
-(Ntp25
-tp26
-bsS'labels'
-p27
-g7
-(cnumpy
-ndarray
-p28
-(I0
-tp29
-g10
-tp30
-Rp31
-(I1
-(I3
-tp32
-g14
-(S'i8'
-p33
-I0
-I1
-tp34
-Rp35
-(I3
-S'<'
-p36
-NNNI-1
-I-1
-I0
-tp37
-bI00
-S'\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00'
-p38
-tp39
-bsS'name'
-p40
-S'foobar'
-p41
-sb.
\ No newline at end of file
diff --git a/pandas/tests/io/data/categorical_0_15_2.pickle b/pandas/tests/io/data/categorical_0_15_2.pickle
deleted file mode 100644
index 25cd862976cab..0000000000000
Binary files a/pandas/tests/io/data/categorical_0_15_2.pickle and /dev/null differ
diff --git a/pandas/tests/io/data/legacy_hdf/gh26443.h5 b/pandas/tests/io/data/legacy_hdf/gh26443.h5
new file mode 100644
index 0000000000000..45aa64324530f
Binary files /dev/null and b/pandas/tests/io/data/legacy_hdf/gh26443.h5 differ
diff --git a/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_3.5.6.pickle b/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_3.5.6.pickle
new file mode 100644
index 0000000000000..88bb6989f5b08
Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_3.5.6.pickle differ
diff --git a/pandas/tests/io/data/sparseframe-0.20.3.pickle.gz b/pandas/tests/io/data/sparseframe-0.20.3.pickle.gz
new file mode 100644
index 0000000000000..f4ff0dbaa1ff9
Binary files /dev/null and b/pandas/tests/io/data/sparseframe-0.20.3.pickle.gz differ
diff --git a/pandas/tests/io/data/sparseseries-0.20.3.pickle.gz b/pandas/tests/io/data/sparseseries-0.20.3.pickle.gz
new file mode 100644
index 0000000000000..b299e7d85808e
Binary files /dev/null and b/pandas/tests/io/data/sparseseries-0.20.3.pickle.gz differ
diff --git a/pandas/tests/io/data/stata1_119.dta.gz b/pandas/tests/io/data/stata1_119.dta.gz
new file mode 100644
index 0000000000000..0f75d8b92db14
Binary files /dev/null and b/pandas/tests/io/data/stata1_119.dta.gz differ
diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py
index e69de29bb2d1d..550172329fc57 100644
--- a/pandas/tests/io/excel/__init__.py
+++ b/pandas/tests/io/excel/__init__.py
@@ -0,0 +1,6 @@
+import pytest
+
+pytestmark = pytest.mark.filterwarnings(
+ # Looks like tree.getiterator is deprecated in favor of tree.iter
+ "ignore:This method will be removed in future versions:PendingDeprecationWarning"
+)
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index a39cface0e015..5326f2df68972 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -3,6 +3,7 @@
from datetime import datetime, time
from functools import partial
import os
+from urllib.error import URLError
import warnings
import numpy as np
@@ -14,8 +15,6 @@
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
-from pandas.io.common import URLError
-
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
diff --git a/pandas/tests/io/excel/test_xlsxwriter.py b/pandas/tests/io/excel/test_xlsxwriter.py
index 4dae3db2e7abd..63e41c59ecb89 100644
--- a/pandas/tests/io/excel/test_xlsxwriter.py
+++ b/pandas/tests/io/excel/test_xlsxwriter.py
@@ -50,7 +50,7 @@ def test_column_format(ext):
try:
read_num_format = cell.number_format
- except Exception:
+ except AttributeError:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html
new file mode 100644
index 0000000000000..4eb3f5319749d
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html
@@ -0,0 +1,70 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>2</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>...</th>
+ <td>...</td>
+ </tr>
+ <tr>
+ <th>56</th>
+ <td>56</td>
+ </tr>
+ <tr>
+ <th>57</th>
+ <td>57</td>
+ </tr>
+ <tr>
+ <th>58</th>
+ <td>58</td>
+ </tr>
+ <tr>
+ <th>59</th>
+ <td>59</td>
+ </tr>
+ <tr>
+ <th>60</th>
+ <td>60</td>
+ </tr>
+ </tbody>
+</table>
+<p>61 rows × 1 columns</p>
+</div>
diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html
new file mode 100644
index 0000000000000..2b1d97aec517c
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html
@@ -0,0 +1,46 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>...</th>
+ <td>...</td>
+ </tr>
+ <tr>
+ <th>59</th>
+ <td>59</td>
+ </tr>
+ <tr>
+ <th>60</th>
+ <td>60</td>
+ </tr>
+ </tbody>
+</table>
+<p>61 rows × 1 columns</p>
+</div>
diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html
new file mode 100644
index 0000000000000..a539e5a4884a1
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html
@@ -0,0 +1,78 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>2</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>5</th>
+ <td>5</td>
+ </tr>
+ <tr>
+ <th>...</th>
+ <td>...</td>
+ </tr>
+ <tr>
+ <th>55</th>
+ <td>55</td>
+ </tr>
+ <tr>
+ <th>56</th>
+ <td>56</td>
+ </tr>
+ <tr>
+ <th>57</th>
+ <td>57</td>
+ </tr>
+ <tr>
+ <th>58</th>
+ <td>58</td>
+ </tr>
+ <tr>
+ <th>59</th>
+ <td>59</td>
+ </tr>
+ <tr>
+ <th>60</th>
+ <td>60</td>
+ </tr>
+ </tbody>
+</table>
+<p>61 rows × 1 columns</p>
+</div>
diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html
new file mode 100644
index 0000000000000..3e680a505c6d6
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html
@@ -0,0 +1,269 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>2</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>5</th>
+ <td>5</td>
+ </tr>
+ <tr>
+ <th>6</th>
+ <td>6</td>
+ </tr>
+ <tr>
+ <th>7</th>
+ <td>7</td>
+ </tr>
+ <tr>
+ <th>8</th>
+ <td>8</td>
+ </tr>
+ <tr>
+ <th>9</th>
+ <td>9</td>
+ </tr>
+ <tr>
+ <th>10</th>
+ <td>10</td>
+ </tr>
+ <tr>
+ <th>11</th>
+ <td>11</td>
+ </tr>
+ <tr>
+ <th>12</th>
+ <td>12</td>
+ </tr>
+ <tr>
+ <th>13</th>
+ <td>13</td>
+ </tr>
+ <tr>
+ <th>14</th>
+ <td>14</td>
+ </tr>
+ <tr>
+ <th>15</th>
+ <td>15</td>
+ </tr>
+ <tr>
+ <th>16</th>
+ <td>16</td>
+ </tr>
+ <tr>
+ <th>17</th>
+ <td>17</td>
+ </tr>
+ <tr>
+ <th>18</th>
+ <td>18</td>
+ </tr>
+ <tr>
+ <th>19</th>
+ <td>19</td>
+ </tr>
+ <tr>
+ <th>20</th>
+ <td>20</td>
+ </tr>
+ <tr>
+ <th>21</th>
+ <td>21</td>
+ </tr>
+ <tr>
+ <th>22</th>
+ <td>22</td>
+ </tr>
+ <tr>
+ <th>23</th>
+ <td>23</td>
+ </tr>
+ <tr>
+ <th>24</th>
+ <td>24</td>
+ </tr>
+ <tr>
+ <th>25</th>
+ <td>25</td>
+ </tr>
+ <tr>
+ <th>26</th>
+ <td>26</td>
+ </tr>
+ <tr>
+ <th>27</th>
+ <td>27</td>
+ </tr>
+ <tr>
+ <th>28</th>
+ <td>28</td>
+ </tr>
+ <tr>
+ <th>29</th>
+ <td>29</td>
+ </tr>
+ <tr>
+ <th>30</th>
+ <td>30</td>
+ </tr>
+ <tr>
+ <th>31</th>
+ <td>31</td>
+ </tr>
+ <tr>
+ <th>32</th>
+ <td>32</td>
+ </tr>
+ <tr>
+ <th>33</th>
+ <td>33</td>
+ </tr>
+ <tr>
+ <th>34</th>
+ <td>34</td>
+ </tr>
+ <tr>
+ <th>35</th>
+ <td>35</td>
+ </tr>
+ <tr>
+ <th>36</th>
+ <td>36</td>
+ </tr>
+ <tr>
+ <th>37</th>
+ <td>37</td>
+ </tr>
+ <tr>
+ <th>38</th>
+ <td>38</td>
+ </tr>
+ <tr>
+ <th>39</th>
+ <td>39</td>
+ </tr>
+ <tr>
+ <th>40</th>
+ <td>40</td>
+ </tr>
+ <tr>
+ <th>41</th>
+ <td>41</td>
+ </tr>
+ <tr>
+ <th>42</th>
+ <td>42</td>
+ </tr>
+ <tr>
+ <th>43</th>
+ <td>43</td>
+ </tr>
+ <tr>
+ <th>44</th>
+ <td>44</td>
+ </tr>
+ <tr>
+ <th>45</th>
+ <td>45</td>
+ </tr>
+ <tr>
+ <th>46</th>
+ <td>46</td>
+ </tr>
+ <tr>
+ <th>47</th>
+ <td>47</td>
+ </tr>
+ <tr>
+ <th>48</th>
+ <td>48</td>
+ </tr>
+ <tr>
+ <th>49</th>
+ <td>49</td>
+ </tr>
+ <tr>
+ <th>50</th>
+ <td>50</td>
+ </tr>
+ <tr>
+ <th>51</th>
+ <td>51</td>
+ </tr>
+ <tr>
+ <th>52</th>
+ <td>52</td>
+ </tr>
+ <tr>
+ <th>53</th>
+ <td>53</td>
+ </tr>
+ <tr>
+ <th>54</th>
+ <td>54</td>
+ </tr>
+ <tr>
+ <th>55</th>
+ <td>55</td>
+ </tr>
+ <tr>
+ <th>56</th>
+ <td>56</td>
+ </tr>
+ <tr>
+ <th>57</th>
+ <td>57</td>
+ </tr>
+ <tr>
+ <th>58</th>
+ <td>58</td>
+ </tr>
+ <tr>
+ <th>59</th>
+ <td>59</td>
+ </tr>
+ <tr>
+ <th>60</th>
+ <td>60</td>
+ </tr>
+ </tbody>
+</table>
+</div>
diff --git a/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html
new file mode 100644
index 0000000000000..10f6247e37def
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html
@@ -0,0 +1,105 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>2</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>5</th>
+ <td>5</td>
+ </tr>
+ <tr>
+ <th>6</th>
+ <td>6</td>
+ </tr>
+ <tr>
+ <th>7</th>
+ <td>7</td>
+ </tr>
+ <tr>
+ <th>8</th>
+ <td>8</td>
+ </tr>
+ <tr>
+ <th>9</th>
+ <td>9</td>
+ </tr>
+ <tr>
+ <th>10</th>
+ <td>10</td>
+ </tr>
+ <tr>
+ <th>11</th>
+ <td>11</td>
+ </tr>
+ <tr>
+ <th>12</th>
+ <td>12</td>
+ </tr>
+ <tr>
+ <th>13</th>
+ <td>13</td>
+ </tr>
+ <tr>
+ <th>14</th>
+ <td>14</td>
+ </tr>
+ <tr>
+ <th>15</th>
+ <td>15</td>
+ </tr>
+ <tr>
+ <th>16</th>
+ <td>16</td>
+ </tr>
+ <tr>
+ <th>17</th>
+ <td>17</td>
+ </tr>
+ <tr>
+ <th>18</th>
+ <td>18</td>
+ </tr>
+ <tr>
+ <th>19</th>
+ <td>19</td>
+ </tr>
+ </tbody>
+</table>
+</div>
diff --git a/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html
new file mode 100644
index 0000000000000..4eb3f5319749d
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html
@@ -0,0 +1,70 @@
+<div>
+<style scoped>
+ .dataframe tbody tr th:only-of-type {
+ vertical-align: middle;
+ }
+
+ .dataframe tbody tr th {
+ vertical-align: top;
+ }
+
+ .dataframe thead th {
+ text-align: right;
+ }
+</style>
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>1</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>2</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>...</th>
+ <td>...</td>
+ </tr>
+ <tr>
+ <th>56</th>
+ <td>56</td>
+ </tr>
+ <tr>
+ <th>57</th>
+ <td>57</td>
+ </tr>
+ <tr>
+ <th>58</th>
+ <td>58</td>
+ </tr>
+ <tr>
+ <th>59</th>
+ <td>59</td>
+ </tr>
+ <tr>
+ <th>60</th>
+ <td>60</td>
+ </tr>
+ </tbody>
+</table>
+<p>61 rows × 1 columns</p>
+</div>
diff --git a/pandas/tests/io/formats/data/html/truncate_formatter.html b/pandas/tests/io/formats/data/html/truncate_formatter.html
new file mode 100644
index 0000000000000..7615ef89d85d1
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/truncate_formatter.html
@@ -0,0 +1,36 @@
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>A</th>
+ <th>...</th>
+ <th>D</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>1_mod</td>
+ <td>...</td>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>5_mod</td>
+ <td>...</td>
+ <td>8</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>9_mod</td>
+ <td>...</td>
+ <td>12</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>13_mod</td>
+ <td>...</td>
+ <td>16</td>
+ </tr>
+ </tbody>
+</table>
diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py
index f4bee99296a83..e56d14885f11e 100644
--- a/pandas/tests/io/formats/test_console.py
+++ b/pandas/tests/io/formats/test_console.py
@@ -1,3 +1,5 @@
+import locale
+
import pytest
from pandas._config import detect_console_encoding
@@ -50,11 +52,11 @@ def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
"std,locale",
[
["ascii", "ascii"],
- ["ascii", Exception],
+ ["ascii", locale.Error],
[AttributeError, "ascii"],
- [AttributeError, Exception],
+ [AttributeError, locale.Error],
[IOError, "ascii"],
- [IOError, Exception],
+ [IOError, locale.Error],
],
)
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 818bbc566aca8..454e2afb8abe0 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -7,6 +7,7 @@
import itertools
from operator import methodcaller
import os
+from pathlib import Path
import re
from shutil import get_terminal_size
import sys
@@ -17,7 +18,7 @@
import pytest
import pytz
-from pandas.compat import is_platform_32bit, is_platform_windows
+from pandas.compat import PY36, is_platform_32bit, is_platform_windows
import pandas as pd
from pandas import (
@@ -42,6 +43,54 @@
use_32bit_repr = is_platform_windows() or is_platform_32bit()
+@pytest.fixture(params=["string", "pathlike", "buffer"])
+def filepath_or_buffer_id(request):
+ """
+ A fixture yielding test ids for filepath_or_buffer testing.
+ """
+ return request.param
+
+
+@pytest.fixture
+def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
+ """
+ A fixture yeilding a string representing a filepath, a path-like object
+ and a StringIO buffer. Also checks that buffer is not closed.
+ """
+ if filepath_or_buffer_id == "buffer":
+ buf = StringIO()
+ yield buf
+ assert not buf.closed
+ else:
+ if PY36:
+ assert isinstance(tmp_path, Path)
+ else:
+ assert hasattr(tmp_path, "__fspath__")
+ if filepath_or_buffer_id == "pathlike":
+ yield tmp_path / "foo"
+ else:
+ yield str(tmp_path / "foo")
+
+
+@pytest.fixture
+def assert_filepath_or_buffer_equals(filepath_or_buffer, filepath_or_buffer_id):
+ """
+ Assertion helper for checking filepath_or_buffer.
+ """
+
+ def _assert_filepath_or_buffer_equals(expected):
+ if filepath_or_buffer_id == "string":
+ with open(filepath_or_buffer) as f:
+ result = f.read()
+ elif filepath_or_buffer_id == "pathlike":
+ result = filepath_or_buffer.read_text()
+ elif filepath_or_buffer_id == "buffer":
+ result = filepath_or_buffer.getvalue()
+ assert result == expected
+
+ return _assert_filepath_or_buffer_equals
+
+
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
@@ -422,28 +471,35 @@ def test_repr_min_rows(self):
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
df = pd.DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
+ assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
+ assert "..." in df._repr_html_()
+ assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
+ assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
+ assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
@@ -471,6 +527,45 @@ def test_str_max_colwidth(self):
"1 foo bar stuff 1"
)
+ def test_to_string_truncate(self):
+ # GH 9784 - dont truncate when calling DataFrame.to_string
+ df = pd.DataFrame(
+ [
+ {
+ "a": "foo",
+ "b": "bar",
+ "c": "let's make this a very VERY long line that is longer "
+ "than the default 50 character limit",
+ "d": 1,
+ },
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
+ ]
+ )
+ df.set_index(["a", "b", "c"])
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ with option_context("max_colwidth", 20):
+ # the display option has no effect on the to_string method
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ assert df.to_string(max_colwidth=20) == (
+ " a b c d\n"
+ "0 foo bar let's make this ... 1\n"
+ "1 foo bar stuff 1"
+ )
+
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
@@ -1537,7 +1632,7 @@ def test_to_string_float_index(self):
assert result == expected
def test_to_string_complex_float_formatting(self):
- # GH #25514
+ # GH #25514, 25745
with pd.option_context("display.precision", 5):
df = DataFrame(
{
@@ -1545,6 +1640,7 @@ def test_to_string_complex_float_formatting(self):
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
+ (-1j),
]
}
)
@@ -1552,7 +1648,8 @@ def test_to_string_complex_float_formatting(self):
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
- "2 0.26975+0.32506j"
+ "2 0.26975+0.32506j\n"
+ "3 -0.00000-1.00000j"
)
assert result == expected
@@ -3140,3 +3237,21 @@ def test_repr_html_ipython_config(ip):
)
result = ip.run_cell(code)
assert not result.error_in_exec
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+def test_filepath_or_buffer_arg(
+ float_frame, method, filepath_or_buffer, assert_filepath_or_buffer_equals
+):
+ df = float_frame
+ expected = getattr(df, method)()
+
+ getattr(df, method)(buf=filepath_or_buffer)
+ assert_filepath_or_buffer_equals(expected)
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+ msg = "buf is not a file name and it has no write method"
+ with pytest.raises(TypeError, match=msg):
+ getattr(float_frame, method)(buf=object())
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index c6485ff21bcfb..a85f3677bc3ab 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -340,7 +340,6 @@ def test_to_csv_string_array_ascii(self):
with open(path, "r") as f:
assert f.read() == expected_ascii
- @pytest.mark.xfail(strict=False)
def test_to_csv_string_array_utf8(self):
# GH 10813
str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
@@ -515,3 +514,56 @@ def test_to_csv_compression(self, compression_only, read_infer, to_infer):
df.to_csv(path, compression=to_compression)
result = pd.read_csv(path, index_col=0, compression=read_compression)
tm.assert_frame_equal(result, df)
+
+ def test_to_csv_compression_dict(self, compression_only):
+ # GH 26023
+ method = compression_only
+ df = DataFrame({"ABC": [1]})
+ filename = "to_csv_compress_as_dict."
+ filename += "gz" if method == "gzip" else method
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, compression={"method": method})
+ read_df = pd.read_csv(path, index_col=0)
+ tm.assert_frame_equal(read_df, df)
+
+ def test_to_csv_compression_dict_no_method_raises(self):
+ # GH 26023
+ df = DataFrame({"ABC": [1]})
+ compression = {"some_option": True}
+ msg = "must have key 'method'"
+
+ with tm.ensure_clean("out.zip") as path:
+ with pytest.raises(ValueError, match=msg):
+ df.to_csv(path, compression=compression)
+
+ @pytest.mark.parametrize("compression", ["zip", "infer"])
+ @pytest.mark.parametrize(
+ "archive_name", [None, "test_to_csv.csv", "test_to_csv.zip"]
+ )
+ def test_to_csv_zip_arguments(self, compression, archive_name):
+ # GH 26023
+ from zipfile import ZipFile
+
+ df = DataFrame({"ABC": [1]})
+ with tm.ensure_clean("to_csv_archive_name.zip") as path:
+ df.to_csv(
+ path, compression={"method": compression, "archive_name": archive_name}
+ )
+ zp = ZipFile(path)
+ expected_arcname = path if archive_name is None else archive_name
+ expected_arcname = os.path.basename(expected_arcname)
+ assert len(zp.filelist) == 1
+ archived_file = os.path.basename(zp.filelist[0].filename)
+ assert archived_file == expected_arcname
+
+ @pytest.mark.parametrize("df_new_type", ["Int64"])
+ def test_to_csv_na_rep_long_string(self, df_new_type):
+ # see gh-25099
+ df = pd.DataFrame({"c": [float("nan")] * 3})
+ df = df.astype(df_new_type)
+ expected_rows = ["c", "mynull", "mynull", "mynull"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ result = df.to_csv(index=False, na_rep="mynull", encoding="ascii")
+
+ assert expected == result
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 448e869df950d..004dffd128dd6 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -235,6 +235,23 @@ def test_to_html_truncate(datapath):
assert result == expected
+def test_to_html_truncate_formatter(datapath):
+ # issue-25955
+ data = [
+ {"A": 1, "B": 2, "C": 3, "D": 4},
+ {"A": 5, "B": 6, "C": 7, "D": 8},
+ {"A": 9, "B": 10, "C": 11, "D": 12},
+ {"A": 13, "B": 14, "C": 15, "D": 16},
+ ]
+
+ df = DataFrame(data)
+ fmt = lambda x: str(x) + "_mod"
+ formatters = [fmt, fmt, None, None]
+ result = df.to_html(formatters=formatters, max_cols=3)
+ expected = expected_html(datapath, "truncate_formatter")
+ assert result == expected
+
+
@pytest.mark.parametrize(
"sparsify,expected",
[(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
@@ -713,3 +730,42 @@ def test_to_html_with_col_space_units(unit):
for h in hdrs:
expected = '<th style="min-width: {unit};">'.format(unit=unit)
assert expected in h
+
+
+def test_html_repr_min_rows_default(datapath):
+ # gh-27991
+
+ # default setting no truncation even if above min_rows
+ df = pd.DataFrame({"a": range(20)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
+ assert result == expected
+
+ # default of max_rows 60 triggers truncation if above
+ df = pd.DataFrame({"a": range(61)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "max_rows,min_rows,expected",
+ [
+ # truncated after first two rows
+ (10, 4, "html_repr_max_rows_10_min_rows_4"),
+ # when set to None, follow value of max_rows
+ (12, None, "html_repr_max_rows_12_min_rows_None"),
+ # when set value higher as max_rows, use the minimum
+ (10, 12, "html_repr_max_rows_10_min_rows_12"),
+ # max_rows of None -> never truncate
+ (None, 12, "html_repr_max_rows_None_min_rows_12"),
+ ],
+)
+def test_html_repr_min_rows(datapath, max_rows, min_rows, expected):
+ # gh-27991
+
+ df = pd.DataFrame({"a": range(61)})
+ expected = expected_html(datapath, expected)
+ with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
+ result = df._repr_html_()
+ assert result == expected
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 924b2a19e8504..9ffb54d23e37e 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -388,8 +388,7 @@ def test_to_latex_special_escape(self):
"""
assert escaped_result == escaped_expected
- def test_to_latex_longtable(self, float_frame):
- float_frame.to_latex(longtable=True)
+ def test_to_latex_longtable(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
withindex_result = df.to_latex(longtable=True)
@@ -439,6 +438,141 @@ def test_to_latex_longtable(self, float_frame):
with3columns_result = df.to_latex(index=False, longtable=True)
assert r"\multicolumn{3}" in with3columns_result
+ def test_to_latex_caption_label(self):
+ # GH 25436
+ the_caption = "a table in a \\texttt{table/tabular} environment"
+ the_label = "tab:table_tabular"
+
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+ # test when only the caption is provided
+ result_c = df.to_latex(caption=the_caption)
+
+ expected_c = r"""\begin{table}
+\centering
+\caption{a table in a \texttt{table/tabular} environment}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_c == expected_c
+
+ # test when only the label is provided
+ result_l = df.to_latex(label=the_label)
+
+ expected_l = r"""\begin{table}
+\centering
+\label{tab:table_tabular}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_l == expected_l
+
+ # test when the caption and the label are provided
+ result_cl = df.to_latex(caption=the_caption, label=the_label)
+
+ expected_cl = r"""\begin{table}
+\centering
+\caption{a table in a \texttt{table/tabular} environment}
+\label{tab:table_tabular}
+\begin{tabular}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\bottomrule
+\end{tabular}
+\end{table}
+"""
+ assert result_cl == expected_cl
+
+ def test_to_latex_longtable_caption_label(self):
+ # GH 25436
+ the_caption = "a table in a \\texttt{longtable} environment"
+ the_label = "tab:longtable"
+
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+ # test when only the caption is provided
+ result_c = df.to_latex(longtable=True, caption=the_caption)
+
+ expected_c = r"""\begin{longtable}{lrl}
+\caption{a table in a \texttt{longtable} environment}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_c == expected_c
+
+ # test when only the label is provided
+ result_l = df.to_latex(longtable=True, label=the_label)
+
+ expected_l = r"""\begin{longtable}{lrl}
+\label{tab:longtable}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_l == expected_l
+
+ # test when the caption and the label are provided
+ result_cl = df.to_latex(longtable=True, caption=the_caption, label=the_label)
+
+ expected_cl = r"""\begin{longtable}{lrl}
+\caption{a table in a \texttt{longtable} environment}\label{tab:longtable}\\
+\toprule
+{} & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{{Continued on next page}} \\
+\midrule
+\endfoot
+
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ assert result_cl == expected_cl
+
def test_to_latex_escape_special_chars(self):
special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
df = DataFrame(data=special_characters)
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 2d2938697bd80..e63644a44a81f 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -11,12 +11,12 @@
cd ~/
$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \
- pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ pickle
+ pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.20.3
- output dir : pandas/pandas/tests/io/data/legacy_pickle/0.18.1/
+ output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
storage format: pickle
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle
@@ -53,8 +53,6 @@
Period,
RangeIndex,
Series,
- SparseDataFrame,
- SparseSeries,
Timestamp,
bdate_range,
date_range,
@@ -86,6 +84,13 @@
YearEnd,
)
+try:
+ # TODO: remove try/except when 0.24.0 is the legacy version.
+ from pandas.arrays import SparseArray
+except ImportError:
+ from pandas.core.sparse.api import SparseArray
+
+
_loose_version = LooseVersion(pandas.__version__)
@@ -97,7 +102,7 @@ def _create_sp_series():
arr[7:12] = nan
arr[-1:] = nan
- bseries = SparseSeries(arr, kind="block")
+ bseries = Series(SparseArray(arr, kind="block"))
bseries.name = "bseries"
return bseries
@@ -111,7 +116,7 @@ def _create_sp_tsseries():
arr[-1:] = nan
date_index = bdate_range("1/1/2011", periods=len(arr))
- bseries = SparseSeries(arr, index=date_index, kind="block")
+ bseries = Series(SparseArray(arr, kind="block"), index=date_index)
bseries.name = "btsseries"
return bseries
@@ -127,7 +132,7 @@ def _create_sp_frame():
}
dates = bdate_range("1/1/2011", periods=10)
- return SparseDataFrame(data, index=dates)
+ return DataFrame(data, index=dates).apply(SparseArray)
def create_data():
diff --git a/pandas/tests/io/json/conftest.py b/pandas/tests/io/json/conftest.py
new file mode 100644
index 0000000000000..4e848cd48b42d
--- /dev/null
+++ b/pandas/tests/io/json/conftest.py
@@ -0,0 +1,9 @@
+import pytest
+
+
+@pytest.fixture(params=["split", "records", "index", "columns", "values"])
+def orient(request):
+ """
+ Fixture for orients excluding the table format.
+ """
+ return request.param
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index b2fc9ec217ca6..569e299860614 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas.compat import PY35
+
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, PeriodDtype
import pandas as pd
@@ -20,6 +22,14 @@
)
+def assert_results_equal(result, expected):
+ """Helper function for comparing deserialized JSON with Py35 compat."""
+ if PY35:
+ assert sorted(result.items()) == sorted(expected.items())
+ else:
+ assert result == expected
+
+
class TestBuildSchema:
def setup_method(self, method):
self.df = DataFrame(
@@ -234,7 +244,8 @@ def test_build_series(self):
),
]
)
- assert result == expected
+
+ assert_results_equal(result, expected)
def test_to_json(self):
df = self.df.copy()
@@ -323,7 +334,8 @@ def test_to_json(self):
),
]
expected = OrderedDict([("schema", schema), ("data", data)])
- assert result == expected
+
+ assert_results_equal(result, expected)
def test_to_json_float_index(self):
data = pd.Series(1, index=[1.0, 2.0])
@@ -352,7 +364,8 @@ def test_to_json_float_index(self):
),
]
)
- assert result == expected
+
+ assert_results_equal(result, expected)
def test_to_json_period_index(self):
idx = pd.period_range("2016", freq="Q-JAN", periods=2)
@@ -372,7 +385,8 @@ def test_to_json_period_index(self):
OrderedDict([("index", "2016-02-01T00:00:00.000Z"), ("values", 1)]),
]
expected = OrderedDict([("schema", schema), ("data", data)])
- assert result == expected
+
+ assert_results_equal(result, expected)
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(["a", "b"]))
@@ -406,7 +420,8 @@ def test_to_json_categorical_index(self):
),
]
)
- assert result == expected
+
+ assert_results_equal(result, expected)
def test_date_format_raises(self):
with pytest.raises(ValueError):
@@ -542,7 +557,8 @@ def test_categorical(self):
),
]
)
- assert result == expected
+
+ assert_results_equal(result, expected)
@pytest.mark.parametrize(
"idx,nm,prop",
@@ -596,7 +612,8 @@ def test_timestamp_in_columns(self):
)
result = df.to_json(orient="table")
js = json.loads(result)
- assert js["schema"]["fields"][1]["name"] == 1451606400000
+ assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000Z"
+ # TODO - below expectation is not correct; see GH 28256
assert js["schema"]["fields"][2]["name"] == 10000
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 9c687f036aa68..2195bf248f43a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -7,16 +7,14 @@
import numpy as np
import pytest
-from pandas.compat import is_platform_32bit
+from pandas.compat import PY35, is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal,
assert_frame_equal,
- assert_index_equal,
assert_series_equal,
ensure_clean,
network,
@@ -39,6 +37,14 @@
_mixed_frame = _frame.copy()
+def assert_json_roundtrip_equal(result, expected, orient):
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+ assert_frame_equal(result, expected)
+
+
class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
@@ -82,489 +88,334 @@ def setup(self, datapath):
del self.tsframe
del self.mixed_frame
- def test_frame_double_encoded_labels(self):
+ def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
- assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
- assert_frame_equal(
- df, read_json(df.to_json(orient="columns"), orient="columns")
- )
- assert_frame_equal(df, read_json(df.to_json(orient="index"), orient="index"))
- df_unser = read_json(df.to_json(orient="records"), orient="records")
- assert_index_equal(df.columns, df_unser.columns)
- tm.assert_numpy_array_equal(df.values, df_unser.values)
-
- def test_frame_non_unique_index(self):
- df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
+ result = read_json(df.to_json(orient=orient), orient=orient)
+ expected = df.copy()
- msg = "DataFrame index must be unique for orient='index'"
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient="index")
- msg = "DataFrame index must be unique for orient='columns'"
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient="columns")
+ assert_json_roundtrip_equal(result, expected, orient)
- assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
- unser = read_json(df.to_json(orient="records"), orient="records")
- tm.assert_index_equal(df.columns, unser.columns)
- tm.assert_almost_equal(df.values, unser.values)
- unser = read_json(df.to_json(orient="values"), orient="values")
- tm.assert_numpy_array_equal(df.values, unser.values)
+ @pytest.mark.parametrize("orient", ["split", "records", "values"])
+ def test_frame_non_unique_index(self, orient):
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
+ result = read_json(df.to_json(orient=orient), orient=orient)
+ expected = df.copy()
- def test_frame_non_unique_columns(self):
- df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
+ assert_json_roundtrip_equal(result, expected, orient)
- msg = "DataFrame columns must be unique for orient='index'"
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient="index")
- msg = "DataFrame columns must be unique for orient='columns'"
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient="columns")
- msg = "DataFrame columns must be unique for orient='records'"
+ @pytest.mark.parametrize("orient", ["index", "columns"])
+ def test_frame_non_unique_index_raises(self, orient):
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
+ msg = "DataFrame index must be unique for orient='{}'".format(orient)
with pytest.raises(ValueError, match=msg):
- df.to_json(orient="records")
+ df.to_json(orient=orient)
- assert_frame_equal(
- df, read_json(df.to_json(orient="split"), orient="split", dtype=False)
- )
- unser = read_json(df.to_json(orient="values"), orient="values")
- tm.assert_numpy_array_equal(df.values, unser.values)
-
- # GH4377; duplicate columns not processing correctly
- df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "y"])
- result = read_json(df.to_json(orient="split"), orient="split")
- assert_frame_equal(result, df)
-
- def _check(df):
- result = read_json(
- df.to_json(orient="split"), orient="split", convert_dates=["x"]
- )
- assert_frame_equal(result, df)
-
- for o in [
+ @pytest.mark.parametrize("orient", ["split", "values"])
+ @pytest.mark.parametrize(
+ "data",
+ [
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
- ]:
- _check(DataFrame(o, index=[1, 2], columns=["x", "x"]))
-
- def test_frame_from_json_to_json(self):
- def _check_orient(
- df,
- orient,
- dtype=None,
- numpy=False,
- convert_axes=True,
- check_dtype=True,
- raise_ok=None,
- sort=None,
- check_index_type=True,
- check_column_type=True,
- check_numpy_dtype=False,
- ):
- if sort is not None:
- df = df.sort_values(sort)
- else:
- df = df.sort_index()
-
- # if we are not unique, then check that we are raising ValueError
- # for the appropriate orients
- if not df.index.is_unique and orient in ["index", "columns"]:
- msg = "DataFrame index must be unique for orient='{}'".format(orient)
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient=orient)
- return
- if not df.columns.is_unique and orient in ["index", "columns", "records"]:
- # TODO: not executed. fix this.
- with pytest.raises(ValueError, match="ksjkajksfjksjfkjs"):
- df.to_json(orient=orient)
- return
-
- dfjson = df.to_json(orient=orient)
-
- try:
- unser = read_json(
- dfjson,
- orient=orient,
- dtype=dtype,
- numpy=numpy,
- convert_axes=convert_axes,
- )
- except Exception as detail:
- if raise_ok is not None:
- if isinstance(detail, raise_ok):
- return
- raise
-
- if sort is not None and sort in unser.columns:
- unser = unser.sort_values(sort)
- else:
- unser = unser.sort_index()
-
- if not dtype:
- check_dtype = False
-
- if not convert_axes and df.index.dtype.type == np.datetime64:
- unser.index = DatetimeIndex(unser.index.values.astype("i8") * 1e6)
- if orient == "records":
- # index is not captured in this orientation
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- tm.assert_index_equal(
- df.columns, unser.columns, exact=check_column_type
- )
- elif orient == "values":
- # index and cols are not captured in this orientation
- if numpy is True and df.shape == (0, 0):
- assert unser.shape[0] == 0
- else:
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- elif orient == "split":
- # index and col labels might not be strings
- unser.index = [str(i) for i in unser.index]
- unser.columns = [str(i) for i in unser.columns]
-
- if sort is None:
- unser = unser.sort_index()
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- else:
- if convert_axes:
- tm.assert_frame_equal(
- df,
- unser,
- check_dtype=check_dtype,
- check_index_type=check_index_type,
- check_column_type=check_column_type,
- )
- else:
- tm.assert_frame_equal(
- df, unser, check_less_precise=False, check_dtype=check_dtype
- )
-
- def _check_all_orients(
- df,
- dtype=None,
- convert_axes=True,
- raise_ok=None,
- sort=None,
- check_index_type=True,
- check_column_type=True,
- ):
+ ],
+ )
+ def test_frame_non_unique_columns(self, orient, data):
+ df = DataFrame(data, index=[1, 2], columns=["x", "x"])
- # numpy=False
- if convert_axes:
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
+ result = read_json(
+ df.to_json(orient=orient), orient=orient, convert_dates=["x"]
+ )
+ if orient == "values":
+ expected = pd.DataFrame(data)
+ if expected.iloc[:, 0].dtype == "datetime64[ns]":
+ # orient == "values" by default will write Timestamp objects out
+ # in milliseconds; these are internally stored in nanosecond,
+ # so divide to get where we need
+ # TODO: a to_epoch method would also solve; see GH 14772
+ expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
+ elif orient == "split":
+ expected = df
- _check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "values", dtype=dtype, convert_axes=False, sort=sort)
-
- # numpy=True and raise_ok might be not None, so ignore the error
- if convert_axes:
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
+ assert_frame_equal(result, expected)
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
+ @pytest.mark.parametrize("orient", ["index", "columns", "records"])
+ def test_frame_non_unique_columns_raises(self, orient):
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
- # basic
- _check_all_orients(self.frame)
+ msg = "DataFrame columns must be unique for orient='{}'".format(orient)
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(orient=orient)
+
+ def test_frame_default_orient(self):
assert self.frame.to_json() == self.frame.to_json(orient="columns")
- _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
- _check_all_orients(self.intframe, dtype=False)
+ @pytest.mark.parametrize("dtype", [False, float])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
+ data = self.frame.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
+ )
+
+ expected = self.frame.copy()
- # big one
- # index and columns are strings as all unserialised JSON object keys
- # are assumed to be strings
- biggie = DataFrame(
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ # TODO: debug why sort is required
+ expected = expected.sort_index()
+
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize("dtype", [False, np.int64])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
+ data = self.intframe.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
+ )
+ expected = self.intframe.copy()
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ if (
+ numpy
+ and (is_platform_32bit() or is_platform_windows())
+ and not dtype
+ and orient != "split"
+ ):
+ # TODO: see what is causing roundtrip dtype loss
+ expected = expected.astype(np.int32)
+
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
+ df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
+ dtype=dtype,
)
- _check_all_orients(biggie, dtype=False, convert_axes=False)
- # dtypes
- _check_all_orients(
- DataFrame(biggie, dtype=np.float64), dtype=np.float64, convert_axes=False
- )
- _check_all_orients(
- DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False
+ # TODO: do we even need to support U3 dtypes?
+ if numpy and dtype == "U3" and orient != "split":
+ pytest.xfail("Can't decode directly to array")
+
+ data = df.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
- _check_all_orients(
- DataFrame(biggie, dtype="U3"),
- dtype="U3",
- convert_axes=False,
- raise_ok=ValueError,
+
+ expected = df.copy()
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ if not dtype:
+ expected = expected.astype(np.int64)
+
+ # index columns, and records orients cannot fully preserve the string
+ # dtype for axes as the index and column labels are used as keys in
+ # JSON objects. JSON keys are by definition strings, so there's no way
+ # to disambiguate whether those keys actually were strings or numeric
+ # beforehand and numeric wins out.
+ # TODO: Split should be able to support this
+ if convert_axes and (orient in ("split", "index", "columns")):
+ expected.columns = expected.columns.astype(np.int64)
+ expected.index = expected.index.astype(np.int64)
+ elif orient == "records" and convert_axes:
+ expected.columns = expected.columns.astype(np.int64)
+
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_categorical(self, orient, convert_axes, numpy):
+ # TODO: create a better frame to test with and improve coverage
+ if orient in ("index", "columns"):
+ pytest.xfail(
+ "Can't have duplicate index values for orient '{}')".format(orient)
+ )
+
+ data = self.categorical.to_json(orient=orient)
+ if numpy and orient in ("records", "values"):
+ pytest.xfail("Orient {} is broken with numpy=True".format(orient))
+
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- # categorical
- _check_all_orients(self.categorical, sort="sort", raise_ok=ValueError)
+ expected = self.categorical.copy()
+ expected.index = expected.index.astype(str) # Categorical not preserved
+ expected.index.name = None # index names aren't preserved in JSON
+
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
- # empty
- _check_all_orients(
- self.empty_frame, check_index_type=False, check_column_type=False
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_empty(self, orient, convert_axes, numpy):
+ data = self.empty_frame.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
+ )
+ expected = self.empty_frame.copy()
+
+ # TODO: both conditions below are probably bugs
+ if convert_axes:
+ expected.index = expected.index.astype(float)
+ expected.columns = expected.columns.astype(float)
+ if numpy and orient == "values":
+ expected = expected.reindex([0], axis=1).reset_index(drop=True)
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
+ # TODO: improve coverage with date_format parameter
+ data = self.tsframe.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
+ expected = self.tsframe.copy()
- # time series data
- _check_all_orients(self.tsframe)
+ if not convert_axes: # one off for ts handling
+ # DTI gets converted to epoch values
+ idx = expected.index.astype(np.int64) // 1000000
+ if orient != "split": # TODO: handle consistently across orients
+ idx = idx.astype(str)
+
+ expected.index = idx
+
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_mixed(self, orient, convert_axes, numpy):
+ if numpy and orient != "split":
+ pytest.xfail("Can't decode directly to array")
- # mixed data
index = pd.Index(["a", "b", "c", "d", "e"])
- data = {
+ values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
- df = DataFrame(data=data, index=index)
- _check_orient(df, "split", check_dtype=False)
- _check_orient(df, "records", check_dtype=False)
- _check_orient(df, "values", check_dtype=False)
- _check_orient(df, "columns", check_dtype=False)
- # index oriented is problematic as it is read back in in a transposed
- # state, so the columns are interpreted as having mixed data and
- # given object dtypes.
- # force everything to have object dtype beforehand
- _check_orient(df.transpose().transpose(), "index", dtype=False)
-
- def test_frame_from_json_bad_data(self):
- with pytest.raises(ValueError, match="Expected object or value"):
- read_json(StringIO('{"key":b:a:d}'))
-
- # too few indices
- json = StringIO(
- '{"columns":["A","B"],'
- '"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
- )
- msg = r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)"
- with pytest.raises(ValueError, match=msg):
- read_json(json, orient="split")
- # too many columns
- json = StringIO(
- '{"columns":["A","B","C"],'
- '"index":["1","2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
+ df = DataFrame(data=values, index=index)
+
+ data = df.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- msg = "3 columns passed, passed data had 2 columns"
+
+ expected = df.copy()
+ expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
+
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ assert_json_roundtrip_equal(result, expected, orient)
+
+ @pytest.mark.parametrize(
+ "data,msg,orient",
+ [
+ ('{"key":b:a:d}', "Expected object or value", "columns"),
+ # too few indices
+ (
+ '{"columns":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
+ r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
+ "split",
+ ),
+ # too many columns
+ (
+ '{"columns":["A","B","C"],'
+ '"index":["1","2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
+ "3 columns passed, passed data had 2 columns",
+ "split",
+ ),
+ # bad key
+ (
+ '{"badkey":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
+ r"unexpected key\(s\): badkey",
+ "split",
+ ),
+ ],
+ )
+ def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
- read_json(json, orient="split")
+ read_json(StringIO(data), orient=orient)
- # bad key
- json = StringIO(
- '{"badkey":["A","B"],'
- '"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
+ @pytest.mark.parametrize("dtype", [True, False])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
+ num_df = DataFrame([[1, 2], [4, 5, 6]])
+ result = read_json(
+ num_df.to_json(orient=orient),
+ orient=orient,
+ convert_axes=convert_axes,
+ dtype=dtype,
)
- with pytest.raises(ValueError, match=r"unexpected key\(s\): badkey"):
- read_json(json, orient="split")
-
- def test_frame_from_json_nones(self):
- df = DataFrame([[1, 2], [4, 5, 6]])
- unser = read_json(df.to_json())
- assert np.isnan(unser[2][0])
-
- df = DataFrame([["1", "2"], ["4", "5", "6"]])
- unser = read_json(df.to_json())
- assert np.isnan(unser[2][0])
- unser = read_json(df.to_json(), dtype=False)
- assert unser[2][0] is None
- unser = read_json(df.to_json(), convert_axes=False, dtype=False)
- assert unser["2"]["0"] is None
-
- unser = read_json(df.to_json(), numpy=False)
- assert np.isnan(unser[2][0])
- unser = read_json(df.to_json(), numpy=False, dtype=False)
- assert unser[2][0] is None
- unser = read_json(df.to_json(), numpy=False, convert_axes=False, dtype=False)
- assert unser["2"]["0"] is None
+ assert np.isnan(result.iloc[0, 2])
+ obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
+ result = read_json(
+ obj_df.to_json(orient=orient),
+ orient=orient,
+ convert_axes=convert_axes,
+ dtype=dtype,
+ )
+ if not dtype: # TODO: Special case for object data; maybe a bug?
+ assert result.iloc[0, 2] is None
+ else:
+ assert np.isnan(result.iloc[0, 2])
+
+ @pytest.mark.parametrize("inf", [np.inf, np.NINF])
+ @pytest.mark.parametrize("dtype", [True, False])
+ def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
- df.loc[0, 2] = np.inf
- unser = read_json(df.to_json())
- assert np.isnan(unser[2][0])
- unser = read_json(df.to_json(), dtype=False)
- assert np.isnan(unser[2][0])
-
- df.loc[0, 2] = np.NINF
- unser = read_json(df.to_json())
- assert np.isnan(unser[2][0])
- unser = read_json(df.to_json(), dtype=False)
- assert np.isnan(unser[2][0])
+ df.loc[0, 2] = inf
+ result = read_json(df.to_json(), dtype=dtype)
+ assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
- def test_frame_to_json_float_precision(self):
- df = pd.DataFrame([dict(a_float=0.95)])
- encoded = df.to_json(double_precision=1)
- assert encoded == '{"a_float":{"0":1.0}}'
-
- df = pd.DataFrame([dict(a_float=1.95)])
- encoded = df.to_json(double_precision=1)
- assert encoded == '{"a_float":{"0":2.0}}'
-
- df = pd.DataFrame([dict(a_float=-1.95)])
- encoded = df.to_json(double_precision=1)
- assert encoded == '{"a_float":{"0":-2.0}}'
-
- df = pd.DataFrame([dict(a_float=0.995)])
- encoded = df.to_json(double_precision=2)
- assert encoded == '{"a_float":{"0":1.0}}'
-
- df = pd.DataFrame([dict(a_float=0.9995)])
- encoded = df.to_json(double_precision=3)
- assert encoded == '{"a_float":{"0":1.0}}'
-
- df = pd.DataFrame([dict(a_float=0.99999999999999944)])
- encoded = df.to_json(double_precision=15)
- assert encoded == '{"a_float":{"0":1.0}}'
+ @pytest.mark.parametrize(
+ "value,precision,expected_val",
+ [
+ (0.95, 1, 1.0),
+ (1.95, 1, 2.0),
+ (-1.95, 1, -2.0),
+ (0.995, 2, 1.0),
+ (0.9995, 3, 1.0),
+ (0.99999999999999944, 15, 1.0),
+ ],
+ )
+ def test_frame_to_json_float_precision(self, value, precision, expected_val):
+ df = pd.DataFrame([dict(a_float=value)])
+ encoded = df.to_json(double_precision=precision)
+ assert encoded == '{{"a_float":{{"0":{}}}}}'.format(expected_val)
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
@@ -799,107 +650,83 @@ def test_series_non_unique_index(self):
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
- def test_series_from_json_to_json(self):
- def _check_orient(
- series, orient, dtype=None, numpy=False, check_index_type=True
- ):
- series = series.sort_index()
- unser = read_json(
- series.to_json(orient=orient),
- typ="series",
- orient=orient,
- numpy=numpy,
- dtype=dtype,
- )
- unser = unser.sort_index()
- if orient == "records" or orient == "values":
- assert_almost_equal(series.values, unser.values)
- else:
- if orient == "split":
- assert_series_equal(
- series, unser, check_index_type=check_index_type
- )
- else:
- assert_series_equal(
- series,
- unser,
- check_names=False,
- check_index_type=check_index_type,
- )
-
- def _check_all_orients(series, dtype=None, check_index_type=True):
- _check_orient(
- series, "columns", dtype=dtype, check_index_type=check_index_type
- )
- _check_orient(
- series, "records", dtype=dtype, check_index_type=check_index_type
- )
- _check_orient(
- series, "split", dtype=dtype, check_index_type=check_index_type
- )
- _check_orient(
- series, "index", dtype=dtype, check_index_type=check_index_type
- )
- _check_orient(series, "values", dtype=dtype)
-
- _check_orient(
- series,
- "columns",
- dtype=dtype,
- numpy=True,
- check_index_type=check_index_type,
- )
- _check_orient(
- series,
- "records",
- dtype=dtype,
- numpy=True,
- check_index_type=check_index_type,
- )
- _check_orient(
- series,
- "split",
- dtype=dtype,
- numpy=True,
- check_index_type=check_index_type,
- )
- _check_orient(
- series,
- "index",
- dtype=dtype,
- numpy=True,
- check_index_type=check_index_type,
- )
- _check_orient(
- series,
- "values",
- dtype=dtype,
- numpy=True,
- check_index_type=check_index_type,
- )
-
- # basic
- _check_all_orients(self.series)
+ def test_series_default_orient(self):
assert self.series.to_json() == self.series.to_json(orient="index")
- objSeries = Series(
- [str(d) for d in self.objSeries],
- index=self.objSeries.index,
- name=self.objSeries.name,
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_series_roundtrip_simple(self, orient, numpy):
+ data = self.series.to_json(orient=orient)
+ result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
+ expected = self.series.copy()
+
+ if not numpy and PY35 and orient in ("index", "columns"):
+ expected = expected.sort_index()
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [False, None])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_series_roundtrip_object(self, orient, numpy, dtype):
+ data = self.objSeries.to_json(orient=orient)
+ result = pd.read_json(
+ data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
- _check_all_orients(objSeries, dtype=False)
-
- # empty_series has empty index with object dtype
- # which cannot be revert
- assert self.empty_series.index.dtype == np.object_
- _check_all_orients(self.empty_series, check_index_type=False)
+ expected = self.objSeries.copy()
+
+ if not numpy and PY35 and orient in ("index", "columns"):
+ expected = expected.sort_index()
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_series_roundtrip_empty(self, orient, numpy):
+ data = self.empty_series.to_json(orient=orient)
+ result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
+ expected = self.empty_series.copy()
+
+ # TODO: see what causes inconsistency
+ if not numpy and PY35 and orient == "index":
+ expected = expected.sort_index()
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ else:
+ expected.index = expected.index.astype(float)
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_series_roundtrip_timeseries(self, orient, numpy):
+ data = self.ts.to_json(orient=orient)
+ result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
+ expected = self.ts.copy()
+
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [np.float64, np.int])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_series_roundtrip_numeric(self, orient, numpy, dtype):
+ s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
+ data = s.to_json(orient=orient)
+ result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
- _check_all_orients(self.ts)
+ expected = s.copy()
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
- # dtype
- s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
- _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
- _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
+ tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
@@ -919,6 +746,19 @@ def test_series_with_dtype(self):
expected = Series([4] * 3)
assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "dtype,expected",
+ [
+ (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
+ (False, Series([946684800000])),
+ ],
+ )
+ def test_series_with_dtype_datetime(self, dtype, expected):
+ s = Series(["2000-01-01"], dtype="datetime64[ns]")
+ data = s.to_json()
+ result = pd.read_json(data, typ="series", dtype=dtype)
+ assert_series_equal(result, expected)
+
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
@@ -991,11 +831,9 @@ def test_convert_dates(self):
result = read_json(json, typ="series")
assert_series_equal(result, ts)
- def test_convert_dates_infer(self):
- # GH10747
- from pandas.io.json import dumps
-
- infer_words = [
+ @pytest.mark.parametrize(
+ "infer_word",
+ [
"trade_time",
"date",
"datetime",
@@ -1003,69 +841,83 @@ def test_convert_dates_infer(self):
"modified",
"timestamp",
"timestamps",
- ]
- for infer_word in infer_words:
- data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
- expected = DataFrame(
- [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
- )
- result = read_json(dumps(data))[["id", infer_word]]
- assert_frame_equal(result, expected)
+ ],
+ )
+ def test_convert_dates_infer(self, infer_word):
+ # GH10747
+ from pandas.io.json import dumps
- def test_date_format_frame(self):
- df = self.tsframe.copy()
+ data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
+ expected = DataFrame(
+ [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
+ )
+ result = read_json(dumps(data))[["id", infer_word]]
+ assert_frame_equal(result, expected)
- def test_w_date(date, date_unit=None):
- df["date"] = Timestamp(date)
- df.iloc[1, df.columns.get_loc("date")] = pd.NaT
- df.iloc[5, df.columns.get_loc("date")] = pd.NaT
- if date_unit:
- json = df.to_json(date_format="iso", date_unit=date_unit)
- else:
- json = df.to_json(date_format="iso")
- result = read_json(json)
- expected = df.copy()
- expected.index = expected.index.tz_localize("UTC")
- expected["date"] = expected["date"].dt.tz_localize("UTC")
- assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "date,date_unit",
+ [
+ ("20130101 20:43:42.123", None),
+ ("20130101 20:43:42", "s"),
+ ("20130101 20:43:42.123", "ms"),
+ ("20130101 20:43:42.123456", "us"),
+ ("20130101 20:43:42.123456789", "ns"),
+ ],
+ )
+ def test_date_format_frame(self, date, date_unit):
+ df = self.tsframe.copy()
- test_w_date("20130101 20:43:42.123")
- test_w_date("20130101 20:43:42", date_unit="s")
- test_w_date("20130101 20:43:42.123", date_unit="ms")
- test_w_date("20130101 20:43:42.123456", date_unit="us")
- test_w_date("20130101 20:43:42.123456789", date_unit="ns")
+ df["date"] = Timestamp(date)
+ df.iloc[1, df.columns.get_loc("date")] = pd.NaT
+ df.iloc[5, df.columns.get_loc("date")] = pd.NaT
+ if date_unit:
+ json = df.to_json(date_format="iso", date_unit=date_unit)
+ else:
+ json = df.to_json(date_format="iso")
+ result = read_json(json)
+ expected = df.copy()
+ # expected.index = expected.index.tz_localize("UTC")
+ expected["date"] = expected["date"].dt.tz_localize("UTC")
+ assert_frame_equal(result, expected)
+ def test_date_format_frame_raises(self):
+ df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
- def test_date_format_series(self):
- def test_w_date(date, date_unit=None):
- ts = Series(Timestamp(date), index=self.ts.index)
- ts.iloc[1] = pd.NaT
- ts.iloc[5] = pd.NaT
- if date_unit:
- json = ts.to_json(date_format="iso", date_unit=date_unit)
- else:
- json = ts.to_json(date_format="iso")
- result = read_json(json, typ="series")
- expected = ts.copy()
- expected.index = expected.index.tz_localize("UTC")
- expected = expected.dt.tz_localize("UTC")
- assert_series_equal(result, expected)
-
- test_w_date("20130101 20:43:42.123")
- test_w_date("20130101 20:43:42", date_unit="s")
- test_w_date("20130101 20:43:42.123", date_unit="ms")
- test_w_date("20130101 20:43:42.123456", date_unit="us")
- test_w_date("20130101 20:43:42.123456789", date_unit="ns")
+ @pytest.mark.parametrize(
+ "date,date_unit",
+ [
+ ("20130101 20:43:42.123", None),
+ ("20130101 20:43:42", "s"),
+ ("20130101 20:43:42.123", "ms"),
+ ("20130101 20:43:42.123456", "us"),
+ ("20130101 20:43:42.123456789", "ns"),
+ ],
+ )
+ def test_date_format_series(self, date, date_unit):
+ ts = Series(Timestamp(date), index=self.ts.index)
+ ts.iloc[1] = pd.NaT
+ ts.iloc[5] = pd.NaT
+ if date_unit:
+ json = ts.to_json(date_format="iso", date_unit=date_unit)
+ else:
+ json = ts.to_json(date_format="iso")
+ result = read_json(json, typ="series")
+ expected = ts.copy()
+ # expected.index = expected.index.tz_localize("UTC")
+ expected = expected.dt.tz_localize("UTC")
+ assert_series_equal(result, expected)
+ def test_date_format_series_raises(self):
ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
- def test_date_unit(self):
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
+ def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
@@ -1073,16 +925,15 @@ def test_date_unit(self):
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
- for unit in ("s", "ms", "us", "ns"):
- json = df.to_json(date_format="epoch", date_unit=unit)
+ json = df.to_json(date_format="epoch", date_unit=unit)
- # force date unit
- result = read_json(json, date_unit=unit)
- assert_frame_equal(result, df)
+ # force date unit
+ result = read_json(json, date_unit=unit)
+ assert_frame_equal(result, df)
- # detect date unit
- result = read_json(json, date_unit=None)
- assert_frame_equal(result, df)
+ # detect date unit
+ result = read_json(json, date_unit=None)
+ assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
@@ -1287,46 +1138,48 @@ def test_datetime_tz(self):
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
- sdf = df.to_sparse()
+ sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
- ss = s.to_sparse()
+ ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
- def test_tz_is_utc(self):
+ @pytest.mark.parametrize(
+ "ts",
+ [
+ Timestamp("2013-01-10 05:00:00Z"),
+ Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
+ Timestamp("2013-01-10 00:00:00-0500"),
+ ],
+ )
+ def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
- ts = Timestamp("2013-01-10 05:00:00Z")
- assert dumps(ts, iso_dates=True) == exp
- dt = ts.to_pydatetime()
- assert dumps(dt, iso_dates=True) == exp
-
- ts = Timestamp("2013-01-10 00:00:00", tz="US/Eastern")
- assert dumps(ts, iso_dates=True) == exp
- dt = ts.to_pydatetime()
- assert dumps(dt, iso_dates=True) == exp
-
- ts = Timestamp("2013-01-10 00:00:00-0500")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
- def test_tz_range_is_utc(self):
+ @pytest.mark.parametrize(
+ "tz_range",
+ [
+ pd.date_range("2013-01-01 05:00:00Z", periods=2),
+ pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
+ pd.date_range("2013-01-01 00:00:00-0500", periods=2),
+ ],
+ )
+ def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
@@ -1336,7 +1189,6 @@ def test_tz_range_is_utc(self):
'"1":"2013-01-02T05:00:00.000Z"}}'
)
- tz_range = pd.date_range("2013-01-01 05:00:00Z", periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
@@ -1344,20 +1196,6 @@ def test_tz_range_is_utc(self):
result = dumps(df, iso_dates=True)
assert result == dfexp
- tz_range = pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern")
- assert dumps(tz_range, iso_dates=True) == exp
- dti = pd.DatetimeIndex(tz_range)
- assert dumps(dti, iso_dates=True) == exp
- df = DataFrame({"DT": dti})
- assert dumps(df, iso_dates=True) == dfexp
-
- tz_range = pd.date_range("2013-01-01 00:00:00-0500", periods=2)
- assert dumps(tz_range, iso_dates=True) == exp
- dti = pd.DatetimeIndex(tz_range)
- assert dumps(dti, iso_dates=True) == exp
- df = DataFrame({"DT": dti})
- assert dumps(df, iso_dates=True) == dfexp
-
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
@@ -1449,14 +1287,10 @@ def test_latin_encoding(self):
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
- def _try_decode(x, encoding="latin-1"):
- try:
- return x.decode(encoding)
- except AttributeError:
- return x
-
- # not sure how to remove latin-1 from code in python 2 and 3
- values = [[_try_decode(x) for x in y] for y in values]
+ values = [
+ [x.decode("latin-1") if isinstance(x, bytes) else x for x in y]
+ for y in values
+ ]
examples = []
for dtype in ["category", object]:
@@ -1611,3 +1445,183 @@ def test_read_timezone_information(self):
)
expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
+ )
+ def test_timedelta_as_label(self, date_format, key):
+ df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")])
+ expected = '{{"{key}":{{"0":1}}}}'.format(key=key)
+ result = df.to_json(date_format=date_format)
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "orient,expected",
+ [
+ ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
+ ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
+ # TODO: the below have separate encoding procedures
+ # They produce JSON but not in a consistent manner
+ pytest.param("split", "", marks=pytest.mark.skip),
+ pytest.param("table", "", marks=pytest.mark.skip),
+ ],
+ )
+ def test_tuple_labels(self, orient, expected):
+ # GH 20500
+ df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
+ result = df.to_json(orient=orient)
+ assert result == expected
+
+ @pytest.mark.parametrize("indent", [1, 2, 4])
+ def test_to_json_indent(self, indent):
+ # GH 12004
+ df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+
+ result = df.to_json(indent=indent)
+ spaces = " " * indent
+ expected = """{{
+{spaces}"a":{{
+{spaces}{spaces}"0":"foo",
+{spaces}{spaces}"1":"baz"
+{spaces}}},
+{spaces}"b":{{
+{spaces}{spaces}"0":"bar",
+{spaces}{spaces}"1":"qux"
+{spaces}}}
+}}""".format(
+ spaces=spaces
+ )
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "orient,expected",
+ [
+ (
+ "split",
+ """{
+ "columns":[
+ "a",
+ "b"
+ ],
+ "index":[
+ 0,
+ 1
+ ],
+ "data":[
+ [
+ "foo",
+ "bar"
+ ],
+ [
+ "baz",
+ "qux"
+ ]
+ ]
+}""",
+ ),
+ (
+ "records",
+ """[
+ {
+ "a":"foo",
+ "b":"bar"
+ },
+ {
+ "a":"baz",
+ "b":"qux"
+ }
+]""",
+ ),
+ (
+ "index",
+ """{
+ "0":{
+ "a":"foo",
+ "b":"bar"
+ },
+ "1":{
+ "a":"baz",
+ "b":"qux"
+ }
+}""",
+ ),
+ (
+ "columns",
+ """{
+ "a":{
+ "0":"foo",
+ "1":"baz"
+ },
+ "b":{
+ "0":"bar",
+ "1":"qux"
+ }
+}""",
+ ),
+ (
+ "values",
+ """[
+ [
+ "foo",
+ "bar"
+ ],
+ [
+ "baz",
+ "qux"
+ ]
+]""",
+ ),
+ (
+ "table",
+ """{
+ "schema":{
+ "fields":[
+ {
+ "name":"index",
+ "type":"integer"
+ },
+ {
+ "name":"a",
+ "type":"string"
+ },
+ {
+ "name":"b",
+ "type":"string"
+ }
+ ],
+ "primaryKey":[
+ "index"
+ ],
+ "pandas_version":"0.20.0"
+ },
+ "data":[
+ {
+ "index":0,
+ "a":"foo",
+ "b":"bar"
+ },
+ {
+ "index":1,
+ "a":"baz",
+ "b":"qux"
+ }
+ ]
+}""",
+ ),
+ ],
+ )
+ def test_json_indent_all_orients(self, orient, expected):
+ # GH 12004
+ df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+ result = df.to_json(orient=orient, indent=4)
+
+ if PY35:
+ assert json.loads(result) == json.loads(expected)
+ else:
+ assert result == expected
+
+ def test_json_negative_indent_raises(self):
+ with pytest.raises(ValueError, match="must be a nonnegative integer"):
+ pd.DataFrame().to_json(indent=-1)
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index b94d5cd497ccf..3d657418e43cd 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -11,6 +11,7 @@
import os
import platform
from tempfile import TemporaryFile
+from urllib.error import URLError
import numpy as np
import pytest
@@ -21,7 +22,6 @@
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas.util.testing as tm
-from pandas.io.common import URLError
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
@@ -1865,6 +1865,23 @@ def test_inf_parsing(all_parsers, na_filter):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("na_filter", [True, False])
+def test_infinity_parsing(all_parsers, na_filter):
+ parser = all_parsers
+ data = """\
+,A
+a,Infinity
+b,-Infinity
+c,+Infinity
+"""
+ expected = DataFrame(
+ {"A": [float("infinity"), float("-infinity"), float("+infinity")]},
+ index=["a", "b", "c"],
+ )
+ result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
@@ -1898,7 +1915,10 @@ def test_null_byte_char(all_parsers):
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
- msg = "NULL byte detected"
+ if compat.PY38:
+ msg = "line contains NUL"
+ else:
+ msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
@@ -2020,9 +2040,34 @@ def test_file_handles_with_open(all_parsers, csv1):
# Don't close user provided file handles.
parser = all_parsers
- with open(csv1, "r") as f:
- parser.read_csv(f)
- assert not f.closed
+ for mode in ["r", "rb"]:
+ with open(csv1, mode) as f:
+ parser.read_csv(f)
+ assert not f.closed
+
+
+@pytest.mark.parametrize(
+ "fname,encoding",
+ [
+ ("test1.csv", "utf-8"),
+ ("unicode_series.csv", "latin-1"),
+ ("sauron.SHIFT_JIS.csv", "shiftjis"),
+ ],
+)
+def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding):
+ # gh-23779: Python csv engine shouldn't error on files opened in binary.
+ parser = all_parsers
+
+ fpath = os.path.join(csv_dir_path, fname)
+ expected = parser.read_csv(fpath, encoding=encoding)
+
+ with open(fpath, mode="r", encoding=encoding) as fa:
+ result = parser.read_csv(fa)
+ tm.assert_frame_equal(expected, result)
+
+ with open(fpath, mode="rb") as fb:
+ result = parser.read_csv(fb, encoding=encoding)
+ tm.assert_frame_equal(expected, result)
def test_invalid_file_buffer_class(all_parsers):
@@ -2122,7 +2167,7 @@ def test_suppress_error_output(all_parsers, capsys):
compat.is_platform_windows() and not compat.PY36,
reason="On Python < 3.6 won't pass on Windows",
)
-@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv"])
+@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py
index 92c91565e1c23..a68d46e8a6c15 100644
--- a/pandas/tests/io/parser/test_dtypes.py
+++ b/pandas/tests/io/parser/test_dtypes.py
@@ -79,7 +79,7 @@ def test_invalid_dtype_per_column(all_parsers):
3,4.5
4,5.5"""
- with pytest.raises(TypeError, match="data type 'foo' not understood"):
+ with pytest.raises(TypeError, match='data type "foo" not understood'):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py
index 99e0181741998..0ecd8be7ddc78 100644
--- a/pandas/tests/io/parser/test_header.py
+++ b/pandas/tests/io/parser/test_header.py
@@ -24,6 +24,35 @@ def test_read_with_bad_header(all_parsers):
parser.read_csv(s, header=[10])
+def test_negative_header(all_parsers):
+ # see gh-27779
+ parser = all_parsers
+ data = """1,2,3,4,5
+6,7,8,9,10
+11,12,13,14,15
+"""
+ with pytest.raises(
+ ValueError,
+ match="Passing negative integer to header is invalid. "
+ "For no header, use header=None instead",
+ ):
+ parser.read_csv(StringIO(data), header=-1)
+
+
+@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])
+def test_negative_multi_index_header(all_parsers, header):
+ # see gh-27779
+ parser = all_parsers
+ data = """1,2,3,4,5
+ 6,7,8,9,10
+ 11,12,13,14,15
+ """
+ with pytest.raises(
+ ValueError, match="cannot specify multi-index header with negative integers"
+ ):
+ parser.read_csv(StringIO(data), header=header)
+
+
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(all_parsers, header):
# see gh-6114
diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py
index d67f2c3b7bd66..ae604b1141204 100644
--- a/pandas/tests/io/pytables/test_pytables.py
+++ b/pandas/tests/io/pytables/test_pytables.py
@@ -37,8 +37,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal, set_timezone
-from pandas.io import pytables as pytables # noqa:E402
-from pandas.io.formats.printing import pprint_thing
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
@@ -46,7 +44,9 @@
Term,
read_hdf,
)
-from pandas.io.pytables import TableIterator # noqa:E402
+
+from pandas.io import pytables as pytables # noqa: E402 isort:skip
+from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
tables = pytest.importorskip("tables")
@@ -70,13 +70,6 @@
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
-ignore_sparse = pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-ignore_dataframe_tosparse = pytest.mark.filterwarnings(
- "ignore:DataFrame.to_sparse:FutureWarning"
-)
-ignore_series_tosparse = pytest.mark.filterwarnings(
- "ignore:Series.to_sparse:FutureWarning"
-)
# contextmanager to ensure the file cleanup
@@ -2352,38 +2345,6 @@ def test_series(self):
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal, check_index_type=False)
- @ignore_sparse
- @ignore_series_tosparse
- def test_sparse_series(self):
-
- s = tm.makeStringSeries()
- s.iloc[3:5] = np.nan
- ss = s.to_sparse()
- self._check_roundtrip(ss, tm.assert_series_equal, check_series_type=True)
-
- ss2 = s.to_sparse(kind="integer")
- self._check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True)
-
- ss3 = s.to_sparse(fill_value=0)
- self._check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True)
-
- @ignore_sparse
- @ignore_dataframe_tosparse
- def test_sparse_frame(self):
-
- s = tm.makeDataFrame()
- s.iloc[3:5, 1:3] = np.nan
- s.iloc[8:10, -2] = np.nan
- ss = s.to_sparse()
-
- self._check_double_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True)
-
- ss2 = s.to_sparse(kind="integer")
- self._check_double_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True)
-
- ss3 = s.to_sparse(fill_value=0)
- self._check_double_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True)
-
def test_float_index(self):
# GH #454
@@ -2708,40 +2669,6 @@ def test_overwrite_node(self):
tm.assert_series_equal(store["a"], ts)
- @ignore_sparse
- @ignore_dataframe_tosparse
- def test_sparse_with_compression(self):
-
- # GH 2931
-
- # make sparse dataframe
- arr = np.random.binomial(n=1, p=0.01, size=(1000, 10))
- df = DataFrame(arr).to_sparse(fill_value=0)
-
- # case 1: store uncompressed
- self._check_double_roundtrip(
- df, tm.assert_frame_equal, compression=False, check_frame_type=True
- )
-
- # case 2: store compressed (works)
- self._check_double_roundtrip(
- df, tm.assert_frame_equal, compression="zlib", check_frame_type=True
- )
-
- # set one series to be completely sparse
- df[0] = np.zeros(1000)
-
- # case 3: store df with completely sparse series uncompressed
- self._check_double_roundtrip(
- df, tm.assert_frame_equal, compression=False, check_frame_type=True
- )
-
- # case 4: try storing df with completely sparse series compressed
- # (fails)
- self._check_double_roundtrip(
- df, tm.assert_frame_equal, compression="zlib", check_frame_type=True
- )
-
def test_select(self):
with ensure_clean_store(self.path) as store:
@@ -3487,14 +3414,9 @@ def test_string_select(self):
expected = df[df.x == "none"]
assert_frame_equal(result, expected)
- try:
- result = store.select("df", "x!=none")
- expected = df[df.x != "none"]
- assert_frame_equal(result, expected)
- except Exception as detail:
- pprint_thing("[{0}]".format(detail))
- pprint_thing(store)
- pprint_thing(expected)
+ result = store.select("df", "x!=none")
+ expected = df[df.x != "none"]
+ assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
@@ -3889,8 +3811,6 @@ def test_start_stop_multiple(self):
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
- @ignore_sparse
- @ignore_dataframe_tosparse
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
@@ -3930,10 +3850,6 @@ def test_start_stop_fixed(self):
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
- dfs = df.to_sparse()
- store.put("dfs", dfs)
- with pytest.raises(NotImplementedError):
- store.select("dfs", start=0, stop=5)
def test_select_filter_corner(self):
@@ -5446,3 +5362,16 @@ def test_read_with_where_tz_aware_index(self):
store.append(key, expected, format="table", append=True)
result = pd.read_hdf(path, key, where="DATE > 20151130")
assert_frame_equal(result, expected)
+
+ def test_py2_created_with_datetimez(self, datapath):
+ # The test HDF5 file was created in Python 2, but could not be read in
+ # Python 3.
+ #
+ # GH26443
+ index = [pd.Timestamp("2019-01-01T18:00").tz_localize("America/New_York")]
+ expected = DataFrame({"data": 123}, index=index)
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r"
+ ) as store:
+ result = store["key"]
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 8e09e96fbd471..655fd9d01c1c0 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -222,7 +222,7 @@ def test_read_expands_user_home_dir(
(pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
(pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
(pd.read_msgpack, "os", ("io", "msgpack", "data", "frame.mp")),
- (pd.read_pickle, "os", ("io", "data", "categorical_0_14_1.pickle")),
+ (pd.read_pickle, "os", ("io", "data", "categorical.0.25.0.pickle")),
],
)
def test_read_fspath_all(self, reader, module, path, datapath):
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index ce459ab24afe0..d68b6a1effaa0 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -1,5 +1,8 @@
import contextlib
import os
+import subprocess
+import sys
+import textwrap
import warnings
import pytest
@@ -125,3 +128,33 @@ def test_compression_warning(compression_only):
with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
with f:
df.to_csv(f, compression=compression_only)
+
+
+def test_with_missing_lzma():
+ """Tests if import pandas works when lzma is not present."""
+ # https://github.com/pandas-dev/pandas/issues/27575
+ code = textwrap.dedent(
+ """\
+ import sys
+ sys.modules['lzma'] = None
+ import pandas
+ """
+ )
+ subprocess.check_output([sys.executable, "-c", code])
+
+
+def test_with_missing_lzma_runtime():
+ """Tests if RuntimeError is hit when calling lzma without
+ having the module available."""
+ code = textwrap.dedent(
+ """
+ import sys
+ import pytest
+ sys.modules['lzma'] = None
+ import pandas
+ df = pandas.DataFrame()
+ with pytest.raises(RuntimeError, match='lzma module'):
+ df.to_csv('foo.csv', compression='xz')
+ """
+ )
+ subprocess.check_output([sys.executable, "-c", code])
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 87a2405a10dd5..ea69245924b0c 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -8,14 +8,16 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, ensure_clean
-from pandas.io.feather_format import read_feather, to_feather # noqa:E402
+from pandas.io.feather_format import read_feather, to_feather # noqa: E402 isort:skip
pyarrow = pytest.importorskip("pyarrow")
pyarrow_version = LooseVersion(pyarrow.__version__)
+filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
+@filter_sparse
@pytest.mark.single
class TestFeather:
def check_error_on_write(self, df, exc):
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 2ca56230b5b8c..05d86d2c8aa5b 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -1,4 +1,5 @@
from io import StringIO
+import os
import numpy as np
import pytest
@@ -60,6 +61,31 @@ def open(*args):
assert_frame_equal(df1, df2)
+@td.skip_if_no("fastparquet")
+@td.skip_if_no("gcsfs")
+def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
+ """Regression test for writing to a not-yet-existent GCS Parquet file."""
+ df1 = DataFrame(
+ {
+ "int": [1, 3],
+ "float": [2.0, np.nan],
+ "str": ["t", "s"],
+ "dt": date_range("2018-06-18", periods=2),
+ }
+ )
+
+ class MockGCSFileSystem:
+ def open(self, path, mode="r", *args):
+ if "w" not in mode:
+ raise FileNotFoundError
+ return open(os.path.join(tmpdir, "test.parquet"), mode)
+
+ monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
+ df1.to_parquet(
+ "gs://test/test.csv", index=True, engine="fastparquet", compression=None
+ )
+
+
@td.skip_if_no("gcsfs")
def test_gcs_get_filepath_or_buffer(monkeypatch):
df1 = DataFrame(
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 615e2735cd288..183d217eb09d6 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -4,6 +4,7 @@
import os
import re
import threading
+from urllib.error import URLError
import numpy as np
from numpy.random import rand
@@ -17,7 +18,7 @@
import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network
-from pandas.io.common import URLError, file_path_to_url
+from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index 33a11087f622d..0bafbab069dd4 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -585,49 +585,6 @@ def test_dataframe_duplicate_column_names(self):
assert_frame_equal(result_3, expected_3)
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
-class TestSparse(TestPackers):
- def _check_roundtrip(self, obj, comparator, **kwargs):
-
- # currently these are not implemetned
- # i_rec = self.encode_decode(obj)
- # comparator(obj, i_rec, **kwargs)
- msg = r"msgpack sparse (series|frame) is not implemented"
- with pytest.raises(NotImplementedError, match=msg):
- self.encode_decode(obj)
-
- def test_sparse_series(self):
-
- s = tm.makeStringSeries()
- s[3:5] = np.nan
- ss = s.to_sparse()
- self._check_roundtrip(ss, tm.assert_series_equal, check_series_type=True)
-
- ss2 = s.to_sparse(kind="integer")
- self._check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True)
-
- ss3 = s.to_sparse(fill_value=0)
- self._check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True)
-
- def test_sparse_frame(self):
-
- s = tm.makeDataFrame()
- s.loc[3:5, 1:3] = np.nan
- s.loc[8:10, -2] = np.nan
- ss = s.to_sparse()
-
- self._check_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True)
-
- ss2 = s.to_sparse(kind="integer")
- self._check_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True)
-
- ss3 = s.to_sparse(fill_value=0)
- self._check_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True)
-
-
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
@@ -878,7 +835,6 @@ def legacy_packer(request, datapath):
return datapath(request.param)
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
class TestMsgpack:
"""
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index a04fb9fd50257..efc2b6d6c5b3d 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1,5 +1,6 @@
""" test parquet compat """
import datetime
+from distutils.version import LooseVersion
import os
from warnings import catch_warnings
@@ -33,6 +34,10 @@
except ImportError:
_HAVE_FASTPARQUET = False
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:RangeIndex.* is deprecated:DeprecationWarning"
+)
+
# setup engines & skips
@pytest.fixture(
@@ -234,6 +239,15 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
+ if (
+ LooseVersion(pyarrow.__version__) < "0.15"
+ and LooseVersion(pyarrow.__version__) >= "0.13"
+ ):
+ pytest.xfail(
+ "Reading fastparquet with pyarrow in 0.14 fails: "
+ "https://issues.apache.org/jira/browse/ARROW-6492"
+ )
+
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
@@ -408,8 +422,6 @@ def test_basic(self, pa, df_full):
check_round_trip(df, pa)
- # TODO: This doesn't fail on all systems; track down which
- @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)", strict=False)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
@@ -476,6 +488,18 @@ def test_empty_dataframe(self, pa):
df = pd.DataFrame()
check_round_trip(df, pa)
+ @td.skip_if_no("pyarrow", min_version="0.14.1.dev")
+ def test_nullable_integer(self, pa):
+ df = pd.DataFrame({"a": pd.Series([1, 2, 3], dtype="Int64")})
+ # currently de-serialized as plain int
+ expected = df.assign(a=df.a.astype("int64"))
+ check_round_trip(df, pa, expected=expected)
+
+ df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
+ # if missing values currently de-serialized as float
+ expected = df.assign(a=df.a.astype("float64"))
+ check_round_trip(df, pa, expected=expected)
+
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.2.1")
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 076d0c9f947c7..edd0b09185e71 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -13,7 +13,6 @@
import bz2
import glob
import gzip
-import lzma
import os
import pickle
import shutil
@@ -22,7 +21,7 @@
import pytest
-from pandas.compat import is_platform_little_endian
+from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian
import pandas as pd
from pandas import Index
@@ -30,6 +29,8 @@
from pandas.tseries.offsets import Day, MonthEnd
+lzma = _import_lzma()
+
@pytest.fixture(scope="module")
def current_pickle_data():
@@ -48,8 +49,8 @@ def compare_element(result, expected, typ, version=None):
return
if typ.startswith("sp_"):
- comparator = getattr(tm, "assert_{typ}_equal".format(typ=typ))
- comparator(result, expected, exact_indices=False)
+ comparator = tm.assert_equal
+ comparator(result, expected)
elif typ == "timestamp":
if expected is pd.NaT:
assert result is pd.NaT
@@ -81,10 +82,6 @@ def compare(data, vf, version):
return data
-def compare_sp_series_ts(res, exp, typ, version):
- tm.assert_sp_series_equal(res, exp)
-
-
def compare_series_ts(result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
@@ -133,10 +130,6 @@ def compare_index_period(result, expected, typ, version):
tm.assert_index_equal(result.shift(2), expected.shift(2))
-def compare_sp_frame_float(result, expected, typ, version):
- tm.assert_sp_frame_equal(result, expected)
-
-
files = glob.glob(
os.path.join(os.path.dirname(__file__), "data", "legacy_pickle", "*", "*.pickle")
)
@@ -150,7 +143,6 @@ def legacy_pickle(request, datapath):
# ---------------------
# tests
# ---------------------
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_pickles(current_pickle_data, legacy_pickle):
if not is_platform_little_endian():
pytest.skip("known failure on non-little endian")
@@ -161,7 +153,6 @@ def test_pickles(current_pickle_data, legacy_pickle):
compare(current_pickle_data, legacy_pickle, version)
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_round_trip_current(current_pickle_data):
def python_pickler(obj, path):
with open(path, "wb") as fh:
@@ -193,38 +184,6 @@ def python_unpickler(path):
compare_element(result, expected, typ)
-def test_pickle_v0_14_1(datapath):
-
- cat = pd.Categorical(
- values=["a", "b", "c"], ordered=False, categories=["a", "b", "c", "d"]
- )
- pickle_path = datapath("io", "data", "categorical_0_14_1.pickle")
- # This code was executed once on v0.14.1 to generate the pickle:
- #
- # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
- # name='foobar')
- # with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
- #
- tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
-
-
-def test_pickle_v0_15_2(datapath):
- # ordered -> _ordered
- # GH 9347
-
- cat = pd.Categorical(
- values=["a", "b", "c"], ordered=False, categories=["a", "b", "c", "d"]
- )
- pickle_path = datapath("io", "data", "categorical_0_15_2.pickle")
- # This code was executed once on v0.15.2 to generate the pickle:
- #
- # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
- # name='foobar')
- # with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
- #
- tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
-
-
def test_pickle_path_pathlib():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle)
@@ -237,6 +196,32 @@ def test_pickle_path_localpath():
tm.assert_frame_equal(df, result)
+def test_legacy_sparse_warning(datapath):
+ """
+
+ Generated with
+
+ >>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [0, 0, 1, 1]}).to_sparse()
+ >>> df.to_pickle("pandas/tests/io/data/sparseframe-0.20.3.pickle.gz",
+ ... compression="gzip")
+
+ >>> s = df['B']
+ >>> s.to_pickle("pandas/tests/io/data/sparseseries-0.20.3.pickle.gz",
+ ... compression="gzip")
+ """
+ with tm.assert_produces_warning(FutureWarning):
+ simplefilter("ignore", DeprecationWarning) # from boto
+ pd.read_pickle(
+ datapath("io", "data", "sparseseries-0.20.3.pickle.gz"), compression="gzip"
+ )
+
+ with tm.assert_produces_warning(FutureWarning):
+ simplefilter("ignore", DeprecationWarning) # from boto
+ pd.read_pickle(
+ datapath("io", "data", "sparseframe-0.20.3.pickle.gz"), compression="gzip"
+ )
+
+
# ---------------------
# test pickle compression
# ---------------------
@@ -270,7 +255,7 @@ def compress_file(self, src_path, dest_path, compression):
with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
f.write(src_path, os.path.basename(src_path))
elif compression == "xz":
- f = lzma.LZMAFile(dest_path, "w")
+ f = _get_lzma_file(lzma)(dest_path, "w")
else:
msg = "Unrecognized compression type: {}".format(compression)
raise ValueError(msg)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index d8465a427eaea..89bc98b5a1006 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -26,8 +26,6 @@
import numpy as np
import pytest
-from pandas.compat import PY36
-
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
@@ -538,11 +536,11 @@ def _to_sql_save_index(self):
assert ix_cols == [["A"]]
def _transaction_test(self):
- self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
-
- ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
+ with self.pandasSQL.run_transaction() as trans:
+ trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
# Make sure when transaction is rolled back, no rows get inserted
+ ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
@@ -565,7 +563,6 @@ def _transaction_test(self):
class _TestSQLApi(PandasSQLTest):
-
"""
Base class to test the public API.
@@ -2214,8 +2211,6 @@ def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
- if PY36:
- pytest.skip("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 1e7d568602656..a0ec06a2197ae 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -101,6 +101,8 @@ def setup_method(self, datapath):
self.dta24_111 = os.path.join(self.dirpath, "stata7_111.dta")
self.dta25_118 = os.path.join(self.dirpath, "stata16_118.dta")
+ self.dta26_119 = os.path.join(self.dirpath, "stata1_119.dta.gz")
+
self.stata_dates = os.path.join(self.dirpath, "stata13_dates.dta")
def read_dta(self, file):
@@ -1780,3 +1782,14 @@ def test_encoding_latin1_118(self):
expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
tm.assert_frame_equal(encoded, expected)
+
+ @pytest.mark.slow
+ def test_stata_119(self):
+ # Gzipped since contains 32,999 variables and uncompressed is 20MiB
+ with gzip.open(self.dta26_119, "rb") as gz:
+ df = read_stata(gz)
+ assert df.shape == (1, 32999)
+ assert df.iloc[0, 6] == "A" * 3000
+ assert df.iloc[0, 7] == 3.14
+ assert df.iloc[0, -1] == 1
+ assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21))
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 4929422d20e8a..5a591f72d7361 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -103,6 +103,28 @@ def _check_legend_labels(self, axes, labels=None, visible=True):
else:
assert ax.get_legend() is None
+ def _check_legend_marker(self, ax, expected_markers=None, visible=True):
+ """
+ Check ax has expected legend markers
+
+ Parameters
+ ----------
+ ax : matplotlib Axes object
+ expected_markers : list-like
+ expected legend markers
+ visible : bool
+ expected legend visibility. markers are checked only when visible is
+ True
+ """
+ if visible and (expected_markers is None):
+ raise ValueError("Markers must be specified when visible is True")
+ if visible:
+ handles, _ = ax.get_legend_handles_labels()
+ markers = [handle.get_marker() for handle in handles]
+ assert markers == expected_markers
+ else:
+ assert ax.get_legend() is None
+
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index 51f2abb6cc2f4..6511d94aa4c09 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -1,38 +1,88 @@
+import sys
+import types
+
+import pkg_resources
import pytest
+import pandas.util._test_decorators as td
+
import pandas
+dummy_backend = types.ModuleType("pandas_dummy_backend")
+dummy_backend.plot = lambda *args, **kwargs: None
-def test_matplotlib_backend_error():
- msg = (
- "matplotlib is required for plotting when the default backend "
- '"matplotlib" is selected.'
- )
- try:
- import matplotlib # noqa
- except ImportError:
- with pytest.raises(ImportError, match=msg):
- pandas.set_option("plotting.backend", "matplotlib")
+
+@pytest.fixture
+def restore_backend():
+ """Restore the plotting backend to matplotlib"""
+ pandas.set_option("plotting.backend", "matplotlib")
+ yield
+ pandas.set_option("plotting.backend", "matplotlib")
def test_backend_is_not_module():
- msg = (
- '"not_an_existing_module" does not seem to be an installed module. '
- "A pandas plotting backend must be a module that can be imported"
- )
+ msg = "Could not find plotting backend 'not_an_existing_module'."
with pytest.raises(ValueError, match=msg):
pandas.set_option("plotting.backend", "not_an_existing_module")
+ assert pandas.options.plotting.backend == "matplotlib"
+
+
+def test_backend_is_correct(monkeypatch, restore_backend):
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
+
+ pandas.set_option("plotting.backend", "pandas_dummy_backend")
+ assert pandas.get_option("plotting.backend") == "pandas_dummy_backend"
+ assert (
+ pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend
+ )
+
+
+@td.skip_if_no_mpl
+def test_register_entrypoint(restore_backend):
-def test_backend_is_correct(monkeypatch):
- monkeypatch.setattr(
- "pandas.core.config_init.importlib.import_module", lambda name: None
+ dist = pkg_resources.get_distribution("pandas")
+ if dist.module_path not in pandas.__file__:
+ # We are running from a non-installed pandas, and this test is invalid
+ pytest.skip("Testing a non-installed pandas")
+
+ mod = types.ModuleType("my_backend")
+ mod.plot = lambda *args, **kwargs: 1
+
+ backends = pkg_resources.get_entry_map("pandas")
+ my_entrypoint = pkg_resources.EntryPoint(
+ "pandas_plotting_backend", mod.__name__, dist=dist
)
- pandas.set_option("plotting.backend", "correct_backend")
- assert pandas.get_option("plotting.backend") == "correct_backend"
-
- # Restore backend for other tests (matplotlib can be not installed)
- try:
- pandas.set_option("plotting.backend", "matplotlib")
- except ImportError:
- pass
+ backends["pandas_plotting_backends"]["my_backend"] = my_entrypoint
+ # TODO: the docs recommend importlib.util.module_from_spec. But this works for now.
+ sys.modules["my_backend"] = mod
+
+ result = pandas.plotting._core._get_plot_backend("my_backend")
+ assert result is mod
+
+ # TODO: https://github.com/pandas-dev/pandas/issues/27517
+ # Remove the td.skip_if_no_mpl
+ with pandas.option_context("plotting.backend", "my_backend"):
+ result = pandas.plotting._core._get_plot_backend()
+
+ assert result is mod
+
+
+def test_setting_backend_without_plot_raises():
+ # GH-28163
+ module = types.ModuleType("pandas_plot_backend")
+ sys.modules["pandas_plot_backend"] = module
+
+ assert pandas.options.plotting.backend == "matplotlib"
+ with pytest.raises(
+ ValueError, match="Could not find plotting backend 'pandas_plot_backend'."
+ ):
+ pandas.set_option("plotting.backend", "pandas_plot_backend")
+
+ assert pandas.options.plotting.backend == "matplotlib"
+
+
+@td.skip_if_mpl
+def test_no_matplotlib_ok():
+ with pytest.raises(ImportError):
+ pandas.plotting._core._get_plot_backend("matplotlib")
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index cab0efe53f1fc..116d924f5a596 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -9,7 +9,7 @@
import pandas.util._test_decorators as td
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.util.testing as tm
@@ -160,6 +160,49 @@ def test_fontsize(self):
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
+ def test_boxplot_numeric_data(self):
+ # GH 22799
+ df = DataFrame(
+ {
+ "a": date_range("2012-01-01", periods=100),
+ "b": np.random.randn(100),
+ "c": np.random.randn(100) + 2,
+ "d": date_range("2012-01-01", periods=100).astype(str),
+ "e": date_range("2012-01-01", periods=100, tz="UTC"),
+ "f": timedelta_range("1 days", periods=100),
+ }
+ )
+ ax = df.plot(kind="box")
+ assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
+
+ @pytest.mark.parametrize(
+ "colors_kwd, expected",
+ [
+ (
+ dict(boxes="r", whiskers="b", medians="g", caps="c"),
+ dict(boxes="r", whiskers="b", medians="g", caps="c"),
+ ),
+ (dict(boxes="r"), dict(boxes="r")),
+ ("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
+ ],
+ )
+ def test_color_kwd(self, colors_kwd, expected):
+ # GH: 26214
+ df = DataFrame(random.rand(10, 2))
+ result = df.boxplot(color=colors_kwd, return_type="dict")
+ for k, v in expected.items():
+ assert result[k][0].get_color() == v
+
+ @pytest.mark.parametrize(
+ "dict_colors, msg",
+ [(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
+ )
+ def test_color_kwd_errors(self, dict_colors, msg):
+ # GH: 26214
+ df = DataFrame(random.rand(10, 2))
+ with pytest.raises(ValueError, match=msg):
+ df.boxplot(color=dict_colors, return_type="dict")
+
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 35d12706f0590..aabe16d5050f9 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -40,6 +40,21 @@ def test_initial_warning():
assert "Using an implicitly" in out
+def test_registry_mpl_resets():
+ # Check that Matplotlib converters are properly reset (see issue #27481)
+ code = (
+ "import matplotlib.units as units; "
+ "import matplotlib.dates as mdates; "
+ "n_conv = len(units.registry); "
+ "import pandas as pd; "
+ "pd.plotting.register_matplotlib_converters(); "
+ "pd.plotting.deregister_matplotlib_converters(); "
+ "assert len(units.registry) == n_conv"
+ )
+ call = [sys.executable, "-c", code]
+ subprocess.check_output(call)
+
+
def test_timtetonum_accepts_unicode():
assert converter.time2num("00:01") == converter.time2num("00:01")
@@ -373,3 +388,21 @@ def test_convert_nested(self):
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
+
+
+class TestTimeDeltaConverter:
+ """Test timedelta converter"""
+
+ @pytest.mark.parametrize(
+ "x, decimal, format_expected",
+ [
+ (0.0, 0, "00:00:00"),
+ (3972320000000, 1, "01:06:12.3"),
+ (713233432000000, 2, "8 days 06:07:13.43"),
+ (32423432000000, 4, "09:00:23.4320"),
+ ],
+ )
+ def test_format_timedelta_ticks(self, x, decimal, format_expected):
+ tdc = converter.TimeSeries_TimedeltaFormatter
+ result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
+ assert result == format_expected
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index e3bc3d452f038..e2b7f2819f957 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1098,7 +1098,6 @@ def test_time(self):
assert xp == rs
@pytest.mark.slow
- @pytest.mark.xfail(strict=False, reason="Unreliable test")
def test_time_change_xlim(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
@@ -1411,7 +1410,7 @@ def test_plot_outofbounds_datetime(self):
def test_format_timedelta_ticks_narrow(self):
- expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in range(10)]
+ expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in np.arange(10)]
rng = timedelta_range("0", periods=10, freq="ns")
df = DataFrame(np.random.randn(len(rng), 3), rng)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 65815bcedebfc..84badba271fce 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -1881,6 +1881,31 @@ def test_df_legend_labels(self):
self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
assert df5.columns.tolist() == ["b", "c"]
+ def test_missing_marker_multi_plots_on_same_ax(self):
+ # GH 18222
+ df = pd.DataFrame(
+ data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]
+ )
+ fig, ax = self.plt.subplots(nrows=1, ncols=3)
+ # Left plot
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
+ self._check_legend_labels(ax[0], labels=["r", "g", "b"])
+ self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
+ # Center plot
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
+ self._check_legend_labels(ax[1], labels=["b", "r", "g"])
+ self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
+ # Right plot
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
+ self._check_legend_labels(ax[2], labels=["g", "b", "r"])
+ self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
+
def test_legend_name(self):
multi = DataFrame(
randn(4, 4),
@@ -3152,6 +3177,73 @@ def test_x_multiindex_values_ticks(self):
assert labels_position["(2013, 1)"] == 2.0
assert labels_position["(2013, 2)"] == 3.0
+ @pytest.mark.parametrize("kind", ["line", "area"])
+ def test_xlim_plot_line(self, kind):
+ # test if xlim is set correctly in plot.line and plot.area
+ # GH 27686
+ df = pd.DataFrame([2, 4], index=[1, 2])
+ ax = df.plot(kind=kind)
+ xlims = ax.get_xlim()
+ assert xlims[0] < 1
+ assert xlims[1] > 2
+
+ def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
+ # test if xlim is set correctly when ax contains multiple different kinds
+ # of plots, GH 27686
+ fig, ax = self.plt.subplots()
+
+ indexes = ["k1", "k2", "k3", "k4"]
+ df = pd.DataFrame(
+ {
+ "s1": [1000, 2000, 1500, 2000],
+ "s2": [900, 1400, 2000, 3000],
+ "s3": [1500, 1500, 1600, 1200],
+ "secondary_y": [1, 3, 4, 3],
+ },
+ index=indexes,
+ )
+ df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False)
+ df[["secondary_y"]].plot(ax=ax, secondary_y=True)
+
+ xlims = ax.get_xlim()
+ assert xlims[0] < 0
+ assert xlims[1] > 3
+
+ # make sure axis labels are plotted correctly as well
+ xticklabels = [t.get_text() for t in ax.get_xticklabels()]
+ assert xticklabels == indexes
+
+ def test_subplots_sharex_false(self):
+ # test when sharex is set to False, two plots should have different
+ # labels, GH 25160
+ df = pd.DataFrame(np.random.rand(10, 2))
+ df.iloc[5:, 1] = np.nan
+ df.iloc[:5, 0] = np.nan
+
+ figs, axs = self.plt.subplots(2, 1)
+ df.plot.line(ax=axs, subplots=True, sharex=False)
+
+ expected_ax1 = np.arange(4.5, 10, 0.5)
+ expected_ax2 = np.arange(-0.5, 5, 0.5)
+
+ tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
+ tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
+
+ def test_plot_no_rows(self):
+ # GH 27758
+ df = pd.DataFrame(columns=["foo"], dtype=int)
+ assert df.empty
+ ax = df.plot()
+ assert len(ax.get_lines()) == 1
+ line = ax.get_lines()[0]
+ assert len(line.get_xdata()) == 0
+ assert len(line.get_ydata()) == 0
+
+ def test_plot_no_numeric_data(self):
+ df = pd.DataFrame(["a", "b", "c"])
+ with pytest.raises(TypeError):
+ df.plot()
+
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 6cb6f818d40fd..940cfef4058e0 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -21,7 +21,7 @@ def test_import_error_message():
# GH-19810
df = DataFrame({"A": [1, 2]})
- with pytest.raises(ImportError, match="No module named 'matplotlib'"):
+ with pytest.raises(ImportError, match="matplotlib is required for plotting"):
df.plot()
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 8b4a78e9195b5..89259cbb6c62d 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -167,6 +167,15 @@ def test_label(self):
ax.legend() # draw it
self._check_legend_labels(ax, labels=["LABEL"])
+ def test_boolean(self):
+ # GH 23719
+ s = Series([False, False, True])
+ _check_plot_works(s.plot, include_bool=True)
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
+ _check_plot_works(s.plot)
+
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
@@ -862,6 +871,13 @@ def test_xticklabels(self):
exp = ["P{i:02d}".format(i=i) for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
+ def test_xtick_barPlot(self):
+ # GH28172
+ s = pd.Series(range(10), index=["P{i:02d}".format(i=i) for i in range(10)])
+ ax = s.plot.bar(xticks=range(0, 11, 2))
+ exp = np.array(list(range(0, 11, 2)))
+ tm.assert_numpy_array_equal(exp, ax.get_xticks())
+
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
@@ -888,3 +904,30 @@ def test_plot_accessor_updates_on_inplace(self):
_, ax = self.plt.subplots()
after = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(before, after)
+
+ @pytest.mark.parametrize("kind", ["line", "area"])
+ def test_plot_xlim_for_series(self, kind):
+ # test if xlim is also correctly plotted in Series for line and area
+ # GH 27686
+ s = Series([2, 3])
+ _, ax = self.plt.subplots()
+ s.plot(kind=kind, ax=ax)
+ xlims = ax.get_xlim()
+
+ assert xlims[0] < 0
+ assert xlims[1] > 1
+
+ def test_plot_no_rows(self):
+ # GH 27758
+ df = pd.Series(dtype=int)
+ assert df.empty
+ ax = df.plot()
+ assert len(ax.get_lines()) == 1
+ line = ax.get_lines()[0]
+ assert len(line.get_xdata()) == 0
+ assert len(line.get_ydata()) == 0
+
+ def test_plot_no_numeric_data(self):
+ df = pd.Series(["a", "b", "c"])
+ with pytest.raises(TypeError):
+ df.plot()
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 929bd1725b30a..fb3d428bcf4bf 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -885,7 +885,7 @@ def test_resample_dtype_preservation():
assert result.val.dtype == np.int32
-def test_resample_dtype_coerceion():
+def test_resample_dtype_coercion():
pytest.importorskip("scipy.interpolate")
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index b6c6f967333a8..a04f093ee7818 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1340,6 +1340,18 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self):
expected = expected.reindex(columns=["a", "key", "b"])
tm.assert_frame_equal(result, expected)
+ def test_merge_readonly(self):
+ # https://github.com/pandas-dev/pandas/issues/27943
+ data1 = pd.DataFrame(
+ np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"]
+ )
+ data2 = pd.DataFrame(
+ np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"]
+ )
+
+ data1._data.blocks[0].values.flags.writeable = False
+ data1.merge(data2) # no error
+
def _check_merge(x, y):
for how in ["inner", "left", "outer"]:
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 6b66386bafc5e..caf2539a9e150 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -1,3 +1,5 @@
+import datetime
+
import numpy as np
import pytest
import pytz
@@ -588,14 +590,17 @@ def test_non_sorted(self):
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
- def test_tolerance(self):
+ @pytest.mark.parametrize(
+ "tolerance",
+ [Timedelta("1day"), datetime.timedelta(days=1)],
+ ids=["pd.Timedelta", "datetime.timedelta"],
+ )
+ def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
- result = merge_asof(
- trades, quotes, on="time", by="ticker", tolerance=Timedelta("1day")
- )
+ result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
assert_frame_equal(result, expected)
@@ -1246,3 +1251,39 @@ def test_by_mixed_tz_aware(self):
)
expected["value_y"] = np.array([np.nan], dtype=object)
assert_frame_equal(result, expected)
+
+ def test_timedelta_tolerance_nearest(self):
+ # GH 27642
+
+ left = pd.DataFrame(
+ list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
+ columns=["time", "left"],
+ )
+
+ left["time"] = pd.to_timedelta(left["time"], "ms")
+
+ right = pd.DataFrame(
+ list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
+ columns=["time", "right"],
+ )
+
+ right["time"] = pd.to_timedelta(right["time"], "ms")
+
+ expected = pd.DataFrame(
+ list(
+ zip(
+ [0, 5, 10, 15, 20, 25],
+ [0, 1, 2, 3, 4, 5],
+ [0, np.nan, 2, 4, np.nan, np.nan],
+ )
+ ),
+ columns=["time", "left", "right"],
+ )
+
+ expected["time"] = pd.to_timedelta(expected["time"], "ms")
+
+ result = pd.merge_asof(
+ left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
+ )
+
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 6366bf0521fbc..13f0f14014a31 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -50,7 +50,6 @@ def sort_with_none(request):
class TestConcatAppendCommon:
-
"""
Test common dtype coercion rules between concat and append.
"""
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index be82e7f595f8c..582084e3bfb5a 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -677,6 +677,32 @@ def test_pivot_periods(self, method):
pv = pd.pivot(df, index="p1", columns="p2", values="data1")
tm.assert_frame_equal(pv, expected)
+ def test_pivot_periods_with_margins(self):
+ # GH 28323
+ df = DataFrame(
+ {
+ "a": [1, 1, 2, 2],
+ "b": [
+ pd.Period("2019Q1"),
+ pd.Period("2019Q2"),
+ pd.Period("2019Q1"),
+ pd.Period("2019Q2"),
+ ],
+ "x": 1.0,
+ }
+ )
+
+ expected = DataFrame(
+ data=1.0,
+ index=pd.Index([1, 2, "All"], name="a"),
+ columns=pd.Index(
+ [pd.Period("2019Q1"), pd.Period("2019Q2"), "All"], name="b"
+ ),
+ )
+
+ result = df.pivot_table(index="a", columns="b", values="x", margins=True)
+ tm.assert_frame_equal(expected, result)
+
@pytest.mark.parametrize(
"values",
[
@@ -2447,3 +2473,84 @@ def test_crosstab_unsorted_order(self):
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
)
tm.assert_frame_equal(result, expected)
+
+ def test_margin_normalize(self):
+ # GH 27500
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
+ "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
+ "C": [
+ "small",
+ "large",
+ "large",
+ "small",
+ "small",
+ "large",
+ "small",
+ "small",
+ "large",
+ ],
+ "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
+ "E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
+ }
+ )
+ # normalize on index
+ result = pd.crosstab(
+ [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
+ )
+ expected = pd.DataFrame(
+ [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
+ )
+ expected.index = MultiIndex(
+ levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
+ codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
+ names=["A", "B"],
+ )
+ expected.columns = Index(["large", "small"], dtype="object", name="C")
+ tm.assert_frame_equal(result, expected)
+
+ # normalize on columns
+ result = pd.crosstab(
+ [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
+ )
+ expected = pd.DataFrame(
+ [
+ [0.25, 0.2, 0.222222],
+ [0.25, 0.2, 0.222222],
+ [0.5, 0.2, 0.333333],
+ [0, 0.4, 0.222222],
+ ]
+ )
+ expected.columns = Index(
+ ["large", "small", "Sub-Total"], dtype="object", name="C"
+ )
+ expected.index = MultiIndex(
+ levels=[["bar", "foo"], ["one", "two"]],
+ codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
+ names=["A", "B"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # normalize on both index and column
+ result = pd.crosstab(
+ [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
+ )
+ expected = pd.DataFrame(
+ [
+ [0.111111, 0.111111, 0.222222],
+ [0.111111, 0.111111, 0.222222],
+ [0.222222, 0.111111, 0.333333],
+ [0.000000, 0.222222, 0.222222],
+ [0.444444, 0.555555, 1],
+ ]
+ )
+ expected.columns = Index(
+ ["large", "small", "Sub-Total"], dtype="object", name="C"
+ )
+ expected.index = MultiIndex(
+ levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
+ codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
+ names=["A", "B"],
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index 149930059d868..5e80c317a587b 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -8,12 +8,11 @@
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
-from pandas.core.sparse.api import SparseArray, SparseDtype
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestGetDummies:
@pytest.fixture
def df(self):
@@ -273,7 +272,7 @@ def test_dataframe_dummies_subset(self, df, sparse):
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
- expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
+ expected[cols] = expected[cols].astype(pd.SparseDtype("uint8", 0))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
@@ -292,7 +291,7 @@ def test_dataframe_dummies_prefix_sep(self, df, sparse):
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
- expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
+ expected[cols] = expected[cols].astype(pd.SparseDtype("uint8", 0))
assert_frame_equal(result, expected)
@@ -329,7 +328,7 @@ def test_dataframe_dummies_prefix_dict(self, sparse):
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
- expected[columns] = expected[columns].apply(lambda x: pd.SparseSeries(x))
+ expected[columns] = expected[columns].astype(pd.SparseDtype("uint8", 0))
assert_frame_equal(result, expected)
@@ -495,7 +494,7 @@ def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):
expected = expected[["C", "A_b", "B_c", "cat_y"]]
if sparse:
for col in cols:
- expected[col] = pd.SparseSeries(expected[col])
+ expected[col] = pd.SparseArray(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
@@ -517,7 +516,7 @@ def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
- expected[col] = pd.SparseSeries(expected[col])
+ expected[col] = pd.SparseArray(expected[col])
assert_frame_equal(result, expected)
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index e4987e4483fd9..b51429d0338e3 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -254,6 +254,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp("2017-01-01", tz=tz_left)
right = Timestamp("2017-01-02", tz=tz_right)
- error = TypeError if com._any_none(tz_left, tz_right) else ValueError
+ error = TypeError if com.any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index 4cff061cabc40..357274e724c68 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -30,11 +30,8 @@ def test_asfreq_near_zero_weekly(self):
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
- @pytest.mark.xfail(
- reason="GH#19643 period_helper asfreq functions fail to check for overflows"
- )
def test_to_timestamp_out_of_bounds(self):
- # GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848')
+ # GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
with pytest.raises(OutOfBoundsDatetime):
per.to_timestamp()
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 4404b93e86218..a1de205afc0e2 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -10,6 +10,7 @@
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
+from pandas.compat import PY35
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
@@ -1297,23 +1298,13 @@ def test_add_offset_nat(self):
timedelta(365),
]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
for freq in ["M", "2M", "3M"]:
p = Period("NaT", freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
for o in [
offsets.YearBegin(2),
@@ -1323,12 +1314,7 @@ def test_add_offset_nat(self):
timedelta(365),
]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
# freq is Tick
for freq in ["D", "2D", "3D"]:
@@ -1342,12 +1328,7 @@ def test_add_offset_nat(self):
timedelta(hours=48),
]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
for o in [
offsets.YearBegin(2),
@@ -1357,12 +1338,7 @@ def test_add_offset_nat(self):
timedelta(hours=23),
]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
for freq in ["H", "2H", "3H"]:
p = Period("NaT", freq=freq)
@@ -1375,9 +1351,7 @@ def test_add_offset_nat(self):
timedelta(days=4, minutes=180),
]:
assert p + o is NaT
-
- if not isinstance(o, np.timedelta64):
- assert o + p is NaT
+ assert o + p is NaT
for o in [
offsets.YearBegin(2),
@@ -1387,12 +1361,7 @@ def test_add_offset_nat(self):
timedelta(hours=23, minutes=30),
]:
assert p + o is NaT
-
- if isinstance(o, np.timedelta64):
- with pytest.raises(TypeError):
- o + p
- else:
- assert o + p is NaT
+ assert o + p is NaT
def test_sub_offset(self):
# freq is DateOffset
@@ -1579,8 +1548,13 @@ def test_period_immutable():
per.freq = 2 * freq
-# TODO: This doesn't fail on all systems; track down which
-@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems", strict=False)
+@pytest.mark.xfail(
+ # xpassing on MacPython with strict=False
+ # https://travis-ci.org/MacPython/pandas-wheels/jobs/574706922
+ PY35,
+ reason="Parsing as Period('0007-01-01', 'D') for reasons unknown",
+ strict=False,
+)
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index f935a7fa880c7..5eb69fb2952dc 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -1,4 +1,5 @@
from datetime import datetime, timedelta
+import operator
import numpy as np
import pytest
@@ -7,6 +8,8 @@
from pandas._libs.tslibs import iNaT
import pandas.compat as compat
+from pandas.core.dtypes.common import is_datetime64_any_dtype
+
from pandas import (
DatetimeIndex,
Index,
@@ -18,7 +21,8 @@
Timestamp,
isna,
)
-from pandas.core.arrays import PeriodArray
+from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
+from pandas.core.ops import roperator
from pandas.util import testing as tm
@@ -248,6 +252,7 @@ def _get_overlap_public_nat_methods(klass, as_tuple=False):
"day_name",
"dst",
"floor",
+ "fromisocalendar",
"fromisoformat",
"fromordinal",
"fromtimestamp",
@@ -292,6 +297,8 @@ def test_overlap_public_nat_methods(klass, expected):
# "fromisoformat" was introduced in 3.7
if klass is Timestamp and not compat.PY37:
expected.remove("fromisoformat")
+ if klass is Timestamp and not compat.PY38:
+ expected.remove("fromisocalendar")
assert _get_overlap_public_nat_methods(klass) == expected
@@ -331,8 +338,9 @@ def test_nat_doc_strings(compare):
"value,val_type",
[
(2, "scalar"),
- (1.5, "scalar"),
- (np.nan, "scalar"),
+ (1.5, "floating"),
+ (np.nan, "floating"),
+ ("foo", "str"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
@@ -346,6 +354,14 @@ def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
+ "floating": {
+ "right_div_left",
+ "left_minus_right",
+ "right_minus_left",
+ "left_plus_right",
+ "right_plus_left",
+ },
+ "str": set(_ops.keys()),
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {
"left_times_right",
@@ -364,6 +380,16 @@ def test_nat_arithmetic_scalar(op_name, value, val_type):
and isinstance(value, Timedelta)
):
msg = "Cannot multiply"
+ elif val_type == "str":
+ # un-specific check here because the message comes from str
+ # and varies by method
+ msg = (
+ "can only concatenate str|"
+ "unsupported operand type|"
+ "can't multiply sequence|"
+ "Can't convert 'NaTType'|"
+ "must be str, not NaTType"
+ )
else:
msg = "unsupported operand type"
@@ -397,7 +423,9 @@ def test_nat_rfloordiv_timedelta(val, expected):
"value",
[
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
- DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
+ DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
+ DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]),
+ DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], tz="US/Pacific"),
TimedeltaIndex(["1 day", "2 day"], name="x"),
],
)
@@ -406,19 +434,24 @@ def test_nat_arithmetic_index(op_name, value):
exp_name = "x"
exp_data = [NaT] * 2
- if isinstance(value, DatetimeIndex) and "plus" in op_name:
- expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz)
+ if is_datetime64_any_dtype(value.dtype) and "plus" in op_name:
+ expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
- tm.assert_index_equal(_ops[op_name](NaT, value), expected)
+ if not isinstance(value, Index):
+ expected = expected.array
+
+ op = _ops[op_name]
+ result = op(NaT, value)
+ tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"op_name",
["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
-@pytest.mark.parametrize("box", [TimedeltaIndex, Series])
+@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
@@ -426,6 +459,28 @@ def test_nat_arithmetic_td64_vector(op_name, box):
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
+@pytest.mark.parametrize(
+ "dtype,op,out_dtype",
+ [
+ ("datetime64[ns]", operator.add, "datetime64[ns]"),
+ ("datetime64[ns]", roperator.radd, "datetime64[ns]"),
+ ("datetime64[ns]", operator.sub, "timedelta64[ns]"),
+ ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"),
+ ("timedelta64[ns]", operator.add, "datetime64[ns]"),
+ ("timedelta64[ns]", roperator.radd, "datetime64[ns]"),
+ ("timedelta64[ns]", operator.sub, "datetime64[ns]"),
+ ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"),
+ ],
+)
+def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
+ other = np.arange(10).astype(dtype)
+ result = op(NaT, other)
+
+ expected = np.empty(other.shape, dtype=out_dtype)
+ expected.fill("NaT")
+ tm.assert_numpy_array_equal(result, expected)
+
+
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index 58bd03129f2df..9634c6d822236 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas.errors import NullFrequencyError
+
from pandas import Timedelta, Timestamp
import pandas.util.testing as tm
@@ -66,6 +68,20 @@ def test_delta_preserve_nanos(self):
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
+ def test_rsub_dtscalars(self, tz_naive_fixture):
+ # In particular, check that datetime64 - Timestamp works GH#28286
+ td = Timedelta(1235345642000)
+ ts = Timestamp.now(tz_naive_fixture)
+ other = ts + td
+
+ assert other - ts == td
+ assert other.to_pydatetime() - ts == td
+ if tz_naive_fixture is None:
+ assert other.to_datetime64() - ts == td
+ else:
+ with pytest.raises(TypeError, match="subtraction must have"):
+ other.to_datetime64() - ts
+
def test_timestamp_sub_datetime(self):
dt = datetime(2013, 10, 12)
ts = Timestamp(datetime(2013, 10, 13))
@@ -151,3 +167,56 @@ def test_timestamp_add_timedelta64_unit(self, other, expected_difference):
result = ts + other
valdiff = result.value - ts.value
assert valdiff == expected_difference
+
+ @pytest.mark.parametrize("ts", [Timestamp.now(), Timestamp.now("utc")])
+ @pytest.mark.parametrize(
+ "other",
+ [
+ 1,
+ np.int64(1),
+ np.array([1, 2], dtype=np.int32),
+ np.array([3, 4], dtype=np.uint64),
+ ],
+ )
+ def test_add_int_no_freq_raises(self, ts, other):
+ with pytest.raises(NullFrequencyError, match="without freq"):
+ ts + other
+ with pytest.raises(NullFrequencyError, match="without freq"):
+ other + ts
+
+ with pytest.raises(NullFrequencyError, match="without freq"):
+ ts - other
+ with pytest.raises(TypeError):
+ other - ts
+
+ @pytest.mark.parametrize(
+ "ts",
+ [
+ Timestamp("1776-07-04", freq="D"),
+ Timestamp("1776-07-04", tz="UTC", freq="D"),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "other",
+ [
+ 1,
+ np.int64(1),
+ np.array([1, 2], dtype=np.int32),
+ np.array([3, 4], dtype=np.uint64),
+ ],
+ )
+ def test_add_int_with_freq(self, ts, other):
+ with tm.assert_produces_warning(FutureWarning):
+ result1 = ts + other
+ with tm.assert_produces_warning(FutureWarning):
+ result2 = other + ts
+
+ assert np.all(result1 == result2)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = result1 - other
+
+ assert np.all(result == ts)
+
+ with pytest.raises(TypeError):
+ other - ts
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 401fc285424fe..652dd34ca7ce2 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -1047,3 +1047,23 @@ def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
+
+
+class SubDatetime(datetime):
+ pass
+
+
+@pytest.mark.parametrize(
+ "lh,rh",
+ [
+ (SubDatetime(2000, 1, 1), Timedelta(hours=1)),
+ (Timedelta(hours=1), SubDatetime(2000, 1, 1)),
+ ],
+)
+def test_dt_subclass_add_timedelta(lh, rh):
+ # GH#25851
+ # ensure that subclassed datetime works for
+ # Timedelta operations
+ result = lh + rh
+ expected = SubDatetime(2000, 1, 1, 1)
+ assert result == expected
diff --git a/pandas/tests/series/conftest.py b/pandas/tests/series/conftest.py
index f5b401398d6d6..18d3c87a01f87 100644
--- a/pandas/tests/series/conftest.py
+++ b/pandas/tests/series/conftest.py
@@ -26,7 +26,7 @@ def string_series():
@pytest.fixture
def object_series():
"""
- Fixture for Series of dtype datetime64[ns] with Index of unique strings
+ Fixture for Series of dtype object with Index of unique strings
"""
s = tm.makeObjectSeries()
s.name = "objects"
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index e0b84e8708fa1..bf8d34cd62ff2 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -110,17 +110,16 @@ def test_series_set_value():
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- s = Series().set_value(dates[0], 1.0)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- s2 = s.set_value(dates[1], np.nan)
+ s = Series()._set_value(dates[0], 1.0)
+ s2 = s._set_value(dates[1], np.nan)
- exp = Series([1.0, np.nan], index=index)
+ expected = Series([1.0, np.nan], index=index)
- assert_series_equal(s2, exp)
+ assert_series_equal(s2, expected)
+    # FIXME: don't leave commented-out
# s = Series(index[:1], index[:1])
- # s2 = s.set_value(dates[1], index[1])
+ # s2 = s._set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index d73be76795c88..f50b3ddbce7dc 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -372,15 +372,13 @@ def test_setitem_dtypes():
def test_set_value(test_data):
idx = test_data.ts.index[10]
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res = test_data.ts.set_value(idx, 0)
+ res = test_data.ts._set_value(idx, 0)
assert res is test_data.ts
assert test_data.ts[idx] == 0
# equiv
s = test_data.series.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res = s.set_value("foobar", 0)
+ res = s._set_value("foobar", 0)
assert res is s
assert res.index[-1] == "foobar"
assert res["foobar"] == 0
@@ -654,6 +652,38 @@ def test_timedelta_assignment():
tm.assert_series_equal(s, expected)
+@pytest.mark.parametrize(
+ "nat_val,should_cast",
+ [
+ (pd.NaT, True),
+ (np.timedelta64("NaT", "ns"), False),
+ (np.datetime64("NaT", "ns"), True),
+ ],
+)
+@pytest.mark.parametrize("tz", [None, "UTC"])
+def test_dt64_series_assign_nat(nat_val, should_cast, tz):
+ # some nat-like values should be cast to datetime64 when inserting
+ # into a datetime64 series. Others should coerce to object
+ # and retain their dtypes.
+ dti = pd.date_range("2016-01-01", periods=3, tz=tz)
+ base = pd.Series(dti)
+ expected = pd.Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype)
+ if not should_cast:
+ expected = expected.astype(object)
+
+ ser = base.copy(deep=True)
+ ser[0] = nat_val
+ tm.assert_series_equal(ser, expected)
+
+ ser = base.copy(deep=True)
+ ser.loc[0] = nat_val
+ tm.assert_series_equal(ser, expected)
+
+ ser = base.copy(deep=True)
+ ser.iloc[0] = nat_val
+ tm.assert_series_equal(ser, expected)
+
+
@pytest.mark.parametrize(
"nat_val,should_cast",
[
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 63baa6af7c02a..5d74ad95be90d 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -267,6 +267,25 @@ def test_rename_axis_none(self, kwargs):
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
+ def test_rename_with_custom_indexer(self):
+ # GH 27814
+ class MyIndexer:
+ pass
+
+ ix = MyIndexer()
+ s = Series([1, 2, 3]).rename(ix)
+ assert s.name is ix
+
+ def test_rename_with_custom_indexer_inplace(self):
+ # GH 27814
+ class MyIndexer:
+ pass
+
+ ix = MyIndexer()
+ s = Series([1, 2, 3])
+ s.rename(ix, inplace=True)
+ assert s.name is ix
+
def test_set_axis_inplace_axes(self, axis_series):
# GH14636
ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
@@ -277,12 +296,9 @@ def test_set_axis_inplace_axes(self, axis_series):
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
- for inplace, warn in [(None, FutureWarning), (True, None)]:
- result = ser.copy()
- kwargs = {"inplace": inplace}
- with tm.assert_produces_warning(warn):
- result.set_axis(list("abcd"), axis=axis_series, **kwargs)
- tm.assert_series_equal(result, expected)
+ result = ser.copy()
+ result.set_axis(list("abcd"), axis=axis_series, inplace=True)
+ tm.assert_series_equal(result, expected)
def test_set_axis_inplace(self):
# GH14636
@@ -322,9 +338,9 @@ def test_reset_index_drop_errors(self):
# KeyError raised for series index when passed level name is missing
s = Series(range(4))
- with pytest.raises(KeyError, match="must be same as name"):
+ with pytest.raises(KeyError, match="does not match index name"):
s.reset_index("wrong", drop=True)
- with pytest.raises(KeyError, match="must be same as name"):
+ with pytest.raises(KeyError, match="does not match index name"):
s.reset_index("wrong")
# KeyError raised for series when level to be dropped is missing
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 32d32a5d14fb2..1ddaa4692d741 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1482,16 +1482,7 @@ def test_value_counts_with_nan(self):
@pytest.mark.parametrize(
"dtype",
- [
- "int_",
- "uint",
- "float_",
- "unicode_",
- "timedelta64[h]",
- pytest.param(
- "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=False)
- ),
- ],
+ ["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
)
def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
@@ -1499,6 +1490,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture))
+ if dtype == "datetime64[D]":
+        # pre-empt flaky xfail, tc1 values are seemingly-random
+ if not (np.array(tc1) == input1).all():
+ pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
@@ -1524,6 +1519,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture))
+ if dtype == "datetime64[D]":
+        # pre-empt flaky xfail, tc2 values are seemingly-random
+ if not (np.array(tc2) == input2).all():
+ pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index d204d7d2a1d7c..762f4a37d17cc 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -131,12 +131,6 @@ def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
- def test_to_sparse_pass_name(self):
- result = self.ts.to_sparse()
- assert result.name == self.ts.name
-
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = self.series_klass(d)
@@ -206,11 +200,9 @@ def test_constructor_dict_timedelta_index(self):
)
self._assert_series_equal(result, expected)
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_array_deprecated(self):
- # multiple FutureWarnings, so can't assert stacklevel
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=True):
self.series_klass.from_array([1, 2, 3])
def test_sparse_accessor_updates_on_inplace(self):
@@ -697,6 +689,7 @@ def test_dt_accessor_api_for_categorical(self):
("floor", ("D",), {}),
("ceil", ("D",), {}),
("asfreq", ("D",), {}),
+ # FIXME: don't leave commented-out
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
@@ -729,20 +722,11 @@ def test_dt_accessor_api_for_categorical(self):
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
- if isinstance(res, DataFrame):
- tm.assert_frame_equal(res, exp)
- elif isinstance(res, Series):
- tm.assert_series_equal(res, exp)
- else:
- tm.assert_almost_equal(res, exp)
+ tm.assert_equal(res, exp)
for attr in attr_names:
- try:
- res = getattr(c.dt, attr)
- exp = getattr(s.dt, attr)
- except Exception as e:
- print(name, attr)
- raise e
+ res = getattr(c.dt, attr)
+ exp = getattr(s.dt, attr)
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index bf527bae297d9..819b9228219aa 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -54,6 +54,17 @@ def test_append_duplicates(self):
with pytest.raises(ValueError, match=msg):
pd.concat([s1, s2], verify_integrity=True)
+ def test_append_tuples(self):
+ # GH 28410
+ s = pd.Series([1, 2, 3])
+ list_input = [s, s]
+ tuple_input = (s, s)
+
+ expected = s.append(list_input)
+ result = s.append(tuple_input)
+
+ tm.assert_series_equal(expected, result)
+
def test_combine_scalar(self):
# GH 21248
# Note - combine() with another Series is tested elsewhere because
@@ -218,8 +229,6 @@ def test_combine_first_dt_tz_values(self, tz_naive_fixture):
exp = pd.Series(exp_vals, name="ser1")
assert_series_equal(exp, result)
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_concat_empty_series_dtypes(self):
# booleans
@@ -276,7 +285,10 @@ def test_concat_empty_series_dtypes(self):
# sparse
# TODO: move?
result = pd.concat(
- [Series(dtype="float64").to_sparse(), Series(dtype="float64").to_sparse()]
+ [
+ Series(dtype="float64").astype("Sparse"),
+ Series(dtype="float64").astype("Sparse"),
+ ]
)
assert result.dtype == "Sparse[float64]"
@@ -285,10 +297,10 @@ def test_concat_empty_series_dtypes(self):
assert result.ftype == "float64:sparse"
result = pd.concat(
- [Series(dtype="float64").to_sparse(), Series(dtype="float64")]
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
# TODO: release-note: concat sparse dtype
- expected = pd.core.sparse.api.SparseDtype(np.float64)
+ expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
# GH 26705 - Assert .ftype is deprecated
@@ -296,10 +308,10 @@ def test_concat_empty_series_dtypes(self):
assert result.ftype == "float64:sparse"
result = pd.concat(
- [Series(dtype="float64").to_sparse(), Series(dtype="object")]
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
# TODO: release-note: concat sparse dtype
- expected = pd.core.sparse.api.SparseDtype("object")
+ expected = pd.SparseDtype("object")
assert result.dtype == expected
# GH 26705 - Assert .ftype is deprecated
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 9be79bf93ece7..6ee120f3bec64 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -228,11 +228,10 @@ def test_astype_dict_like(self, dtype_class):
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
- def test_astype_categories_deprecation_raises(self):
-
- # deprecated 17636
+ def test_astype_categories_raises(self):
+ # deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
- with pytest.raises(ValueError, match="Got an unexpected"):
+ with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/test_explode.py b/pandas/tests/series/test_explode.py
index 331546f7dc73d..e4974bd0af145 100644
--- a/pandas/tests/series/test_explode.py
+++ b/pandas/tests/series/test_explode.py
@@ -111,3 +111,11 @@ def test_nested_EA():
pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
)
tm.assert_series_equal(result, expected)
+
+
+def test_duplicate_index():
+ # GH 28005
+ s = pd.Series([[1, 2], [3, 4]], index=[0, 0])
+ result = s.explode()
+ expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 0686b397cbd81..0ddf1dfcabb59 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -191,6 +191,20 @@ def test_to_csv_compression(self, s, encoding, compression):
s, pd.read_csv(fh, index_col=0, squeeze=True, encoding=encoding)
)
+ def test_to_csv_interval_index(self):
+ # GH 28210
+ s = Series(["foo", "bar", "baz"], index=pd.interval_range(0, 3))
+
+ with ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
+ s.to_csv(path, header=False)
+ result = self.read_csv(path, index_col=0, squeeze=True)
+
+ # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
+ expected = s.copy()
+ expected.index = expected.index.astype(str)
+
+ assert_series_equal(result, expected)
+
class TestSeriesIO:
def test_to_frame(self, datetime_series):
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index c5fc52b9b0c41..f459ae9e7845d 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -6,7 +6,6 @@
import pytz
from pandas._libs.tslib import iNaT
-from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
import pandas as pd
@@ -578,6 +577,28 @@ def test_fillna_categorical(self, fill_value, expected_output):
exp = Series(Categorical(expected_output, categories=["a", "b"]))
tm.assert_series_equal(s.fillna(fill_value), exp)
+ @pytest.mark.parametrize(
+ "fill_value, expected_output",
+ [
+ (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
+ (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
+ (
+ Series(
+ Categorical(
+ ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
+ )
+ ),
+ ["a", "d", "b", "d", "a"],
+ ),
+ ],
+ )
+ def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
+ # GH 26215
+ data = ["a", np.nan, "b", np.nan, np.nan]
+ s = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
+ exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
+ tm.assert_series_equal(s.fillna(fill_value), exp)
+
def test_fillna_categorical_raise(self):
data = ["a", np.nan, "b", np.nan, np.nan]
s = Series(Categorical(data, categories=["a", "b"]))
@@ -791,9 +812,11 @@ def test_timedelta64_nan(self):
td1[0] = td[0]
assert not isna(td1[0])
+ # GH#16674 iNaT is treated as an integer when given by the user
td1[1] = iNaT
- assert isna(td1[1])
- assert td1[1].value == iNaT
+ assert not isna(td1[1])
+ assert td1.dtype == np.object_
+ assert td1[1] == iNaT
td1[1] = td[1]
assert not isna(td1[1])
@@ -803,6 +826,7 @@ def test_timedelta64_nan(self):
td1[2] = td[2]
assert not isna(td1[2])
+ # FIXME: don't leave commented-out
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
@@ -967,65 +991,6 @@ def test_series_fillna_limit(self):
expected[:3] = np.nan
assert_series_equal(result, expected)
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
- def test_sparse_series_fillna_limit(self):
- index = np.arange(10)
- s = Series(np.random.randn(10), index=index)
-
- ss = s[:2].reindex(index).to_sparse()
- # TODO: what is this test doing? why are result an expected
- # the same call to fillna?
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- # TODO: release-note fillna performance warning
- result = ss.fillna(method="pad", limit=5)
- expected = ss.fillna(method="pad", limit=5)
- expected = expected.to_dense()
- expected[-3:] = np.nan
- expected = expected.to_sparse()
- assert_series_equal(result, expected)
-
- ss = s[-2:].reindex(index).to_sparse()
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- result = ss.fillna(method="backfill", limit=5)
- expected = ss.fillna(method="backfill")
- expected = expected.to_dense()
- expected[:3] = np.nan
- expected = expected.to_sparse()
- assert_series_equal(result, expected)
-
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
- def test_sparse_series_pad_backfill_limit(self):
- index = np.arange(10)
- s = Series(np.random.randn(10), index=index)
- s = s.to_sparse()
-
- result = s[:2].reindex(index, method="pad", limit=5)
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = s[:2].reindex(index).fillna(method="pad")
- expected = expected.to_dense()
- expected[-3:] = np.nan
- expected = expected.to_sparse()
- assert_series_equal(result, expected)
-
- result = s[-2:].reindex(index, method="backfill", limit=5)
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = s[-2:].reindex(index).fillna(method="backfill")
- expected = expected.to_dense()
- expected[:3] = np.nan
- expected = expected.to_sparse()
- assert_series_equal(result, expected)
-
- @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
@@ -1518,12 +1483,29 @@ def test_interp_nonmono_raise(self):
s.interpolate(method="krogh")
@td.skip_if_no_scipy
- def test_interp_datetime64(self):
- df = Series([1, np.nan, 3], index=date_range("1/1/2000", periods=3))
- result = df.interpolate(method="nearest")
- expected = Series([1.0, 1.0, 3.0], index=date_range("1/1/2000", periods=3))
+ @pytest.mark.parametrize("method", ["nearest", "pad"])
+ def test_interp_datetime64(self, method, tz_naive_fixture):
+ df = Series(
+ [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)
+ )
+ result = df.interpolate(method=method)
+ expected = Series(
+ [1.0, 1.0, 3.0],
+ index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),
+ )
assert_series_equal(result, expected)
+ def test_interp_pad_datetime64tz_values(self):
+ # GH#27628 missing.interpolate_2d should handle datetimetz values
+ dti = pd.date_range("2015-04-05", periods=3, tz="US/Central")
+ ser = pd.Series(dti)
+ ser[1] = pd.NaT
+ result = ser.interpolate(method="pad")
+
+ expected = pd.Series(dti)
+ expected[1] = expected[0]
+ tm.assert_series_equal(result, expected)
+
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1.0, 2.0, 3.0])
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 062c07cb6242a..c2cf91e582c47 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -36,22 +36,14 @@ def test_bool_operators_with_nas(self, bool_op):
expected[mask] = False
assert_series_equal(result, expected)
- def test_operators_bitwise(self):
+ def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
- s_tff = Series([True, False, False], index=index)
s_empty = Series([])
- # TODO: unused
- # s_0101 = Series([0, 1, 0, 1])
-
- s_0123 = Series(range(4), dtype="int64")
- s_3333 = Series([3] * 4)
- s_4444 = Series([4] * 4)
-
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
@@ -60,6 +52,16 @@ def test_operators_bitwise(self):
expected = s_tft
assert_series_equal(res, expected)
+ def test_logical_operators_int_dtype_with_int_dtype(self):
+ # GH#9016: support bitwise op for integer types
+
+ # TODO: unused
+ # s_0101 = Series([0, 1, 0, 1])
+
+ s_0123 = Series(range(4), dtype="int64")
+ s_3333 = Series([3] * 4)
+ s_4444 = Series([4] * 4)
+
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
assert_series_equal(res, expected)
@@ -68,76 +70,160 @@ def test_operators_bitwise(self):
expected = Series(range(4, 8), dtype="int64")
assert_series_equal(res, expected)
- s_a0b1c0 = Series([1], list("b"))
-
- res = s_tft & s_a0b1c0
- expected = s_tff.reindex(list("abc"))
+ s_1111 = Series([1] * 4, dtype="int8")
+ res = s_0123 & s_1111
+ expected = Series([0, 1, 0, 1], dtype="int64")
assert_series_equal(res, expected)
- res = s_tft | s_a0b1c0
- expected = s_tft.reindex(list("abc"))
+ res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
+ expected = Series([1, 1, 3, 3], dtype="int32")
assert_series_equal(res, expected)
- n0 = 0
- res = s_tft & n0
- expected = s_fff
- assert_series_equal(res, expected)
+ def test_logical_operators_int_dtype_with_int_scalar(self):
+ # GH#9016: support bitwise op for integer types
+ s_0123 = Series(range(4), dtype="int64")
- res = s_0123 & n0
+ res = s_0123 & 0
expected = Series([0] * 4)
assert_series_equal(res, expected)
- n1 = 1
- res = s_tft & n1
- expected = s_tft
- assert_series_equal(res, expected)
-
- res = s_0123 & n1
+ res = s_0123 & 1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
- s_1111 = Series([1] * 4, dtype="int8")
- res = s_0123 & s_1111
- expected = Series([0, 1, 0, 1], dtype="int64")
- assert_series_equal(res, expected)
-
- res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
- expected = Series([1, 1, 3, 3], dtype="int32")
- assert_series_equal(res, expected)
+ def test_logical_operators_int_dtype_with_float(self):
+ # GH#9016: support bitwise op for integer types
+ s_0123 = Series(range(4), dtype="int64")
- with pytest.raises(TypeError):
- s_1111 & "a"
- with pytest.raises(TypeError):
- s_1111 & ["a", "b", "c", "d"]
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
+ with pytest.raises(TypeError):
+ s_0123 & np.array([0.1, 4, 3.14, 2])
+ with pytest.raises(TypeError):
+ s_0123 & Series([0.1, 4, -3.14, 2])
- # s_0123 will be all false now because of reindexing like s_tft
- exp = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
- assert_series_equal(s_tft & s_0123, exp)
-
- # s_tft will be all false now because of reindexing like s_0123
- exp = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
- assert_series_equal(s_0123 & s_tft, exp)
-
- assert_series_equal(s_0123 & False, Series([False] * 4))
- assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
- assert_series_equal(s_0123 & [False], Series([False] * 4))
- assert_series_equal(s_0123 & (False), Series([False] * 4))
- assert_series_equal(
- s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4)
- )
+ def test_logical_operators_int_dtype_with_str(self):
+ s_1111 = Series([1] * 4, dtype="int8")
+
+ with pytest.raises(TypeError):
+ s_1111 & "a"
+ with pytest.raises(TypeError):
+ s_1111 & ["a", "b", "c", "d"]
+
+ def test_logical_operators_int_dtype_with_bool(self):
+ # GH#9016: support bitwise op for integer types
+ s_0123 = Series(range(4), dtype="int64")
+
+ expected = Series([False] * 4)
+
+ result = s_0123 & False
+ assert_series_equal(result, expected)
+
+ result = s_0123 & [False]
+ assert_series_equal(result, expected)
+
+ result = s_0123 & (False,)
+ assert_series_equal(result, expected)
- s_ftft = Series([False, True, False, True])
- assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
+ result = s_0123 ^ False
+ expected = Series([False, True, True, True])
+ assert_series_equal(result, expected)
+
+ def test_logical_operators_int_dtype_with_object(self):
+ # GH#9016: support bitwise op for integer types
+ s_0123 = Series(range(4), dtype="int64")
+
+ result = s_0123 & Series([False, np.NaN, False, False])
+ expected = Series([False] * 4)
+ assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
- res = s_0123 & s_abNd
- expected = s_ftft
+ with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
+ s_0123 & s_abNd
+
+ def test_logical_operators_bool_dtype_with_int(self):
+ index = list("bca")
+
+ s_tft = Series([True, False, True], index=index)
+ s_fff = Series([False, False, False], index=index)
+
+ res = s_tft & 0
+ expected = s_fff
+ assert_series_equal(res, expected)
+
+ res = s_tft & 1
+ expected = s_tft
+ assert_series_equal(res, expected)
+
+ def test_logical_ops_bool_dtype_with_ndarray(self):
+ # make sure we operate on ndarray the same as Series
+ left = pd.Series([True, True, True, False, True])
+ right = [True, False, None, True, np.nan]
+
+ expected = pd.Series([True, False, False, False, False])
+ result = left & right
+ tm.assert_series_equal(result, expected)
+ result = left & np.array(right)
+ tm.assert_series_equal(result, expected)
+ result = left & pd.Index(right)
+ tm.assert_series_equal(result, expected)
+ result = left & pd.Series(right)
+ tm.assert_series_equal(result, expected)
+
+ expected = pd.Series([True, True, True, True, True])
+ result = left | right
+ tm.assert_series_equal(result, expected)
+ result = left | np.array(right)
+ tm.assert_series_equal(result, expected)
+ result = left | pd.Index(right)
+ tm.assert_series_equal(result, expected)
+ result = left | pd.Series(right)
+ tm.assert_series_equal(result, expected)
+
+ expected = pd.Series([False, True, True, True, True])
+ result = left ^ right
+ tm.assert_series_equal(result, expected)
+ result = left ^ np.array(right)
+ tm.assert_series_equal(result, expected)
+ result = left ^ pd.Index(right)
+ tm.assert_series_equal(result, expected)
+ result = left ^ pd.Series(right)
+ tm.assert_series_equal(result, expected)
+
+ def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
+ # GH#9016: support bitwise op for integer types
+
+ # with non-matching indexes, logical operators will cast to object
+ # before operating
+ index = list("bca")
+
+ s_tft = Series([True, False, True], index=index)
+ s_tft = Series([True, False, True], index=index)
+ s_tff = Series([True, False, False], index=index)
+
+ s_0123 = Series(range(4), dtype="int64")
+
+ # s_0123 will be all false now because of reindexing like s_tft
+ expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
+ result = s_tft & s_0123
+ assert_series_equal(result, expected)
+
+ expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
+ result = s_0123 & s_tft
+ assert_series_equal(result, expected)
+
+ s_a0b1c0 = Series([1], list("b"))
+
+ res = s_tft & s_a0b1c0
+ expected = s_tff.reindex(list("abc"))
+ assert_series_equal(res, expected)
+
+ res = s_tft | s_a0b1c0
+ expected = s_tft.reindex(list("abc"))
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
@@ -523,6 +609,7 @@ def test_comparison_operators_with_nas(self):
assert_series_equal(result, expected)
+ # FIXME: don't leave commented-out
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index 9b34b52bf39b9..4aeb211170d8f 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -71,10 +71,9 @@ def test_NaT_scalar(self):
series[2] = val
assert pd.isna(series[2])
- @pytest.mark.xfail(reason="PeriodDtype Series not supported yet")
def test_NaT_cast(self):
result = Series([np.nan]).astype("period[D]")
- expected = Series([pd.NaT])
+ expected = Series([pd.NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_set_none(self):
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 3e8f653c47424..3c6da304dd68d 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -14,7 +14,6 @@
period_range,
timedelta_range,
)
-from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
import pandas.util.testing as tm
@@ -226,11 +225,11 @@ class TestCategoricalRepr:
def test_categorical_repr_unicode(self):
# see gh-21002
- class County(StringMixin):
+ class County:
name = "San Sebastián"
state = "PR"
- def __str__(self):
+ def __repr__(self):
return self.name + ", " + self.state
cat = pd.Categorical([County() for _ in range(61)])
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index 450fdc3f4dd6f..6b82f890e974b 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -1,8 +1,3 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import SparseDtype
import pandas.util.testing as tm
@@ -38,67 +33,3 @@ def test_subclass_unstack(self):
def test_subclass_empty_repr(self):
assert "SubclassedSeries" in repr(tm.SubclassedSeries())
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-class TestSparseSeriesSubclassing:
- def test_subclass_sparse_slice(self):
- # int64
- s = tm.SubclassedSparseSeries([1, 2, 3, 4, 5])
- exp = tm.SubclassedSparseSeries([2, 3, 4], index=[1, 2, 3])
- tm.assert_sp_series_equal(s.loc[1:3], exp)
- assert s.loc[1:3].dtype == SparseDtype(np.int64)
-
- exp = tm.SubclassedSparseSeries([2, 3], index=[1, 2])
- tm.assert_sp_series_equal(s.iloc[1:3], exp)
- assert s.iloc[1:3].dtype == SparseDtype(np.int64)
-
- exp = tm.SubclassedSparseSeries([2, 3], index=[1, 2])
- tm.assert_sp_series_equal(s[1:3], exp)
- assert s[1:3].dtype == SparseDtype(np.int64)
-
- # float64
- s = tm.SubclassedSparseSeries([1.0, 2.0, 3.0, 4.0, 5.0])
- exp = tm.SubclassedSparseSeries([2.0, 3.0, 4.0], index=[1, 2, 3])
- tm.assert_sp_series_equal(s.loc[1:3], exp)
- assert s.loc[1:3].dtype == SparseDtype(np.float64)
-
- exp = tm.SubclassedSparseSeries([2.0, 3.0], index=[1, 2])
- tm.assert_sp_series_equal(s.iloc[1:3], exp)
- assert s.iloc[1:3].dtype == SparseDtype(np.float64)
-
- exp = tm.SubclassedSparseSeries([2.0, 3.0], index=[1, 2])
- tm.assert_sp_series_equal(s[1:3], exp)
- assert s[1:3].dtype == SparseDtype(np.float64)
-
- def test_subclass_sparse_addition(self):
- s1 = tm.SubclassedSparseSeries([1, 3, 5])
- s2 = tm.SubclassedSparseSeries([-2, 5, 12])
- exp = tm.SubclassedSparseSeries([-1, 8, 17])
- tm.assert_sp_series_equal(s1 + s2, exp)
-
- s1 = tm.SubclassedSparseSeries([4.0, 5.0, 6.0])
- s2 = tm.SubclassedSparseSeries([1.0, 2.0, 3.0])
- exp = tm.SubclassedSparseSeries([5.0, 7.0, 9.0])
- tm.assert_sp_series_equal(s1 + s2, exp)
-
- def test_subclass_sparse_to_frame(self):
- s = tm.SubclassedSparseSeries([1, 2], index=list("ab"), name="xxx")
- res = s.to_frame()
-
- exp_arr = pd.SparseArray([1, 2], dtype=np.int64, kind="block", fill_value=0)
- exp = tm.SubclassedSparseDataFrame(
- {"xxx": exp_arr}, index=list("ab"), default_fill_value=0
- )
- tm.assert_sp_frame_equal(res, exp)
-
- # create from int dict
- res = tm.SubclassedSparseDataFrame(
- {"xxx": [1, 2]}, index=list("ab"), default_fill_value=0
- )
- tm.assert_sp_frame_equal(res, exp)
-
- s = tm.SubclassedSparseSeries([1.1, 2.1], index=list("ab"), name="xxx")
- res = s.to_frame()
- exp = tm.SubclassedSparseDataFrame({"xxx": [1.1, 2.1]}, index=list("ab"))
- tm.assert_sp_frame_equal(res, exp)
diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index c024e9caba156..8144a3931b9b8 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -252,10 +252,7 @@ def __add__(self, other):
"values",
[
pd.array([1, 3, 2]),
- pytest.param(
- pd.array([1, 10, 0], dtype="Sparse[int]"),
- marks=pytest.mark.xfail(resason="GH-27080. Bug in SparseArray"),
- ),
+ pd.array([1, 10, 0], dtype="Sparse[int]"),
pd.to_datetime(["2000", "2010", "2001"]),
pd.to_datetime(["2000", "2010", "2001"]).tz_localize("CET"),
pd.to_datetime(["2000", "2010", "2001"]).to_period(freq="D"),
diff --git a/pandas/tests/sparse/__init__.py b/pandas/tests/sparse/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tests/sparse/common.py b/pandas/tests/sparse/common.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tests/sparse/frame/__init__.py b/pandas/tests/sparse/frame/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tests/sparse/frame/conftest.py b/pandas/tests/sparse/frame/conftest.py
deleted file mode 100644
index 989b58419c2cd..0000000000000
--- a/pandas/tests/sparse/frame/conftest.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range
-
-data = {
- "A": [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
- "B": [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
- "C": np.arange(10, dtype=np.float64),
- "D": [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan],
-}
-dates = bdate_range("1/1/2011", periods=10)
-
-
-# fixture names must be compatible with the tests in
-# tests/frame/test_api.SharedWithSparse
-
-
-@pytest.fixture
-def float_frame_dense():
- """
- Fixture for dense DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; some entries are missing
- """
- return DataFrame(data, index=dates)
-
-
-@pytest.fixture
-def float_frame():
- """
- Fixture for sparse DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; some entries are missing
- """
- # default_kind='block' is the default
- return SparseDataFrame(data, index=dates, default_kind="block")
-
-
-@pytest.fixture
-def float_frame_int_kind():
- """
- Fixture for sparse DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
- Some entries are missing.
- """
- return SparseDataFrame(data, index=dates, default_kind="integer")
-
-
-@pytest.fixture
-def float_string_frame():
- """
- Fixture for sparse DataFrame of floats and strings with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
- """
- sdf = SparseDataFrame(data, index=dates)
- sdf["foo"] = SparseArray(["bar"] * len(dates))
- return sdf
-
-
-@pytest.fixture
-def float_frame_fill0_dense():
- """
- Fixture for dense DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
- """
- values = SparseDataFrame(data).values
- values[np.isnan(values)] = 0
- return DataFrame(values, columns=["A", "B", "C", "D"], index=dates)
-
-
-@pytest.fixture
-def float_frame_fill0():
- """
- Fixture for sparse DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
- """
- values = SparseDataFrame(data).values
- values[np.isnan(values)] = 0
- return SparseDataFrame(
- values, columns=["A", "B", "C", "D"], default_fill_value=0, index=dates
- )
-
-
-@pytest.fixture
-def float_frame_fill2_dense():
- """
- Fixture for dense DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
- """
- values = SparseDataFrame(data).values
- values[np.isnan(values)] = 2
- return DataFrame(values, columns=["A", "B", "C", "D"], index=dates)
-
-
-@pytest.fixture
-def float_frame_fill2():
- """
- Fixture for sparse DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
- """
- values = SparseDataFrame(data).values
- values[np.isnan(values)] = 2
- return SparseDataFrame(
- values, columns=["A", "B", "C", "D"], default_fill_value=2, index=dates
- )
-
-
-@pytest.fixture
-def empty_frame():
- """
- Fixture for empty SparseDataFrame
- """
- return SparseDataFrame()
diff --git a/pandas/tests/sparse/frame/test_analytics.py b/pandas/tests/sparse/frame/test_analytics.py
deleted file mode 100644
index fae879b3d33b5..0000000000000
--- a/pandas/tests/sparse/frame/test_analytics.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, SparseDataFrame, SparseSeries
-from pandas.util import testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_quantile():
- # GH 17386
- data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
- q = 0.1
-
- sparse_df = SparseDataFrame(data)
- result = sparse_df.quantile(q)
-
- dense_df = DataFrame(data)
- dense_expected = dense_df.quantile(q)
- sparse_expected = SparseSeries(dense_expected)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_quantile_multi():
- # GH 17386
- data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
- q = [0.1, 0.5]
-
- sparse_df = SparseDataFrame(data)
- result = sparse_df.quantile(q)
-
- dense_df = DataFrame(data)
- dense_expected = dense_df.quantile(q)
- sparse_expected = SparseDataFrame(dense_expected)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
diff --git a/pandas/tests/sparse/frame/test_apply.py b/pandas/tests/sparse/frame/test_apply.py
deleted file mode 100644
index d8158db32d8f0..0000000000000
--- a/pandas/tests/sparse/frame/test_apply.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, Series, SparseDataFrame, bdate_range
-from pandas.core import nanops
-from pandas.core.sparse.api import SparseDtype
-from pandas.util import testing as tm
-
-
-@pytest.fixture
-def dates():
- return bdate_range("1/1/2011", periods=10)
-
-
-@pytest.fixture
-def empty():
- return SparseDataFrame()
-
-
-@pytest.fixture
-def frame(dates):
- data = {
- "A": [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
- "B": [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
- "C": np.arange(10, dtype=np.float64),
- "D": [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan],
- }
-
- return SparseDataFrame(data, index=dates)
-
-
-@pytest.fixture
-def fill_frame(frame):
- values = frame.values.copy()
- values[np.isnan(values)] = 2
-
- return SparseDataFrame(
- values, columns=["A", "B", "C", "D"], default_fill_value=2, index=frame.index
- )
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-def test_apply(frame):
- applied = frame.apply(np.sqrt)
- assert isinstance(applied, SparseDataFrame)
- tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
-
- # agg / broadcast
- # two FutureWarnings, so we can't check stacklevel properly.
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- broadcasted = frame.apply(np.sum, broadcast=True)
- assert isinstance(broadcasted, SparseDataFrame)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- exp = frame.to_dense().apply(np.sum, broadcast=True)
- tm.assert_frame_equal(broadcasted.to_dense(), exp)
-
- applied = frame.apply(np.sum)
- tm.assert_series_equal(applied, frame.to_dense().apply(nanops.nansum).to_sparse())
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_apply_fill(fill_frame):
- applied = fill_frame.apply(np.sqrt)
- assert applied["A"].fill_value == np.sqrt(2)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_apply_empty(empty):
- assert empty.apply(np.sqrt) is empty
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-def test_apply_nonuq():
- orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
- sparse = orig.to_sparse()
- res = sparse.apply(lambda s: s[0], axis=1)
- exp = orig.apply(lambda s: s[0], axis=1)
-
- # dtype must be kept
- assert res.dtype == SparseDtype(np.int64)
-
- # ToDo: apply must return subclassed dtype
- assert isinstance(res, Series)
- tm.assert_series_equal(res.to_dense(), exp)
-
- # df.T breaks
- sparse = orig.T.to_sparse()
- res = sparse.apply(lambda s: s[0], axis=0) # noqa
- exp = orig.T.apply(lambda s: s[0], axis=0)
-
- # TODO: no non-unique columns supported in sparse yet
- # tm.assert_series_equal(res.to_dense(), exp)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_applymap(frame):
- # just test that it works
- result = frame.applymap(lambda x: x * 2)
- assert isinstance(result, SparseDataFrame)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_apply_keep_sparse_dtype():
- # GH 23744
- sdf = SparseDataFrame(
- np.array([[0, 1, 0], [0, 0, 0], [0, 0, 1]]),
- columns=["b", "a", "c"],
- default_fill_value=1,
- )
- df = DataFrame(sdf)
-
- expected = sdf.apply(np.exp)
- result = df.apply(np.exp)
- tm.assert_frame_equal(expected, result)
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
deleted file mode 100644
index 5682c74a8b692..0000000000000
--- a/pandas/tests/sparse/frame/test_frame.py
+++ /dev/null
@@ -1,1585 +0,0 @@
-import operator
-from types import LambdaType
-
-import numpy as np
-from numpy import nan
-import pytest
-
-from pandas._libs.sparse import BlockIndex, IntIndex
-from pandas.errors import PerformanceWarning
-
-import pandas as pd
-from pandas import DataFrame, Series, bdate_range, compat
-from pandas.core import ops
-from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas.core.sparse import frame as spf
-from pandas.core.sparse.api import (
- SparseArray,
- SparseDataFrame,
- SparseDtype,
- SparseSeries,
-)
-from pandas.tests.frame.test_api import SharedWithSparse
-from pandas.util import testing as tm
-
-from pandas.tseries.offsets import BDay
-
-
-def test_deprecated():
- with tm.assert_produces_warning(FutureWarning):
- pd.SparseDataFrame({"A": [1, 2]})
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrame(SharedWithSparse):
- klass = SparseDataFrame
-
- # SharedWithSparse tests use generic, klass-agnostic assertion
- _assert_frame_equal = staticmethod(tm.assert_sp_frame_equal)
- _assert_series_equal = staticmethod(tm.assert_sp_series_equal)
-
- def test_iterrows(self, float_frame, float_string_frame):
- # Same as parent, but we don't ensure the sparse kind is the same.
- for k, v in float_frame.iterrows():
- exp = float_frame.loc[k]
- tm.assert_sp_series_equal(v, exp, check_kind=False)
-
- for k, v in float_string_frame.iterrows():
- exp = float_string_frame.loc[k]
- tm.assert_sp_series_equal(v, exp, check_kind=False)
-
- def test_itertuples(self, float_frame):
- for i, tup in enumerate(float_frame.itertuples()):
- s = self.klass._constructor_sliced(tup[1:])
- s.name = tup[0]
- expected = float_frame.iloc[i, :].reset_index(drop=True)
- tm.assert_sp_series_equal(s, expected, check_kind=False)
-
- def test_fill_value_when_combine_const(self):
- # GH12723
- dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
- df = SparseDataFrame({"foo": dat}, index=range(6))
-
- exp = df.fillna(0).add(2)
- res = df.add(2, fill_value=0)
- tm.assert_sp_frame_equal(res, exp)
-
- def test_values(self, empty_frame, float_frame):
- empty = empty_frame.values
- assert empty.shape == (0, 0)
-
- no_cols = SparseDataFrame(index=np.arange(10))
- mat = no_cols.values
- assert mat.shape == (10, 0)
-
- no_index = SparseDataFrame(columns=np.arange(10))
- mat = no_index.values
- assert mat.shape == (0, 10)
-
- def test_copy(self, float_frame):
- cp = float_frame.copy()
- assert isinstance(cp, SparseDataFrame)
- tm.assert_sp_frame_equal(cp, float_frame)
-
- # as of v0.15.0
- # this is now identical (but not is_a )
- assert cp.index.identical(float_frame.index)
-
- def test_constructor(self, float_frame, float_frame_int_kind, float_frame_fill0):
- for col, series in float_frame.items():
- assert isinstance(series, SparseSeries)
-
- assert isinstance(float_frame_int_kind["A"].sp_index, IntIndex)
-
- # constructed zframe from matrix above
- assert float_frame_fill0["A"].fill_value == 0
- # XXX: changed asarray
- expected = pd.SparseArray(
- [0, 0, 0, 0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], fill_value=0, kind="block"
- )
- tm.assert_sp_array_equal(expected, float_frame_fill0["A"].values)
- tm.assert_numpy_array_equal(
- np.array([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]),
- float_frame_fill0["A"].to_dense().values,
- )
-
- # construct no data
- sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
- for col, series in sdf.items():
- assert isinstance(series, SparseSeries)
-
- # construct from nested dict
- data = {c: s.to_dict() for c, s in float_frame.items()}
-
- sdf = SparseDataFrame(data)
- tm.assert_sp_frame_equal(sdf, float_frame)
-
- # TODO: test data is copied from inputs
-
- # init dict with different index
- idx = float_frame.index[:5]
- cons = SparseDataFrame(
- float_frame,
- index=idx,
- columns=float_frame.columns,
- default_fill_value=float_frame.default_fill_value,
- default_kind=float_frame.default_kind,
- copy=True,
- )
- reindexed = float_frame.reindex(idx)
-
- tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
-
- # assert level parameter breaks reindex
- with pytest.raises(TypeError):
- float_frame.reindex(idx, level=0)
-
- repr(float_frame)
-
- def test_constructor_fill_value_not_scalar_raises(self):
- d = {"b": [2, 3], "a": [0, 1]}
- fill_value = np.array(np.nan)
- with pytest.raises(ValueError, match="must be a scalar"):
- SparseDataFrame(data=d, default_fill_value=fill_value)
-
- def test_constructor_dict_order(self):
- # GH19018
- # initialization ordering: by insertion order if python>= 3.6, else
- # order by value
- d = {"b": [2, 3], "a": [0, 1]}
- frame = SparseDataFrame(data=d)
- if compat.PY36:
- expected = SparseDataFrame(data=d, columns=list("ba"))
- else:
- expected = SparseDataFrame(data=d, columns=list("ab"))
- tm.assert_sp_frame_equal(frame, expected)
-
- def test_constructor_ndarray(self, float_frame):
- # no index or columns
- sp = SparseDataFrame(float_frame.values)
-
- # 1d
- sp = SparseDataFrame(
- float_frame["A"].values, index=float_frame.index, columns=["A"]
- )
- tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=["A"]))
-
- # raise on level argument
- msg = "Reindex by level not supported for sparse"
- with pytest.raises(TypeError, match=msg):
- float_frame.reindex(columns=["A"], level=1)
-
- # wrong length index / columns
- with pytest.raises(ValueError, match="^Index length"):
- SparseDataFrame(float_frame.values, index=float_frame.index[:-1])
-
- with pytest.raises(ValueError, match="^Column length"):
- SparseDataFrame(float_frame.values, columns=float_frame.columns[:-1])
-
- # GH 9272
- def test_constructor_empty(self):
- sp = SparseDataFrame()
- assert len(sp.index) == 0
- assert len(sp.columns) == 0
-
- def test_constructor_dataframe(self, float_frame):
- dense = float_frame.to_dense()
- sp = SparseDataFrame(dense)
- tm.assert_sp_frame_equal(sp, float_frame)
-
- def test_constructor_convert_index_once(self):
- arr = np.array([1.5, 2.5, 3.5])
- sdf = SparseDataFrame(columns=range(4), index=arr)
- assert sdf[0].index is sdf[1].index
-
- def test_constructor_from_series(self):
-
- # GH 2873
- x = Series(np.random.randn(10000), name="a")
- x = x.to_sparse(fill_value=0)
- assert isinstance(x, SparseSeries)
- df = SparseDataFrame(x)
- assert isinstance(df, SparseDataFrame)
-
- x = Series(np.random.randn(10000), name="a")
- y = Series(np.random.randn(10000), name="b")
- x2 = x.astype(float)
- x2.loc[:9998] = np.NaN
- # TODO: x_sparse is unused...fix
- x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
-
- # Currently fails too with weird ufunc error
- # df1 = SparseDataFrame([x_sparse, y])
-
- y.loc[:9998] = 0
- # TODO: y_sparse is unsused...fix
- y_sparse = y.to_sparse(fill_value=0) # noqa
- # without sparse value raises error
- # df2 = SparseDataFrame([x2_sparse, y])
-
- def test_constructor_from_dense_series(self):
- # GH 19393
- # series with name
- x = Series(np.random.randn(10000), name="a")
- result = SparseDataFrame(x)
- expected = x.to_frame().to_sparse()
- tm.assert_sp_frame_equal(result, expected)
-
- # series with no name
- x = Series(np.random.randn(10000))
- result = SparseDataFrame(x)
- expected = x.to_frame().to_sparse()
- tm.assert_sp_frame_equal(result, expected)
-
- def test_constructor_from_unknown_type(self):
- # GH 19393
- class Unknown:
- pass
-
- with pytest.raises(
- TypeError,
- match=(
- "SparseDataFrame called with unknown type "
- '"Unknown" for data argument'
- ),
- ):
- SparseDataFrame(Unknown())
-
- def test_constructor_preserve_attr(self):
- # GH 13866
- arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
- assert arr.dtype == SparseDtype(np.int64)
- assert arr.fill_value == 0
-
- df = pd.SparseDataFrame({"x": arr})
- assert df["x"].dtype == SparseDtype(np.int64)
- assert df["x"].fill_value == 0
-
- s = pd.SparseSeries(arr, name="x")
- assert s.dtype == SparseDtype(np.int64)
- assert s.fill_value == 0
-
- df = pd.SparseDataFrame(s)
- assert df["x"].dtype == SparseDtype(np.int64)
- assert df["x"].fill_value == 0
-
- df = pd.SparseDataFrame({"x": s})
- assert df["x"].dtype == SparseDtype(np.int64)
- assert df["x"].fill_value == 0
-
- def test_constructor_nan_dataframe(self):
- # GH 10079
- trains = np.arange(100)
- thresholds = [10, 20, 30, 40, 50, 60]
- tuples = [(i, j) for i in trains for j in thresholds]
- index = pd.MultiIndex.from_tuples(tuples, names=["trains", "thresholds"])
- matrix = np.empty((len(index), len(trains)))
- matrix.fill(np.nan)
- df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)
- result = df.to_sparse()
- expected = pd.SparseDataFrame(matrix, index=index, columns=trains, dtype=float)
- tm.assert_sp_frame_equal(result, expected)
-
- def test_type_coercion_at_construction(self):
- # GH 15682
- result = pd.SparseDataFrame(
- {"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
- dtype="uint8",
- default_fill_value=0,
- )
- expected = pd.SparseDataFrame(
- {
- "a": pd.SparseSeries([1, 0, 0], dtype="uint8"),
- "b": pd.SparseSeries([0, 1, 0], dtype="uint8"),
- "c": pd.SparseSeries([0, 0, 1], dtype="uint8"),
- },
- default_fill_value=0,
- )
- tm.assert_sp_frame_equal(result, expected)
-
- def test_default_dtype(self):
- result = pd.SparseDataFrame(columns=list("ab"), index=range(2))
- expected = pd.SparseDataFrame(
- [[np.nan, np.nan], [np.nan, np.nan]], columns=list("ab"), index=range(2)
- )
- tm.assert_sp_frame_equal(result, expected)
-
- def test_nan_data_with_int_dtype_raises_error(self):
- sdf = pd.SparseDataFrame(
- [[np.nan, np.nan], [np.nan, np.nan]], columns=list("ab"), index=range(2)
- )
- msg = "Cannot convert non-finite values"
- with pytest.raises(ValueError, match=msg):
- pd.SparseDataFrame(sdf, dtype=np.int64)
-
- def test_dtypes(self):
- df = DataFrame(np.random.randn(10000, 4))
- df.loc[:9998] = np.nan
- sdf = df.to_sparse()
- result = sdf.dtypes
- expected = Series(["Sparse[float64, nan]"] * 4)
- tm.assert_series_equal(result, expected)
-
- def test_shape(
- self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
- ):
- # see gh-10452
- assert float_frame.shape == (10, 4)
- assert float_frame_int_kind.shape == (10, 4)
- assert float_frame_fill0.shape == (10, 4)
- assert float_frame_fill2.shape == (10, 4)
-
- def test_str(self):
- df = DataFrame(np.random.randn(10000, 4))
- df.loc[:9998] = np.nan
-
- sdf = df.to_sparse()
- str(sdf)
-
- def test_array_interface(self, float_frame):
- res = np.sqrt(float_frame)
- dres = np.sqrt(float_frame.to_dense())
- tm.assert_frame_equal(res.to_dense(), dres)
-
- def test_pickle(
- self,
- float_frame,
- float_frame_int_kind,
- float_frame_dense,
- float_frame_fill0,
- float_frame_fill0_dense,
- float_frame_fill2,
- float_frame_fill2_dense,
- ):
- def _test_roundtrip(frame, orig):
- result = tm.round_trip_pickle(frame)
- tm.assert_sp_frame_equal(frame, result)
- tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
-
- _test_roundtrip(SparseDataFrame(), DataFrame())
- _test_roundtrip(float_frame, float_frame_dense)
- _test_roundtrip(float_frame_int_kind, float_frame_dense)
- _test_roundtrip(float_frame_fill0, float_frame_fill0_dense)
- _test_roundtrip(float_frame_fill2, float_frame_fill2_dense)
-
- def test_dense_to_sparse(self):
- df = DataFrame({"A": [nan, nan, nan, 1, 2], "B": [1, 2, nan, nan, nan]})
- sdf = df.to_sparse()
- assert isinstance(sdf, SparseDataFrame)
- assert np.isnan(sdf.default_fill_value)
- assert isinstance(sdf["A"].sp_index, BlockIndex)
- tm.assert_frame_equal(sdf.to_dense(), df)
-
- sdf = df.to_sparse(kind="integer")
- assert isinstance(sdf["A"].sp_index, IntIndex)
-
- df = DataFrame({"A": [0, 0, 0, 1, 2], "B": [1, 2, 0, 0, 0]}, dtype=float)
- sdf = df.to_sparse(fill_value=0)
- assert sdf.default_fill_value == 0
- tm.assert_frame_equal(sdf.to_dense(), df)
-
- def test_deprecated_dense_to_sparse(self):
- # GH 26557
- # Deprecated 0.25.0
-
- df = pd.DataFrame({"A": [1, np.nan, 3]})
- sparse_df = pd.SparseDataFrame({"A": [1, np.nan, 3]})
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = df.to_sparse()
- tm.assert_frame_equal(result, sparse_df)
-
- def test_density(self):
- df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
- assert df.density == 0.7
-
- df = SparseDataFrame(
- {
- "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
- "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
- "C": np.arange(10),
- "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
- }
- )
-
- assert df.density == 0.75
-
- def test_sparse_to_dense(self):
- pass
-
- def test_sparse_series_ops(self, float_frame):
- self._check_frame_ops(float_frame)
-
- def test_sparse_series_ops_i(self, float_frame_int_kind):
- self._check_frame_ops(float_frame_int_kind)
-
- def test_sparse_series_ops_z(self, float_frame_fill0):
- self._check_frame_ops(float_frame_fill0)
-
- def test_sparse_series_ops_fill(self, float_frame_fill2):
- self._check_frame_ops(float_frame_fill2)
-
- def _check_frame_ops(self, frame):
- def _compare_to_dense(a, b, da, db, op):
- sparse_result = op(a, b)
- dense_result = op(da, db)
-
- # catch lambdas but not non-lambdas e.g. operator.add
- if op in [operator.floordiv, ops.rfloordiv] or isinstance(op, LambdaType):
- # GH#27231 Series sets 1//0 to np.inf, which SparseArray
- # does not do (yet)
- mask = np.isinf(dense_result) & ~np.isinf(sparse_result.to_dense())
- dense_result[mask] = np.nan
-
- fill = sparse_result.default_fill_value
- dense_result = dense_result.to_sparse(fill_value=fill)
- tm.assert_sp_frame_equal(sparse_result, dense_result, exact_indices=False)
-
- if isinstance(a, DataFrame) and isinstance(db, DataFrame):
- mixed_result = op(a, db)
- assert isinstance(mixed_result, SparseDataFrame)
- tm.assert_sp_frame_equal(
- mixed_result, sparse_result, exact_indices=False
- )
-
- opnames = ["add", "sub", "mul", "truediv", "floordiv"]
-
- fidx = frame.index
-
- # time series operations
-
- series = [
- frame["A"],
- frame["B"],
- frame["C"],
- frame["D"],
- frame["A"].reindex(fidx[:7]),
- frame["A"].reindex(fidx[::2]),
- SparseSeries([], index=[]),
- ]
-
- for op in opnames:
- _compare_to_dense(
- frame,
- frame[::2],
- frame.to_dense(),
- frame[::2].to_dense(),
- getattr(operator, op),
- )
-
- # 2304, no auto-broadcasting
- for i, s in enumerate(series):
- f = lambda a, b: getattr(a, op)(b, axis="index")
- _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
-
- # FIXME: dont leave commented-out
- # rops are not implemented
- # _compare_to_dense(s, frame, s.to_dense(),
- # frame.to_dense(), f)
-
- # cross-sectional operations
- series = [
- frame.xs(fidx[0]),
- frame.xs(fidx[3]),
- frame.xs(fidx[5]),
- frame.xs(fidx[7]),
- frame.xs(fidx[5])[:2],
- ]
-
- for name in opnames:
- op = getattr(operator, name)
- for s in series:
- _compare_to_dense(frame, s, frame.to_dense(), s, op)
- _compare_to_dense(s, frame, s, frame.to_dense(), op)
-
- # it works!
- frame + frame.loc[:, ["A", "B"]]
-
- def test_op_corners(self, float_frame, empty_frame):
- empty = empty_frame + empty_frame
- assert empty.empty
-
- foo = float_frame + empty_frame
- assert isinstance(foo.index, DatetimeIndex)
- tm.assert_frame_equal(foo, float_frame * np.nan)
-
- foo = empty_frame + float_frame
- tm.assert_frame_equal(foo, float_frame * np.nan)
-
- def test_scalar_ops(self):
- pass
-
- def test_getitem(self):
- # 1585 select multiple columns
- sdf = SparseDataFrame(index=[0, 1, 2], columns=["a", "b", "c"])
-
- result = sdf[["a", "b"]]
- exp = sdf.reindex(columns=["a", "b"])
- tm.assert_sp_frame_equal(result, exp)
-
- with pytest.raises(KeyError, match=r"\['d'\] not in index"):
- sdf[["a", "d"]]
-
- def test_iloc(self, float_frame):
-
- # GH 2227
- result = float_frame.iloc[:, 0]
- assert isinstance(result, SparseSeries)
- tm.assert_sp_series_equal(result, float_frame["A"])
-
- # preserve sparse index type. #2251
- data = {"A": [0, 1]}
- iframe = SparseDataFrame(data, default_kind="integer")
- tm.assert_class_equal(iframe["A"].sp_index, iframe.iloc[:, 0].sp_index)
-
- def test_set_value(self, float_frame):
-
- # ok, as the index gets converted to object
- frame = float_frame.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res = frame.set_value("foobar", "B", 1.5)
- assert res.index.dtype == "object"
-
- res = float_frame
- res.index = res.index.astype(object)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res = float_frame.set_value("foobar", "B", 1.5)
- assert res is not float_frame
- assert res.index[-1] == "foobar"
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert res.get_value("foobar", "B") == 1.5
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- res2 = res.set_value("foobar", "qux", 1.5)
- assert res2 is not res
- tm.assert_index_equal(
- res2.columns, pd.Index(list(float_frame.columns) + ["qux"])
- )
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- assert res2.get_value("foobar", "qux") == 1.5
-
- def test_fancy_index_misc(self, float_frame):
- # axis = 0
- sliced = float_frame.iloc[-2:, :]
- expected = float_frame.reindex(index=float_frame.index[-2:])
- tm.assert_sp_frame_equal(sliced, expected)
-
- # axis = 1
- sliced = float_frame.iloc[:, -2:]
- expected = float_frame.reindex(columns=float_frame.columns[-2:])
- tm.assert_sp_frame_equal(sliced, expected)
-
- def test_getitem_overload(self, float_frame):
- # slicing
- sl = float_frame[:20]
- tm.assert_sp_frame_equal(sl, float_frame.reindex(float_frame.index[:20]))
-
- # boolean indexing
- d = float_frame.index[5]
- indexer = float_frame.index > d
-
- subindex = float_frame.index[indexer]
- subframe = float_frame[indexer]
-
- tm.assert_index_equal(subindex, subframe.index)
- msg = "Item wrong length 9 instead of 10"
- with pytest.raises(ValueError, match=msg):
- float_frame[indexer[:-1]]
-
- def test_setitem(
- self,
- float_frame,
- float_frame_int_kind,
- float_frame_dense,
- float_frame_fill0,
- float_frame_fill0_dense,
- float_frame_fill2,
- float_frame_fill2_dense,
- ):
- def _check_frame(frame, orig):
- N = len(frame)
-
- # insert SparseSeries
- frame["E"] = frame["A"]
- assert isinstance(frame["E"], SparseSeries)
- tm.assert_sp_series_equal(frame["E"], frame["A"], check_names=False)
-
- # insert SparseSeries differently-indexed
- to_insert = frame["A"][::2]
- frame["E"] = to_insert
- expected = to_insert.to_dense().reindex(frame.index)
- result = frame["E"].to_dense()
- tm.assert_series_equal(result, expected, check_names=False)
- assert result.name == "E"
-
- # insert Series
- frame["F"] = frame["A"].to_dense()
- assert isinstance(frame["F"], SparseSeries)
- tm.assert_sp_series_equal(frame["F"], frame["A"], check_names=False)
-
- # insert Series differently-indexed
- to_insert = frame["A"].to_dense()[::2]
- frame["G"] = to_insert
- expected = to_insert.reindex(frame.index)
- expected.name = "G"
- tm.assert_series_equal(frame["G"].to_dense(), expected)
-
- # insert ndarray
- frame["H"] = np.random.randn(N)
- assert isinstance(frame["H"], SparseSeries)
-
- to_sparsify = np.random.randn(N)
- to_sparsify[N // 2 :] = frame.default_fill_value
- frame["I"] = to_sparsify
- assert len(frame["I"].sp_values) == N // 2
-
- # insert ndarray wrong size
- # GH 25484
- msg = "Length of values does not match length of index"
- with pytest.raises(ValueError, match=msg):
- frame["foo"] = np.random.randn(N - 1)
-
- # scalar value
- frame["J"] = 5
- assert len(frame["J"].sp_values) == N
- assert (frame["J"].sp_values == 5).all()
-
- frame["K"] = frame.default_fill_value
- assert len(frame["K"].sp_values) == 0
-
- _check_frame(float_frame, float_frame_dense)
- _check_frame(float_frame_int_kind, float_frame_dense)
- _check_frame(float_frame_fill0, float_frame_fill0_dense)
- _check_frame(float_frame_fill2, float_frame_fill2_dense)
-
- @pytest.mark.parametrize(
- "values",
- [
- [True, False],
- [0, 1],
- [1, None],
- ["a", "b"],
- [pd.Timestamp("2017"), pd.NaT],
- [pd.Timedelta("10s"), pd.NaT],
- ],
- )
- def test_setitem_more(self, values):
- df = pd.DataFrame({"A": values})
- df["A"] = pd.SparseArray(values)
- expected = pd.DataFrame({"A": pd.SparseArray(values)})
- tm.assert_frame_equal(df, expected)
-
- def test_setitem_corner(self, float_frame):
- float_frame["a"] = float_frame["B"]
- tm.assert_sp_series_equal(float_frame["a"], float_frame["B"], check_names=False)
-
- def test_setitem_array(self, float_frame):
- arr = float_frame["B"]
-
- float_frame["E"] = arr
- tm.assert_sp_series_equal(float_frame["E"], float_frame["B"], check_names=False)
-
- float_frame["F"] = arr[:-1]
- index = float_frame.index[:-1]
- tm.assert_sp_series_equal(
- float_frame["E"].reindex(index),
- float_frame["F"].reindex(index),
- check_names=False,
- )
-
- def test_setitem_chained_no_consolidate(self):
- # https://github.com/pandas-dev/pandas/pull/19268
- # issuecomment-361696418
- # chained setitem used to cause consolidation
- sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
- with pd.option_context("mode.chained_assignment", None):
- sdf[0][1] = 2
- assert len(sdf._data.blocks) == 2
-
- def test_delitem(self, float_frame):
- A = float_frame["A"]
- C = float_frame["C"]
-
- del float_frame["B"]
- assert "B" not in float_frame
- tm.assert_sp_series_equal(float_frame["A"], A)
- tm.assert_sp_series_equal(float_frame["C"], C)
-
- del float_frame["D"]
- assert "D" not in float_frame
-
- del float_frame["A"]
- assert "A" not in float_frame
-
- def test_set_columns(self, float_frame):
- float_frame.columns = float_frame.columns
- msg = (
- "Length mismatch: Expected axis has 4 elements, new values have"
- " 3 elements"
- )
- with pytest.raises(ValueError, match=msg):
- float_frame.columns = float_frame.columns[:-1]
-
- def test_set_index(self, float_frame):
- float_frame.index = float_frame.index
- msg = (
- "Length mismatch: Expected axis has 10 elements, new values"
- " have 9 elements"
- )
- with pytest.raises(ValueError, match=msg):
- float_frame.index = float_frame.index[:-1]
-
- def test_ctor_reindex(self):
- idx = pd.Index([0, 1, 2, 3])
- msg = "Length of passed values is 2, index implies 4"
- with pytest.raises(ValueError, match=msg):
- pd.SparseDataFrame({"A": [1, 2]}, index=idx)
-
- def test_append(self, float_frame):
- a = float_frame[:5]
- b = float_frame[5:]
-
- appended = a.append(b)
- tm.assert_sp_frame_equal(appended, float_frame, exact_indices=False)
-
- a = float_frame.iloc[:5, :3]
- b = float_frame.iloc[5:]
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
- ):
- # Stacklevel is set for pd.concat, not append
- appended = a.append(b)
- tm.assert_sp_frame_equal(
- appended.iloc[:, :3], float_frame.iloc[:, :3], exact_indices=False
- )
-
- a = a[["B", "C", "A"]].head(2)
- b = b.head(2)
-
- expected = pd.SparseDataFrame(
- {
- "B": [0.0, 1, None, 3],
- "C": [0.0, 1, 5, 6],
- "A": [None, None, 2, 3],
- "D": [None, None, 5, None],
- },
- index=a.index | b.index,
- columns=["B", "C", "A", "D"],
- )
- with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
- appended = a.append(b, sort=False)
-
- tm.assert_frame_equal(appended, expected)
-
- with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
- appended = a.append(b, sort=True)
-
- tm.assert_sp_frame_equal(
- appended,
- expected[["A", "B", "C", "D"]],
- consolidate_block_indices=True,
- check_kind=False,
- )
-
- def test_astype(self):
- sparse = pd.SparseDataFrame(
- {
- "A": SparseArray([1, 2, 3, 4], dtype=np.int64),
- "B": SparseArray([4, 5, 6, 7], dtype=np.int64),
- }
- )
- assert sparse["A"].dtype == SparseDtype(np.int64)
- assert sparse["B"].dtype == SparseDtype(np.int64)
-
- # retain fill_value
- res = sparse.astype(np.float64)
- exp = pd.SparseDataFrame(
- {
- "A": SparseArray([1.0, 2.0, 3.0, 4.0], fill_value=0, kind="integer"),
- "B": SparseArray([4.0, 5.0, 6.0, 7.0], fill_value=0, kind="integer"),
- },
- default_fill_value=np.nan,
- )
- tm.assert_sp_frame_equal(res, exp)
- assert res["A"].dtype == SparseDtype(np.float64, 0)
- assert res["B"].dtype == SparseDtype(np.float64, 0)
-
- # update fill_value
- res = sparse.astype(SparseDtype(np.float64, np.nan))
- exp = pd.SparseDataFrame(
- {
- "A": SparseArray(
- [1.0, 2.0, 3.0, 4.0], fill_value=np.nan, kind="integer"
- ),
- "B": SparseArray(
- [4.0, 5.0, 6.0, 7.0], fill_value=np.nan, kind="integer"
- ),
- },
- default_fill_value=np.nan,
- )
- tm.assert_sp_frame_equal(res, exp)
- assert res["A"].dtype == SparseDtype(np.float64, np.nan)
- assert res["B"].dtype == SparseDtype(np.float64, np.nan)
-
- def test_astype_bool(self):
- sparse = pd.SparseDataFrame(
- {
- "A": SparseArray([0, 2, 0, 4], fill_value=0, dtype=np.int64),
- "B": SparseArray([0, 5, 0, 7], fill_value=0, dtype=np.int64),
- },
- default_fill_value=0,
- )
- assert sparse["A"].dtype == SparseDtype(np.int64)
- assert sparse["B"].dtype == SparseDtype(np.int64)
-
- res = sparse.astype(SparseDtype(bool, False))
- exp = pd.SparseDataFrame(
- {
- "A": SparseArray(
- [False, True, False, True],
- dtype=np.bool,
- fill_value=False,
- kind="integer",
- ),
- "B": SparseArray(
- [False, True, False, True],
- dtype=np.bool,
- fill_value=False,
- kind="integer",
- ),
- },
- default_fill_value=False,
- )
- tm.assert_sp_frame_equal(res, exp)
- assert res["A"].dtype == SparseDtype(np.bool)
- assert res["B"].dtype == SparseDtype(np.bool)
-
- def test_astype_object(self):
- # This may change in GH-23125
- df = pd.DataFrame({"A": SparseArray([0, 1]), "B": SparseArray([0, 1])})
- result = df.astype(object)
- dtype = SparseDtype(object, 0)
- expected = pd.DataFrame(
- {
- "A": SparseArray([0, 1], dtype=dtype),
- "B": SparseArray([0, 1], dtype=dtype),
- }
- )
- tm.assert_frame_equal(result, expected)
-
- def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
- df = float_frame_fill0.reindex(list(range(5)))
- dense = float_frame_fill0_dense.reindex(list(range(5)))
-
- result = df.fillna(0)
- expected = dense.fillna(0)
- tm.assert_sp_frame_equal(
- result, expected.to_sparse(fill_value=0), exact_indices=False
- )
- tm.assert_frame_equal(result.to_dense(), expected)
-
- result = df.copy()
- result.fillna(0, inplace=True)
- expected = dense.fillna(0)
-
- tm.assert_sp_frame_equal(
- result, expected.to_sparse(fill_value=0), exact_indices=False
- )
- tm.assert_frame_equal(result.to_dense(), expected)
-
- result = df.copy()
- result = df["A"]
- result.fillna(0, inplace=True)
-
- expected = dense["A"].fillna(0)
- # this changes internal SparseArray repr
- # tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
- tm.assert_series_equal(result.to_dense(), expected)
-
- def test_fillna_fill_value(self):
- df = pd.DataFrame({"A": [1, 0, 0], "B": [np.nan, np.nan, 4]})
-
- sparse = pd.SparseDataFrame(df)
- tm.assert_frame_equal(
- sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False
- )
-
- sparse = pd.SparseDataFrame(df, default_fill_value=0)
- tm.assert_frame_equal(
- sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False
- )
-
- def test_sparse_frame_pad_backfill_limit(self):
- index = np.arange(10)
- df = DataFrame(np.random.randn(10, 4), index=index)
- sdf = df.to_sparse()
-
- result = sdf[:2].reindex(index, method="pad", limit=5)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = sdf[:2].reindex(index).fillna(method="pad")
- expected = expected.to_dense()
- expected.values[-3:] = np.nan
- expected = expected.to_sparse()
- tm.assert_frame_equal(result, expected)
-
- result = sdf[-2:].reindex(index, method="backfill", limit=5)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = sdf[-2:].reindex(index).fillna(method="backfill")
- expected = expected.to_dense()
- expected.values[:3] = np.nan
- expected = expected.to_sparse()
- tm.assert_frame_equal(result, expected)
-
- def test_sparse_frame_fillna_limit(self):
- index = np.arange(10)
- df = DataFrame(np.random.randn(10, 4), index=index)
- sdf = df.to_sparse()
-
- result = sdf[:2].reindex(index)
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- result = result.fillna(method="pad", limit=5)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = sdf[:2].reindex(index).fillna(method="pad")
- expected = expected.to_dense()
- expected.values[-3:] = np.nan
- expected = expected.to_sparse()
- tm.assert_frame_equal(result, expected)
-
- result = sdf[-2:].reindex(index)
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- result = result.fillna(method="backfill", limit=5)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- expected = sdf[-2:].reindex(index).fillna(method="backfill")
- expected = expected.to_dense()
- expected.values[:3] = np.nan
- expected = expected.to_sparse()
- tm.assert_frame_equal(result, expected)
-
- def test_rename(self, float_frame):
- result = float_frame.rename(index=str)
- expected = SparseDataFrame(
- float_frame.values,
- index=float_frame.index.strftime("%Y-%m-%d %H:%M:%S"),
- columns=list("ABCD"),
- )
- tm.assert_sp_frame_equal(result, expected)
-
- result = float_frame.rename(columns="{}1".format)
- data = {
- "A1": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
- "B1": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
- "C1": np.arange(10, dtype=np.float64),
- "D1": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
- }
- expected = SparseDataFrame(data, index=float_frame.index)
- tm.assert_sp_frame_equal(result, expected)
-
- def test_corr(self, float_frame):
- res = float_frame.corr()
- # XXX: this stays sparse
- tm.assert_frame_equal(res, float_frame.to_dense().corr().to_sparse())
-
- def test_describe(self, float_frame):
- float_frame["foo"] = np.nan
- float_frame.dtypes.value_counts()
- str(float_frame)
- desc = float_frame.describe() # noqa
-
- def test_join(self, float_frame):
- left = float_frame.loc[:, ["A", "B"]]
- right = float_frame.loc[:, ["C", "D"]]
- joined = left.join(right)
- tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)
-
- right = float_frame.loc[:, ["B", "D"]]
- msg = (
- r"columns overlap but no suffix specified: Index\(\['B'\],"
- r" dtype='object'\)"
- )
- with pytest.raises(ValueError, match=msg):
- left.join(right)
-
- with pytest.raises(ValueError, match="Other Series must have a name"):
- float_frame.join(
- Series(np.random.randn(len(float_frame)), index=float_frame.index)
- )
-
- def test_reindex(
- self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
- ):
- def _check_frame(frame):
- index = frame.index
- sidx = index[::2]
- sidx2 = index[:5] # noqa
-
- sparse_result = frame.reindex(sidx)
- dense_result = frame.to_dense().reindex(sidx)
- tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
-
- tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(), dense_result)
-
- sparse_result2 = sparse_result.reindex(index)
- dense_result2 = dense_result.reindex(index)
- tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
-
- # propagate CORRECT fill value
- tm.assert_almost_equal(
- sparse_result.default_fill_value, frame.default_fill_value
- )
- tm.assert_almost_equal(sparse_result["A"].fill_value, frame["A"].fill_value)
-
- # length zero
- length_zero = frame.reindex([])
- assert len(length_zero) == 0
- assert len(length_zero.columns) == len(frame.columns)
- assert len(length_zero["A"]) == 0
-
- # frame being reindexed has length zero
- length_n = length_zero.reindex(index)
- assert len(length_n) == len(frame)
- assert len(length_n.columns) == len(frame.columns)
- assert len(length_n["A"]) == len(frame)
-
- # reindex columns
- reindexed = frame.reindex(columns=["A", "B", "Z"])
- assert len(reindexed.columns) == 3
- tm.assert_almost_equal(reindexed["Z"].fill_value, frame.default_fill_value)
- assert np.isnan(reindexed["Z"].sp_values).all()
-
- _check_frame(float_frame)
- _check_frame(float_frame_int_kind)
- _check_frame(float_frame_fill0)
- _check_frame(float_frame_fill2)
-
- # with copy=False
- reindexed = float_frame.reindex(float_frame.index, copy=False)
- reindexed["F"] = reindexed["A"]
- assert "F" in float_frame
-
- reindexed = float_frame.reindex(float_frame.index)
- reindexed["G"] = reindexed["A"]
- assert "G" not in float_frame
-
- def test_reindex_fill_value(self, float_frame_fill0, float_frame_fill0_dense):
- rng = bdate_range("20110110", periods=20)
-
- result = float_frame_fill0.reindex(rng, fill_value=0)
- exp = float_frame_fill0_dense.reindex(rng, fill_value=0)
- exp = exp.to_sparse(float_frame_fill0.default_fill_value)
- tm.assert_sp_frame_equal(result, exp)
-
- def test_reindex_method(self):
-
- sparse = SparseDataFrame(
- data=[[11.0, 12.0, 14.0], [21.0, 22.0, 24.0], [41.0, 42.0, 44.0]],
- index=[1, 2, 4],
- columns=[1, 2, 4],
- dtype=float,
- )
-
- # Over indices
-
- # default method
- result = sparse.reindex(index=range(6))
- expected = SparseDataFrame(
- data=[
- [nan, nan, nan],
- [11.0, 12.0, 14.0],
- [21.0, 22.0, 24.0],
- [nan, nan, nan],
- [41.0, 42.0, 44.0],
- [nan, nan, nan],
- ],
- index=range(6),
- columns=[1, 2, 4],
- dtype=float,
- )
- tm.assert_sp_frame_equal(result, expected)
-
- # method='bfill'
- result = sparse.reindex(index=range(6), method="bfill")
- expected = SparseDataFrame(
- data=[
- [11.0, 12.0, 14.0],
- [11.0, 12.0, 14.0],
- [21.0, 22.0, 24.0],
- [41.0, 42.0, 44.0],
- [41.0, 42.0, 44.0],
- [nan, nan, nan],
- ],
- index=range(6),
- columns=[1, 2, 4],
- dtype=float,
- )
- tm.assert_sp_frame_equal(result, expected)
-
- # method='ffill'
- result = sparse.reindex(index=range(6), method="ffill")
- expected = SparseDataFrame(
- data=[
- [nan, nan, nan],
- [11.0, 12.0, 14.0],
- [21.0, 22.0, 24.0],
- [21.0, 22.0, 24.0],
- [41.0, 42.0, 44.0],
- [41.0, 42.0, 44.0],
- ],
- index=range(6),
- columns=[1, 2, 4],
- dtype=float,
- )
- tm.assert_sp_frame_equal(result, expected)
-
- # Over columns
-
- # default method
- result = sparse.reindex(columns=range(6))
- expected = SparseDataFrame(
- data=[
- [nan, 11.0, 12.0, nan, 14.0, nan],
- [nan, 21.0, 22.0, nan, 24.0, nan],
- [nan, 41.0, 42.0, nan, 44.0, nan],
- ],
- index=[1, 2, 4],
- columns=range(6),
- dtype=float,
- )
- tm.assert_sp_frame_equal(result, expected)
-
- # method='bfill'
- with pytest.raises(NotImplementedError):
- sparse.reindex(columns=range(6), method="bfill")
-
- # method='ffill'
- with pytest.raises(NotImplementedError):
- sparse.reindex(columns=range(6), method="ffill")
-
- def test_take(self, float_frame):
- result = float_frame.take([1, 0, 2], axis=1)
- expected = float_frame.reindex(columns=["B", "A", "C"])
- tm.assert_sp_frame_equal(result, expected)
-
- def test_to_dense(
- self,
- float_frame,
- float_frame_int_kind,
- float_frame_dense,
- float_frame_fill0,
- float_frame_fill0_dense,
- float_frame_fill2,
- float_frame_fill2_dense,
- ):
- def _check(frame, orig):
- dense_dm = frame.to_dense()
- # Sparse[float] != float
- tm.assert_frame_equal(frame, dense_dm, check_dtype=False)
- tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
-
- _check(float_frame, float_frame_dense)
- _check(float_frame_int_kind, float_frame_dense)
- _check(float_frame_fill0, float_frame_fill0_dense)
- _check(float_frame_fill2, float_frame_fill2_dense)
-
- def test_stack_sparse_frame(
- self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
- ):
- def _check(frame):
- dense_frame = frame.to_dense() # noqa
-
- from_dense_lp = frame.stack().to_frame()
-
- from_sparse_lp = spf.stack_sparse_frame(frame)
-
- tm.assert_numpy_array_equal(from_dense_lp.values, from_sparse_lp.values)
-
- _check(float_frame)
- _check(float_frame_int_kind)
-
- # for now
- msg = "This routine assumes NaN fill value"
- with pytest.raises(TypeError, match=msg):
- _check(float_frame_fill0)
- with pytest.raises(TypeError, match=msg):
- _check(float_frame_fill2)
-
- def test_transpose(
- self,
- float_frame,
- float_frame_int_kind,
- float_frame_dense,
- float_frame_fill0,
- float_frame_fill0_dense,
- float_frame_fill2,
- float_frame_fill2_dense,
- ):
- def _check(frame, orig):
- transposed = frame.T
- untransposed = transposed.T
- tm.assert_sp_frame_equal(frame, untransposed)
-
- tm.assert_frame_equal(frame.T.to_dense(), orig.T)
- tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
- tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
-
- _check(float_frame, float_frame_dense)
- _check(float_frame_int_kind, float_frame_dense)
- _check(float_frame_fill0, float_frame_fill0_dense)
- _check(float_frame_fill2, float_frame_fill2_dense)
-
- def test_shift(
- self,
- float_frame,
- float_frame_int_kind,
- float_frame_dense,
- float_frame_fill0,
- float_frame_fill0_dense,
- float_frame_fill2,
- float_frame_fill2_dense,
- ):
- def _check(frame, orig):
- shifted = frame.shift(0)
- exp = orig.shift(0)
- tm.assert_frame_equal(shifted.to_dense(), exp)
-
- shifted = frame.shift(1)
- exp = orig.shift(1)
- tm.assert_frame_equal(shifted.to_dense(), exp)
-
- shifted = frame.shift(-2)
- exp = orig.shift(-2)
- tm.assert_frame_equal(shifted.to_dense(), exp)
-
- shifted = frame.shift(2, freq="B")
- exp = orig.shift(2, freq="B")
- exp = exp.to_sparse(frame.default_fill_value, kind=frame.default_kind)
- tm.assert_frame_equal(shifted, exp)
-
- shifted = frame.shift(2, freq=BDay())
- exp = orig.shift(2, freq=BDay())
- exp = exp.to_sparse(frame.default_fill_value, kind=frame.default_kind)
- tm.assert_frame_equal(shifted, exp)
-
- _check(float_frame, float_frame_dense)
- _check(float_frame_int_kind, float_frame_dense)
- _check(float_frame_fill0, float_frame_fill0_dense)
- _check(float_frame_fill2, float_frame_fill2_dense)
-
- def test_count(self, float_frame):
- dense_result = float_frame.to_dense().count()
-
- result = float_frame.count()
- tm.assert_series_equal(result.to_dense(), dense_result)
-
- result = float_frame.count(axis=None)
- tm.assert_series_equal(result.to_dense(), dense_result)
-
- result = float_frame.count(axis=0)
- tm.assert_series_equal(result.to_dense(), dense_result)
-
- result = float_frame.count(axis=1)
- dense_result = float_frame.to_dense().count(axis=1)
-
- # win32 don't check dtype
- tm.assert_series_equal(result, dense_result, check_dtype=False)
-
- def test_numpy_transpose(self):
- sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=["a"])
- result = np.transpose(np.transpose(sdf))
- tm.assert_sp_frame_equal(result, sdf)
-
- msg = "the 'axes' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.transpose(sdf, axes=1)
-
- def test_combine_first(self, float_frame):
- df = float_frame
-
- result = df[::2].combine_first(df)
-
- expected = df[::2].to_dense().combine_first(df.to_dense())
- expected = expected.to_sparse(fill_value=df.default_fill_value)
-
- tm.assert_sp_frame_equal(result, expected)
-
- @pytest.mark.xfail(reason="No longer supported.")
- def test_combine_first_with_dense(self):
- # We could support this if we allow
- # pd.core.dtypes.cast.find_common_type to special case SparseDtype
- # but I don't think that's worth it.
- df = self.frame
-
- result = df[::2].combine_first(df.to_dense())
- expected = df[::2].to_dense().combine_first(df.to_dense())
- expected = expected.to_sparse(fill_value=df.default_fill_value)
-
- tm.assert_sp_frame_equal(result, expected)
-
- def test_combine_add(self, float_frame):
- df = float_frame.to_dense()
- df2 = df.copy()
- df2["C"][:3] = np.nan
- df["A"][:3] = 5.7
-
- result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
- expected = df.add(df2, fill_value=0).to_sparse()
- tm.assert_sp_frame_equal(result, expected)
-
- def test_isin(self):
- sparse_df = DataFrame({"flag": [1.0, 0.0, 1.0]}).to_sparse(fill_value=0.0)
- xp = sparse_df[sparse_df.flag == 1.0]
- rs = sparse_df[sparse_df.flag.isin([1.0])]
- tm.assert_frame_equal(xp, rs)
-
- def test_sparse_pow_issue(self):
- # 2220
- df = SparseDataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
-
- # note : no error without nan
- df = SparseDataFrame({"A": [nan, 0, 1]})
-
- # note that 2 ** df works fine, also df ** 1
- result = 1 ** df
-
- r1 = result.take([0], 1)["A"]
- r2 = result["A"]
-
- assert len(r2.sp_values) == len(r1.sp_values)
-
- def test_as_blocks(self):
- df = SparseDataFrame({"A": [1.1, 3.3], "B": [nan, -3.9]}, dtype="float64")
-
- # deprecated 0.21.0
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df_blocks = df.blocks
- assert list(df_blocks.keys()) == ["Sparse[float64, nan]"]
- tm.assert_frame_equal(df_blocks["Sparse[float64, nan]"], df)
-
- @pytest.mark.xfail(reason="nan column names in _init_dict problematic (GH#16894)")
- def test_nan_columnname(self):
- # GH 8822
- nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
- nan_colname_sparse = nan_colname.to_sparse()
- assert np.isnan(nan_colname_sparse.columns[0])
-
- def test_isna(self):
- # GH 8276
- df = pd.SparseDataFrame(
- {"A": [np.nan, np.nan, 1, 2, np.nan], "B": [0, np.nan, np.nan, 2, np.nan]}
- )
-
- res = df.isna()
- exp = pd.SparseDataFrame(
- {
- "A": [True, True, False, False, True],
- "B": [False, True, True, False, True],
- },
- default_fill_value=True,
- )
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp)
-
- # if fill_value is not nan, True can be included in sp_values
- df = pd.SparseDataFrame(
- {"A": [0, 0, 1, 2, np.nan], "B": [0, np.nan, 0, 2, np.nan]},
- default_fill_value=0.0,
- )
- res = df.isna()
- assert isinstance(res, pd.SparseDataFrame)
- exp = pd.DataFrame(
- {
- "A": [False, False, False, False, True],
- "B": [False, True, False, False, True],
- }
- )
- tm.assert_frame_equal(res.to_dense(), exp)
-
- def test_notna(self):
- # GH 8276
- df = pd.SparseDataFrame(
- {"A": [np.nan, np.nan, 1, 2, np.nan], "B": [0, np.nan, np.nan, 2, np.nan]}
- )
-
- res = df.notna()
- exp = pd.SparseDataFrame(
- {
- "A": [False, False, True, True, False],
- "B": [True, False, False, True, False],
- },
- default_fill_value=False,
- )
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp)
-
- # if fill_value is not nan, True can be included in sp_values
- df = pd.SparseDataFrame(
- {"A": [0, 0, 1, 2, np.nan], "B": [0, np.nan, 0, 2, np.nan]},
- default_fill_value=0.0,
- )
- res = df.notna()
- assert isinstance(res, pd.SparseDataFrame)
- exp = pd.DataFrame(
- {
- "A": [True, True, True, True, False],
- "B": [True, False, True, True, False],
- }
- )
- tm.assert_frame_equal(res.to_dense(), exp)
-
- def test_default_fill_value_with_no_data(self):
- # GH 16807
- expected = pd.SparseDataFrame(
- [[1.0, 1.0], [1.0, 1.0]], columns=list("ab"), index=range(2)
- )
- result = pd.SparseDataFrame(
- columns=list("ab"), index=range(2), default_fill_value=1.0
- )
- tm.assert_frame_equal(expected, result)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrameArithmetic:
- def test_numeric_op_scalar(self):
- df = pd.DataFrame(
- {
- "A": [nan, nan, 0, 1],
- "B": [0, 1, 2, nan],
- "C": [1.0, 2.0, 3.0, 4.0],
- "D": [nan, nan, nan, nan],
- }
- )
- sparse = df.to_sparse()
-
- tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
-
- def test_comparison_op_scalar(self):
- # GH 13001
- df = pd.DataFrame(
- {
- "A": [nan, nan, 0, 1],
- "B": [0, 1, 2, nan],
- "C": [1.0, 2.0, 3.0, 4.0],
- "D": [nan, nan, nan, nan],
- }
- )
- sparse = df.to_sparse()
-
- # comparison changes internal repr, compare with dense
- res = sparse > 1
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), df > 1)
-
- res = sparse != 0
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), df != 0)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrameAnalytics:
- def test_cumsum(self, float_frame):
- expected = SparseDataFrame(float_frame.to_dense().cumsum())
-
- result = float_frame.cumsum()
- tm.assert_sp_frame_equal(result, expected)
-
- result = float_frame.cumsum(axis=None)
- tm.assert_sp_frame_equal(result, expected)
-
- result = float_frame.cumsum(axis=0)
- tm.assert_sp_frame_equal(result, expected)
-
- def test_numpy_cumsum(self, float_frame):
- result = np.cumsum(float_frame)
- expected = SparseDataFrame(float_frame.to_dense().cumsum())
- tm.assert_sp_frame_equal(result, expected)
-
- msg = "the 'dtype' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.cumsum(float_frame, dtype=np.int64)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.cumsum(float_frame, out=result)
-
- def test_numpy_func_call(self, float_frame):
- # no exception should be raised even though
- # numpy passes in 'axis=None' or `axis=-1'
- funcs = ["sum", "cumsum", "var", "mean", "prod", "cumprod", "std", "min", "max"]
- for func in funcs:
- getattr(np, func)(float_frame)
-
- @pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH 17386)")
- def test_quantile(self):
- # GH 17386
- data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
- q = 0.1
-
- sparse_df = SparseDataFrame(data)
- result = sparse_df.quantile(q)
-
- dense_df = DataFrame(data)
- dense_expected = dense_df.quantile(q)
- sparse_expected = SparseSeries(dense_expected)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
-
- @pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH 17386)")
- def test_quantile_multi(self):
- # GH 17386
- data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
- q = [0.1, 0.5]
-
- sparse_df = SparseDataFrame(data)
- result = sparse_df.quantile(q)
-
- dense_df = DataFrame(data)
- dense_expected = dense_df.quantile(q)
- sparse_expected = SparseDataFrame(dense_expected)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
-
- def test_assign_with_sparse_frame(self):
- # GH 19163
- df = pd.DataFrame({"a": [1, 2, 3]})
- res = df.to_sparse(fill_value=False).assign(newcol=False)
- exp = df.assign(newcol=False).to_sparse(fill_value=False)
-
- tm.assert_sp_frame_equal(res, exp)
-
- for column in res.columns:
- assert type(res[column]) is SparseSeries
-
- @pytest.mark.parametrize("inplace", [True, False])
- @pytest.mark.parametrize("how", ["all", "any"])
- def test_dropna(self, inplace, how):
- # Tests regression #21172.
- expected = pd.SparseDataFrame({"F2": [0, 1]})
- input_df = pd.SparseDataFrame(
- {"F1": [float("nan"), float("nan")], "F2": [0, 1]}
- )
- result_df = input_df.dropna(axis=1, inplace=inplace, how=how)
- if inplace:
- result_df = input_df
- tm.assert_sp_frame_equal(expected, result_df)
diff --git a/pandas/tests/sparse/frame/test_indexing.py b/pandas/tests/sparse/frame/test_indexing.py
deleted file mode 100644
index c93e9d1e0e8d1..0000000000000
--- a/pandas/tests/sparse/frame/test_indexing.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import DataFrame, SparseDataFrame
-from pandas.util import testing as tm
-
-pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)")
-
-
-@pytest.mark.parametrize(
- "data",
- [
- [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]],
- [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]],
- [
- [1.0, 1.0 + 1.0j],
- [2.0 + 2.0j, 2.0],
- [3.0, 3.0 + 3.0j],
- [4.0 + 4.0j, 4.0],
- [np.nan, np.nan],
- ],
- ],
-)
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_numeric_data(data):
- # GH 17386
- lower_bound = 1.5
-
- sparse = SparseDataFrame(data)
- result = sparse.where(sparse > lower_bound)
-
- dense = DataFrame(data)
- dense_expected = dense.where(dense > lower_bound)
- sparse_expected = SparseDataFrame(dense_expected)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
-
-
-@pytest.mark.parametrize(
- "data",
- [
- [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]],
- [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]],
- [
- [1.0, 1.0 + 1.0j],
- [2.0 + 2.0j, 2.0],
- [3.0, 3.0 + 3.0j],
- [4.0 + 4.0j, 4.0],
- [np.nan, np.nan],
- ],
- ],
-)
-@pytest.mark.parametrize("other", [True, -100, 0.1, 100.0 + 100.0j])
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_numeric_data_and_other(data, other):
- # GH 17386
- lower_bound = 1.5
-
- sparse = SparseDataFrame(data)
- result = sparse.where(sparse > lower_bound, other)
-
- dense = DataFrame(data)
- dense_expected = dense.where(dense > lower_bound, other)
- sparse_expected = SparseDataFrame(dense_expected, default_fill_value=other)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
-
-
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_bool_data():
- # GH 17386
- data = [[False, False], [True, True], [False, False]]
- cond = True
-
- sparse = SparseDataFrame(data)
- result = sparse.where(sparse == cond)
-
- dense = DataFrame(data)
- dense_expected = dense.where(dense == cond)
- sparse_expected = SparseDataFrame(dense_expected)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
-
-
-@pytest.mark.parametrize("other", [True, 0, 0.1, 100.0 + 100.0j])
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_bool_data_and_other(other):
- # GH 17386
- data = [[False, False], [True, True], [False, False]]
- cond = True
-
- sparse = SparseDataFrame(data)
- result = sparse.where(sparse == cond, other)
-
- dense = DataFrame(data)
- dense_expected = dense.where(dense == cond, other)
- sparse_expected = SparseDataFrame(dense_expected, default_fill_value=other)
-
- tm.assert_frame_equal(result, dense_expected)
- tm.assert_sp_frame_equal(result, sparse_expected)
diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py
deleted file mode 100644
index 4ba4fba7391d4..0000000000000
--- a/pandas/tests/sparse/frame/test_to_csv.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import SparseDataFrame, read_csv
-from pandas.util import testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrameToCsv:
- fill_values = [np.nan, 0, None, 1]
-
- @pytest.mark.parametrize("fill_value", fill_values)
- def test_to_csv_sparse_dataframe(self, fill_value):
- # GH19384
- sdf = SparseDataFrame(
- {"a": type(self).fill_values}, default_fill_value=fill_value
- )
-
- with tm.ensure_clean("sparse_df.csv") as path:
- sdf.to_csv(path, index=False)
- df = read_csv(path, skip_blank_lines=False)
-
- tm.assert_sp_frame_equal(df.to_sparse(fill_value=fill_value), sdf)
diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py
deleted file mode 100644
index 9d1ccc62146ab..0000000000000
--- a/pandas/tests/sparse/frame/test_to_from_scipy.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas.core.dtypes.common import is_bool_dtype
-
-import pandas as pd
-from pandas import SparseDataFrame, SparseSeries
-from pandas.core.sparse.api import SparseDtype
-from pandas.util import testing as tm
-
-scipy = pytest.importorskip("scipy")
-ignore_matrix_warning = pytest.mark.filterwarnings(
- "ignore:the matrix subclass:PendingDeprecationWarning"
-)
-
-
-@pytest.mark.parametrize("index", [None, list("abc")]) # noqa: F811
-@pytest.mark.parametrize("columns", [None, list("def")])
-@pytest.mark.parametrize("fill_value", [None, 0, np.nan])
-@pytest.mark.parametrize("dtype", [bool, int, float, np.uint16])
-@ignore_matrix_warning
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
- # GH 4343
- # Make one ndarray and from it one sparse matrix, both to be used for
- # constructing frames and comparing results
- arr = np.eye(3, dtype=dtype)
- # GH 16179
- arr[0, 1] = dtype(2)
- try:
- spm = spmatrix(arr)
- assert spm.dtype == arr.dtype
- except (TypeError, AssertionError):
- # If conversion to sparse fails for this spmatrix type and arr.dtype,
- # then the combination is not currently supported in NumPy, so we
- # can just skip testing it thoroughly
- return
-
- sdf = SparseDataFrame(
- spm, index=index, columns=columns, default_fill_value=fill_value
- )
-
- # Expected result construction is kind of tricky for all
- # dtype-fill_value combinations; easiest to cast to something generic
- # and except later on
- rarr = arr.astype(object)
- rarr[arr == 0] = np.nan
- expected = SparseDataFrame(rarr, index=index, columns=columns).fillna(
- fill_value if fill_value is not None else np.nan
- )
-
- # Assert frame is as expected
- sdf_obj = sdf.astype(object)
- tm.assert_sp_frame_equal(sdf_obj, expected)
- tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
-
- # Assert spmatrices equal
- assert dict(sdf.to_coo().todok()) == dict(spm.todok())
-
- # Ensure dtype is preserved if possible
- # XXX: verify this
- res_dtype = bool if is_bool_dtype(dtype) else dtype
- tm.assert_contains_all(
- sdf.dtypes.apply(lambda dtype: dtype.subtype), {np.dtype(res_dtype)}
- )
- assert sdf.to_coo().dtype == res_dtype
-
- # However, adding a str column results in an upcast to object
- sdf["strings"] = np.arange(len(sdf)).astype(str)
- assert sdf.to_coo().dtype == np.object_
-
-
-@pytest.mark.parametrize("fill_value", [None, 0, np.nan]) # noqa: F811
-@ignore_matrix_warning
-@pytest.mark.filterwarnings("ignore:object dtype is not supp:UserWarning")
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_from_to_scipy_object(spmatrix, fill_value):
- # GH 4343
- dtype = object
- columns = list("cd")
- index = list("ab")
-
- if spmatrix is scipy.sparse.dok_matrix:
- pytest.skip("dok_matrix from object does not work in SciPy")
-
- # Make one ndarray and from it one sparse matrix, both to be used for
- # constructing frames and comparing results
- arr = np.eye(2, dtype=dtype)
- try:
- spm = spmatrix(arr)
- assert spm.dtype == arr.dtype
- except (TypeError, AssertionError):
- # If conversion to sparse fails for this spmatrix type and arr.dtype,
- # then the combination is not currently supported in NumPy, so we
- # can just skip testing it thoroughly
- return
-
- sdf = SparseDataFrame(
- spm, index=index, columns=columns, default_fill_value=fill_value
- )
-
- # Expected result construction is kind of tricky for all
- # dtype-fill_value combinations; easiest to cast to something generic
- # and except later on
- rarr = arr.astype(object)
- rarr[arr == 0] = np.nan
- expected = SparseDataFrame(rarr, index=index, columns=columns).fillna(
- fill_value if fill_value is not None else np.nan
- )
-
- # Assert frame is as expected
- sdf_obj = sdf.astype(SparseDtype(object, fill_value))
- tm.assert_sp_frame_equal(sdf_obj, expected)
- tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
-
- # Assert spmatrices equal
- assert dict(sdf.to_coo().todok()) == dict(spm.todok())
-
- # Ensure dtype is preserved if possible
- res_dtype = object
- tm.assert_contains_all(
- sdf.dtypes.apply(lambda dtype: dtype.subtype), {np.dtype(res_dtype)}
- )
- assert sdf.to_coo().dtype == res_dtype
-
-
-@ignore_matrix_warning
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_from_scipy_correct_ordering(spmatrix):
- # GH 16179
- arr = np.arange(1, 5).reshape(2, 2)
- try:
- spm = spmatrix(arr)
- assert spm.dtype == arr.dtype
- except (TypeError, AssertionError):
- # If conversion to sparse fails for this spmatrix type and arr.dtype,
- # then the combination is not currently supported in NumPy, so we
- # can just skip testing it thoroughly
- return
-
- sdf = SparseDataFrame(spm)
- expected = SparseDataFrame(arr)
- tm.assert_sp_frame_equal(sdf, expected)
- tm.assert_frame_equal(sdf.to_dense(), expected.to_dense())
-
-
-@ignore_matrix_warning
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_from_scipy_fillna(spmatrix):
- # GH 16112
- arr = np.eye(3)
- arr[1:, 0] = np.nan
-
- try:
- spm = spmatrix(arr)
- assert spm.dtype == arr.dtype
- except (TypeError, AssertionError):
- # If conversion to sparse fails for this spmatrix type and arr.dtype,
- # then the combination is not currently supported in NumPy, so we
- # can just skip testing it thoroughly
- return
-
- sdf = SparseDataFrame(spm).fillna(-1.0)
-
- # Returning frame should fill all nan values with -1.0
- expected = SparseDataFrame(
- {
- 0: SparseSeries([1.0, -1, -1]),
- 1: SparseSeries([np.nan, 1, np.nan]),
- 2: SparseSeries([np.nan, np.nan, 1]),
- },
- default_fill_value=-1,
- )
-
- # fill_value is expected to be what .fillna() above was called with
- # We don't use -1 as initial fill_value in expected SparseSeries
- # construction because this way we obtain "compressed" SparseArrays,
- # avoiding having to construct them ourselves
- for col in expected:
- expected[col].fill_value = -1
-
- tm.assert_sp_frame_equal(sdf, expected)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-def test_index_names_multiple_nones():
- # https://github.com/pandas-dev/pandas/pull/24092
- sparse = pytest.importorskip("scipy.sparse")
-
- s = pd.Series(1, index=pd.MultiIndex.from_product([["A", "B"], [0, 1]])).to_sparse()
- result, _, _ = s.to_coo()
- assert isinstance(result, sparse.coo_matrix)
- result = result.toarray()
- expected = np.ones((2, 2), dtype="int64")
- tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/sparse/series/__init__.py b/pandas/tests/sparse/series/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tests/sparse/series/test_indexing.py b/pandas/tests/sparse/series/test_indexing.py
deleted file mode 100644
index c75f3b2134f91..0000000000000
--- a/pandas/tests/sparse/series/test_indexing.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import Series, SparseSeries
-from pandas.util import testing as tm
-
-pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)")
-
-
-@pytest.mark.parametrize(
- "data",
- [
- [1, 1, 2, 2, 3, 3, 4, 4, 0, 0],
- [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan],
- [
- 1.0,
- 1.0 + 1.0j,
- 2.0 + 2.0j,
- 2.0,
- 3.0,
- 3.0 + 3.0j,
- 4.0 + 4.0j,
- 4.0,
- np.nan,
- np.nan,
- ],
- ],
-)
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_numeric_data(data):
- # GH 17386
- lower_bound = 1.5
-
- sparse = SparseSeries(data)
- result = sparse.where(sparse > lower_bound)
-
- dense = Series(data)
- dense_expected = dense.where(dense > lower_bound)
- sparse_expected = SparseSeries(dense_expected)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
-
-
-@pytest.mark.parametrize(
- "data",
- [
- [1, 1, 2, 2, 3, 3, 4, 4, 0, 0],
- [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan],
- [
- 1.0,
- 1.0 + 1.0j,
- 2.0 + 2.0j,
- 2.0,
- 3.0,
- 3.0 + 3.0j,
- 4.0 + 4.0j,
- 4.0,
- np.nan,
- np.nan,
- ],
- ],
-)
-@pytest.mark.parametrize("other", [True, -100, 0.1, 100.0 + 100.0j])
-@pytest.mark.skip(reason="Wrong SparseBlock initialization (Segfault) (GH 17386)")
-def test_where_with_numeric_data_and_other(data, other):
- # GH 17386
- lower_bound = 1.5
-
- sparse = SparseSeries(data)
- result = sparse.where(sparse > lower_bound, other)
-
- dense = Series(data)
- dense_expected = dense.where(dense > lower_bound, other)
- sparse_expected = SparseSeries(dense_expected, fill_value=other)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
-
-
-@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
-def test_where_with_bool_data():
- # GH 17386
- data = [False, False, True, True, False, False]
- cond = True
-
- sparse = SparseSeries(data)
- result = sparse.where(sparse == cond)
-
- dense = Series(data)
- dense_expected = dense.where(dense == cond)
- sparse_expected = SparseSeries(dense_expected)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
-
-
-@pytest.mark.parametrize("other", [True, 0, 0.1, 100.0 + 100.0j])
-@pytest.mark.skip(reason="Wrong SparseBlock initialization (Segfault) (GH 17386)")
-def test_where_with_bool_data_and_other(other):
- # GH 17386
- data = [False, False, True, True, False, False]
- cond = True
-
- sparse = SparseSeries(data)
- result = sparse.where(sparse == cond, other)
-
- dense = Series(data)
- dense_expected = dense.where(dense == cond, other)
- sparse_expected = SparseSeries(dense_expected, fill_value=other)
-
- tm.assert_series_equal(result, dense_expected)
- tm.assert_sp_series_equal(result, sparse_expected)
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
deleted file mode 100644
index a9c3d157dd69b..0000000000000
--- a/pandas/tests/sparse/series/test_series.py
+++ /dev/null
@@ -1,1599 +0,0 @@
-from datetime import datetime
-import operator
-
-import numpy as np
-from numpy import nan
-import pytest
-
-from pandas._libs.sparse import BlockIndex, IntIndex
-from pandas.compat import PY36
-from pandas.errors import PerformanceWarning
-import pandas.util._test_decorators as td
-
-import pandas as pd
-from pandas import DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna
-from pandas.core import ops
-from pandas.core.reshape.util import cartesian_product
-import pandas.core.sparse.frame as spf
-from pandas.tests.series.test_api import SharedWithSparse
-import pandas.util.testing as tm
-
-from pandas.tseries.offsets import BDay
-
-
-def test_deprecated():
- with tm.assert_produces_warning(FutureWarning):
- pd.SparseSeries([0, 1])
-
-
-def _test_data1():
- # nan-based
- arr = np.arange(20, dtype=float)
- index = np.arange(20)
- arr[:2] = nan
- arr[5:10] = nan
- arr[-3:] = nan
-
- return arr, index
-
-
-def _test_data2():
- # nan-based
- arr = np.arange(15, dtype=float)
- index = np.arange(15)
- arr[7:12] = nan
- arr[-1:] = nan
- return arr, index
-
-
-def _test_data1_zero():
- # zero-based
- arr, index = _test_data1()
- arr[np.isnan(arr)] = 0
- return arr, index
-
-
-def _test_data2_zero():
- # zero-based
- arr, index = _test_data2()
- arr[np.isnan(arr)] = 0
- return arr, index
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseSeries(SharedWithSparse):
-
- series_klass = SparseSeries
- # SharedWithSparse tests use generic, series_klass-agnostic assertion
- _assert_series_equal = staticmethod(tm.assert_sp_series_equal)
-
- def setup_method(self, method):
- arr, index = _test_data1()
-
- date_index = bdate_range("1/1/2011", periods=len(index))
-
- self.bseries = SparseSeries(arr, index=index, kind="block", name="bseries")
- self.ts = self.bseries
-
- self.btseries = SparseSeries(arr, index=date_index, kind="block")
-
- self.iseries = SparseSeries(arr, index=index, kind="integer", name="iseries")
-
- arr, index = _test_data2()
- self.bseries2 = SparseSeries(arr, index=index, kind="block")
- self.iseries2 = SparseSeries(arr, index=index, kind="integer")
-
- arr, index = _test_data1_zero()
- self.zbseries = SparseSeries(
- arr, index=index, kind="block", fill_value=0, name="zbseries"
- )
- self.ziseries = SparseSeries(arr, index=index, kind="integer", fill_value=0)
-
- arr, index = _test_data2_zero()
- self.zbseries2 = SparseSeries(arr, index=index, kind="block", fill_value=0)
- self.ziseries2 = SparseSeries(arr, index=index, kind="integer", fill_value=0)
-
- def test_constructor_dict_input(self):
- # gh-16905
- constructor_dict = {1: 1.0}
- index = [0, 1, 2]
-
- # Series with index passed in
- series = pd.Series(constructor_dict)
- expected = SparseSeries(series, index=index)
-
- result = SparseSeries(constructor_dict, index=index)
- tm.assert_sp_series_equal(result, expected)
-
- # Series with index and dictionary with no index
- expected = SparseSeries(series)
-
- result = SparseSeries(constructor_dict)
- tm.assert_sp_series_equal(result, expected)
-
- def test_constructor_dict_order(self):
- # GH19018
- # initialization ordering: by insertion order if python>= 3.6, else
- # order by value
- d = {"b": 1, "a": 0, "c": 2}
- result = SparseSeries(d)
- if PY36:
- expected = SparseSeries([1, 0, 2], index=list("bac"))
- else:
- expected = SparseSeries([0, 1, 2], index=list("abc"))
- tm.assert_sp_series_equal(result, expected)
-
- def test_constructor_dtype(self):
- arr = SparseSeries([np.nan, 1, 2, np.nan])
- assert arr.dtype == SparseDtype(np.float64)
- assert np.isnan(arr.fill_value)
-
- arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
- assert arr.dtype == SparseDtype(np.float64, 0)
- assert arr.fill_value == 0
-
- arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
- assert arr.dtype == SparseDtype(np.int64, np.nan)
- assert np.isnan(arr.fill_value)
-
- arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
- assert arr.dtype == SparseDtype(np.int64, 0)
- assert arr.fill_value == 0
-
- arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)
- assert arr.dtype == SparseDtype(np.int64, 0)
- assert arr.fill_value == 0
-
- def test_iteration_and_str(self):
- [x for x in self.bseries]
- str(self.bseries)
-
- def test_construct_DataFrame_with_sp_series(self):
- # it works!
- df = DataFrame({"col": self.bseries})
-
- # printing & access
- df.iloc[:1]
- df["col"]
- df.dtypes
- str(df)
-
- # blocking
- expected = Series({"col": "float64:sparse"})
-
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- result = df.ftypes
- tm.assert_series_equal(expected, result)
-
- def test_constructor_preserve_attr(self):
- arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
- assert arr.dtype == SparseDtype(np.int64)
- assert arr.fill_value == 0
-
- s = pd.SparseSeries(arr, name="x")
- assert s.dtype == SparseDtype(np.int64)
- assert s.fill_value == 0
-
- def test_series_density(self):
- # GH2803
- ts = Series(np.random.randn(10))
- ts[2:-2] = nan
- sts = ts.to_sparse()
- density = sts.density # don't die
- assert density == 4 / 10.0
-
- def test_sparse_to_dense(self):
- arr, index = _test_data1()
- series = self.bseries.to_dense()
- tm.assert_series_equal(series, Series(arr, name="bseries"))
-
- series = self.iseries.to_dense()
- tm.assert_series_equal(series, Series(arr, name="iseries"))
-
- arr, index = _test_data1_zero()
- series = self.zbseries.to_dense()
- tm.assert_series_equal(series, Series(arr, name="zbseries"))
-
- series = self.ziseries.to_dense()
- tm.assert_series_equal(series, Series(arr))
-
- def test_to_dense_fill_value(self):
- s = pd.Series([1, np.nan, np.nan, 3, np.nan])
- res = SparseSeries(s).to_dense()
- tm.assert_series_equal(res, s)
-
- res = SparseSeries(s, fill_value=0).to_dense()
- tm.assert_series_equal(res, s)
-
- s = pd.Series([1, np.nan, 0, 3, 0])
- res = SparseSeries(s, fill_value=0).to_dense()
- tm.assert_series_equal(res, s)
-
- res = SparseSeries(s, fill_value=0).to_dense()
- tm.assert_series_equal(res, s)
-
- s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
- res = SparseSeries(s).to_dense()
- tm.assert_series_equal(res, s)
-
- s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
- res = SparseSeries(s, fill_value=0).to_dense()
- tm.assert_series_equal(res, s)
-
- def test_dense_to_sparse(self):
- series = self.bseries.to_dense()
- bseries = series.to_sparse(kind="block")
- iseries = series.to_sparse(kind="integer")
- tm.assert_sp_series_equal(bseries, self.bseries)
- tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
- assert iseries.name == self.bseries.name
-
- assert len(series) == len(bseries)
- assert len(series) == len(iseries)
- assert series.shape == bseries.shape
- assert series.shape == iseries.shape
-
- # non-NaN fill value
- series = self.zbseries.to_dense()
- zbseries = series.to_sparse(kind="block", fill_value=0)
- ziseries = series.to_sparse(kind="integer", fill_value=0)
- tm.assert_sp_series_equal(zbseries, self.zbseries)
- tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
- assert ziseries.name == self.zbseries.name
-
- assert len(series) == len(zbseries)
- assert len(series) == len(ziseries)
- assert series.shape == zbseries.shape
- assert series.shape == ziseries.shape
-
- def test_to_dense_preserve_name(self):
- assert self.bseries.name is not None
- result = self.bseries.to_dense()
- assert result.name == self.bseries.name
-
- def test_constructor(self):
- # test setup guys
- assert np.isnan(self.bseries.fill_value)
- assert isinstance(self.bseries.sp_index, BlockIndex)
- assert np.isnan(self.iseries.fill_value)
- assert isinstance(self.iseries.sp_index, IntIndex)
-
- assert self.zbseries.fill_value == 0
- tm.assert_numpy_array_equal(
- self.zbseries.values.to_dense(), self.bseries.to_dense().fillna(0).values
- )
-
- # pass SparseSeries
- def _check_const(sparse, name):
- # use passed series name
- result = SparseSeries(sparse)
- tm.assert_sp_series_equal(result, sparse)
- assert sparse.name == name
- assert result.name == name
-
- # use passed name
- result = SparseSeries(sparse, name="x")
- tm.assert_sp_series_equal(result, sparse, check_names=False)
- assert result.name == "x"
-
- _check_const(self.bseries, "bseries")
- _check_const(self.iseries, "iseries")
- _check_const(self.zbseries, "zbseries")
-
- # Sparse time series works
- date_index = bdate_range("1/1/2000", periods=len(self.bseries))
- s5 = SparseSeries(self.bseries, index=date_index)
- assert isinstance(s5, SparseSeries)
-
- # pass Series
- bseries2 = SparseSeries(self.bseries.to_dense())
- tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)
-
- # pass dict?
-
- # don't copy the data by default
- values = np.ones(self.bseries.npoints)
- sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
- sp.sp_values[:5] = 97
- assert values[0] == 97
-
- assert len(sp) == 20
- assert sp.shape == (20,)
-
- # but can make it copy!
- sp = SparseSeries(values, sparse_index=self.bseries.sp_index, copy=True)
- sp.sp_values[:5] = 100
- assert values[0] == 97
-
- assert len(sp) == 20
- assert sp.shape == (20,)
-
- def test_constructor_scalar(self):
- data = 5
- sp = SparseSeries(data, np.arange(100))
- sp = sp.reindex(np.arange(200))
- assert (sp.loc[:99] == data).all()
- assert isna(sp.loc[100:]).all()
-
- data = np.nan
- sp = SparseSeries(data, np.arange(100))
- assert len(sp) == 100
- assert sp.shape == (100,)
-
- def test_constructor_ndarray(self):
- pass
-
- def test_constructor_nonnan(self):
- arr = [0, 0, 0, nan, nan]
- sp_series = SparseSeries(arr, fill_value=0)
- tm.assert_numpy_array_equal(sp_series.values.to_dense(), np.array(arr))
- assert len(sp_series) == 5
- assert sp_series.shape == (5,)
-
- def test_constructor_empty(self):
- # see gh-9272
- sp = SparseSeries()
- assert len(sp.index) == 0
- assert sp.shape == (0,)
-
- def test_copy_astype(self):
- cop = self.bseries.astype(np.float64)
- assert cop is not self.bseries
- assert cop.sp_index is self.bseries.sp_index
- assert cop.dtype == SparseDtype(np.float64)
-
- cop2 = self.iseries.copy()
-
- tm.assert_sp_series_equal(cop, self.bseries)
- tm.assert_sp_series_equal(cop2, self.iseries)
-
- # test that data is copied
- cop[:5] = 97
- assert cop.sp_values[0] == 97
- assert self.bseries.sp_values[0] != 97
-
- # correct fill value
- zbcop = self.zbseries.copy()
- zicop = self.ziseries.copy()
-
- tm.assert_sp_series_equal(zbcop, self.zbseries)
- tm.assert_sp_series_equal(zicop, self.ziseries)
-
- # no deep copy
- view = self.bseries.copy(deep=False)
- view.sp_values[:5] = 5
- assert (self.bseries.sp_values[:5] == 5).all()
-
- def test_shape(self):
- # see gh-10452
- assert self.bseries.shape == (20,)
- assert self.btseries.shape == (20,)
- assert self.iseries.shape == (20,)
-
- assert self.bseries2.shape == (15,)
- assert self.iseries2.shape == (15,)
-
- assert self.zbseries2.shape == (15,)
- assert self.ziseries2.shape == (15,)
-
- def test_astype(self):
- result = self.bseries.astype(SparseDtype(np.int64, 0))
- expected = (
- self.bseries.to_dense().fillna(0).astype(np.int64).to_sparse(fill_value=0)
- )
- tm.assert_sp_series_equal(result, expected)
-
- def test_astype_all(self):
- orig = pd.Series(np.array([1, 2, 3]))
- s = SparseSeries(orig)
-
- types = [np.float64, np.float32, np.int64, np.int32, np.int16, np.int8]
- for typ in types:
- dtype = SparseDtype(typ)
- res = s.astype(dtype)
- assert res.dtype == dtype
- tm.assert_series_equal(res.to_dense(), orig.astype(typ))
-
- def test_kind(self):
- assert self.bseries.kind == "block"
- assert self.iseries.kind == "integer"
-
- def test_to_frame(self):
- # GH 9850
- s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name="x")
- exp = pd.SparseDataFrame({"x": [1, 2, 0, nan, 4, nan, 0]})
- tm.assert_sp_frame_equal(s.to_frame(), exp)
-
- exp = pd.SparseDataFrame({"y": [1, 2, 0, nan, 4, nan, 0]})
- tm.assert_sp_frame_equal(s.to_frame(name="y"), exp)
-
- s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name="x", fill_value=0)
- exp = pd.SparseDataFrame({"x": [1, 2, 0, nan, 4, nan, 0]}, default_fill_value=0)
-
- tm.assert_sp_frame_equal(s.to_frame(), exp)
- exp = pd.DataFrame({"y": [1, 2, 0, nan, 4, nan, 0]})
- tm.assert_frame_equal(s.to_frame(name="y").to_dense(), exp)
-
- def test_pickle(self):
- def _test_roundtrip(series):
- unpickled = tm.round_trip_pickle(series)
- tm.assert_sp_series_equal(series, unpickled)
- tm.assert_series_equal(series.to_dense(), unpickled.to_dense())
-
- self._check_all(_test_roundtrip)
-
- def _check_all(self, check_func):
- check_func(self.bseries)
- check_func(self.iseries)
- check_func(self.zbseries)
- check_func(self.ziseries)
-
- def test_getitem(self):
- def _check_getitem(sp, dense):
- for idx, val in dense.items():
- tm.assert_almost_equal(val, sp[idx])
-
- for i in range(len(dense)):
- tm.assert_almost_equal(sp[i], dense[i])
- # j = np.float64(i)
- # assert_almost_equal(sp[j], dense[j])
-
- # API change 1/6/2012
- # negative getitem works
- # for i in xrange(len(dense)):
- # assert_almost_equal(sp[-i], dense[-i])
-
- _check_getitem(self.bseries, self.bseries.to_dense())
- _check_getitem(self.btseries, self.btseries.to_dense())
-
- _check_getitem(self.zbseries, self.zbseries.to_dense())
- _check_getitem(self.iseries, self.iseries.to_dense())
- _check_getitem(self.ziseries, self.ziseries.to_dense())
-
- # exception handling
- with pytest.raises(IndexError, match="Out of bounds access"):
- self.bseries[len(self.bseries) + 1]
-
- # index not contained
- msg = r"Timestamp\('2011-01-31 00:00:00', freq='B'\)"
- with pytest.raises(KeyError, match=msg):
- self.btseries[self.btseries.index[-1] + BDay()]
-
- def test_get_get_value(self):
- tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
- assert self.bseries.get(len(self.bseries) + 1) is None
-
- dt = self.btseries.index[10]
- result = self.btseries.get(dt)
- expected = self.btseries.to_dense()[dt]
- tm.assert_almost_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- tm.assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
-
- def test_set_value(self):
-
- idx = self.btseries.index[7]
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.btseries.set_value(idx, 0)
- assert self.btseries[idx] == 0
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.iseries.set_value("foobar", 0)
- assert self.iseries.index[-1] == "foobar"
- assert self.iseries["foobar"] == 0
-
- def test_getitem_slice(self):
- idx = self.bseries.index
- res = self.bseries[::2]
- assert isinstance(res, SparseSeries)
-
- expected = self.bseries.reindex(idx[::2])
- tm.assert_sp_series_equal(res, expected)
-
- res = self.bseries[:5]
- assert isinstance(res, SparseSeries)
- tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
-
- res = self.bseries[5:]
- tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
-
- # negative indices
- res = self.bseries[:-3]
- tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
-
- def test_take(self):
- def _compare_with_dense(sp):
- dense = sp.to_dense()
-
- def _compare(idx):
- dense_result = dense.take(idx).values
- sparse_result = sp.take(idx)
- assert isinstance(sparse_result, SparseSeries)
- tm.assert_almost_equal(dense_result, sparse_result.values.to_dense())
-
- _compare([1.0, 2.0, 3.0, 4.0, 5.0, 0.0])
- _compare([7, 2, 9, 0, 4])
- _compare([3, 6, 3, 4, 7])
-
- self._check_all(_compare_with_dense)
-
- msg = "index 21 is out of bounds for size 20"
- with pytest.raises(IndexError, match=msg):
- self.bseries.take([0, len(self.bseries) + 1])
-
- # Corner case
- # XXX: changed test. Why wsa this considered a corner case?
- sp = SparseSeries(np.ones(10) * nan)
- exp = pd.Series(np.repeat(nan, 5))
- tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp.to_sparse())
-
- def test_numpy_take(self):
- sp = SparseSeries([1.0, 2.0, 3.0])
- indices = [1, 2]
-
- tm.assert_series_equal(
- np.take(sp, indices, axis=0).to_dense(),
- np.take(sp.to_dense(), indices, axis=0),
- )
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.take(sp, indices, out=np.empty(sp.shape))
-
- msg = "the 'mode' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.take(sp, indices, out=None, mode="clip")
-
- def test_setitem(self):
- self.bseries[5] = 7.0
- assert self.bseries[5] == 7.0
-
- def test_setslice(self):
- self.bseries[5:10] = 7.0
- tm.assert_series_equal(
- self.bseries[5:10].to_dense(),
- Series(7.0, index=range(5, 10), name=self.bseries.name),
- )
-
- def test_operators(self):
- def _check_op(a, b, op):
- sp_result = op(a, b)
- adense = a.to_dense() if isinstance(a, SparseSeries) else a
- bdense = b.to_dense() if isinstance(b, SparseSeries) else b
- dense_result = op(adense, bdense)
- if "floordiv" in op.__name__:
- # Series sets 1//0 to np.inf, which SparseSeries does not do (yet)
- mask = np.isinf(dense_result)
- dense_result[mask] = np.nan
- tm.assert_almost_equal(sp_result.to_dense(), dense_result)
-
- def check(a, b):
- _check_op(a, b, operator.add)
- _check_op(a, b, operator.sub)
- _check_op(a, b, operator.truediv)
- _check_op(a, b, operator.floordiv)
- _check_op(a, b, operator.mul)
-
- _check_op(a, b, ops.radd)
- _check_op(a, b, ops.rsub)
- _check_op(a, b, ops.rtruediv)
- _check_op(a, b, ops.rfloordiv)
- _check_op(a, b, ops.rmul)
-
- # FIXME: don't leave commented-out
- # NaN ** 0 = 1 in C?
- # _check_op(a, b, operator.pow)
- # _check_op(a, b, ops.rpow)
-
- check(self.bseries, self.bseries)
- check(self.iseries, self.iseries)
- check(self.bseries, self.iseries)
-
- check(self.bseries, self.bseries2)
- check(self.bseries, self.iseries2)
- check(self.iseries, self.iseries2)
-
- # scalar value
- check(self.bseries, 5)
-
- # zero-based
- check(self.zbseries, self.zbseries * 2)
- check(self.zbseries, self.zbseries2)
- check(self.ziseries, self.ziseries2)
-
- # with dense
- result = self.bseries + self.bseries.to_dense()
- tm.assert_sp_series_equal(result, self.bseries + self.bseries)
-
- def test_binary_operators(self):
-
- # skipping for now #####
- import pytest
-
- pytest.skip("skipping sparse binary operators test")
-
- def _check_inplace_op(iop, op):
- tmp = self.bseries.copy()
-
- expected = op(tmp, self.bseries)
- iop(tmp, self.bseries)
- tm.assert_sp_series_equal(tmp, expected)
-
- inplace_ops = ["add", "sub", "mul", "truediv", "floordiv", "pow"]
- for op in inplace_ops:
- _check_inplace_op(
- getattr(operator, "i{op}".format(op=op)), getattr(operator, op)
- )
-
- @pytest.mark.parametrize(
- "values, op, fill_value",
- [
- ([True, False, False, True], operator.invert, True),
- ([True, False, False, True], operator.invert, False),
- ([0, 1, 2, 3], operator.pos, 0),
- ([0, 1, 2, 3], operator.neg, 0),
- ([0, np.nan, 2, 3], operator.pos, np.nan),
- ([0, np.nan, 2, 3], operator.neg, np.nan),
- ],
- )
- def test_unary_operators(self, values, op, fill_value):
- # https://github.com/pandas-dev/pandas/issues/22835
- values = np.asarray(values)
- if op is operator.invert:
- new_fill_value = not fill_value
- else:
- new_fill_value = op(fill_value)
- s = SparseSeries(
- values, fill_value=fill_value, index=["a", "b", "c", "d"], name="name"
- )
- result = op(s)
- expected = SparseSeries(
- op(values),
- fill_value=new_fill_value,
- index=["a", "b", "c", "d"],
- name="name",
- )
- tm.assert_sp_series_equal(result, expected)
-
- def test_abs(self):
- s = SparseSeries([1, 2, -3], name="x")
- expected = SparseSeries([1, 2, 3], name="x")
- result = s.abs()
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- result = abs(s)
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- result = np.abs(s)
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- s = SparseSeries([1, -2, 2, -3], fill_value=-2, name="x")
- expected = SparseSeries(
- [1, 2, 3], sparse_index=s.sp_index, fill_value=2, name="x"
- )
- result = s.abs()
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- result = abs(s)
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- result = np.abs(s)
- tm.assert_sp_series_equal(result, expected)
- assert result.name == "x"
-
- def test_reindex(self):
- def _compare_with_series(sps, new_index):
- spsre = sps.reindex(new_index)
-
- series = sps.to_dense()
- seriesre = series.reindex(new_index)
- seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
-
- tm.assert_sp_series_equal(spsre, seriesre)
- tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())
-
- _compare_with_series(self.bseries, self.bseries.index[::2])
- _compare_with_series(self.bseries, list(self.bseries.index[::2]))
- _compare_with_series(self.bseries, self.bseries.index[:10])
- _compare_with_series(self.bseries, self.bseries.index[5:])
-
- _compare_with_series(self.zbseries, self.zbseries.index[::2])
- _compare_with_series(self.zbseries, self.zbseries.index[:10])
- _compare_with_series(self.zbseries, self.zbseries.index[5:])
-
- # special cases
- same_index = self.bseries.reindex(self.bseries.index)
- tm.assert_sp_series_equal(self.bseries, same_index)
- assert same_index is not self.bseries
-
- # corner cases
- sp = SparseSeries([], index=[])
- # TODO: sp_zero is not used anywhere...remove?
- sp_zero = SparseSeries([], index=[], fill_value=0) # noqa
- _compare_with_series(sp, np.arange(10))
-
- # with copy=False
- reindexed = self.bseries.reindex(self.bseries.index, copy=True)
- reindexed.sp_values[:] = 1.0
- assert (self.bseries.sp_values != 1.0).all()
-
- reindexed = self.bseries.reindex(self.bseries.index, copy=False)
- reindexed.sp_values[:] = 1.0
- tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1.0, 10))
-
- def test_sparse_reindex(self):
- length = 10
-
- def _check(values, index1, index2, fill_value):
- first_series = SparseSeries(
- values, sparse_index=index1, fill_value=fill_value
- )
- reindexed = first_series.sparse_reindex(index2)
- assert reindexed.sp_index is index2
-
- int_indices1 = index1.to_int_index().indices
- int_indices2 = index2.to_int_index().indices
-
- expected = Series(values, index=int_indices1)
- expected = expected.reindex(int_indices2).fillna(fill_value)
- tm.assert_almost_equal(expected.values, reindexed.sp_values)
-
- # make sure level argument asserts
- # TODO: expected is not used anywhere...remove?
- expected = expected.reindex(int_indices2).fillna(fill_value) # noqa
-
- def _check_with_fill_value(values, first, second, fill_value=nan):
- i_index1 = IntIndex(length, first)
- i_index2 = IntIndex(length, second)
-
- b_index1 = i_index1.to_block_index()
- b_index2 = i_index2.to_block_index()
-
- _check(values, i_index1, i_index2, fill_value)
- _check(values, b_index1, b_index2, fill_value)
-
- def _check_all(values, first, second):
- _check_with_fill_value(values, first, second, fill_value=nan)
- _check_with_fill_value(values, first, second, fill_value=0)
-
- index1 = [2, 4, 5, 6, 8, 9]
- values1 = np.arange(6.0)
-
- _check_all(values1, index1, [2, 4, 5])
- _check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
- _check_all(values1, index1, [0, 1])
- _check_all(values1, index1, [0, 1, 7, 8, 9])
- _check_all(values1, index1, [])
-
- first_series = SparseSeries(
- values1, sparse_index=IntIndex(length, index1), fill_value=nan
- )
- with pytest.raises(TypeError, match="new index must be a SparseIndex"):
- first_series.sparse_reindex(0)
-
- def test_repr(self):
- # TODO: These aren't used
- bsrepr = repr(self.bseries) # noqa
- isrepr = repr(self.iseries) # noqa
-
- def test_iter(self):
- pass
-
- def test_truncate(self):
- pass
-
- def test_fillna(self):
- pass
-
- def test_groupby(self):
- pass
-
- def test_reductions(self):
- def _compare_with_dense(obj, op):
- sparse_result = getattr(obj, op)()
- series = obj.to_dense()
- dense_result = getattr(series, op)()
- assert sparse_result == dense_result
-
- to_compare = ["count", "sum", "mean", "std", "var", "skew"]
-
- def _compare_all(obj):
- for op in to_compare:
- _compare_with_dense(obj, op)
-
- _compare_all(self.bseries)
-
- self.bseries.sp_values[5:10] = np.NaN
- _compare_all(self.bseries)
-
- _compare_all(self.zbseries)
- self.zbseries.sp_values[5:10] = np.NaN
- _compare_all(self.zbseries)
-
- series = self.zbseries.copy()
- series.fill_value = 2
- _compare_all(series)
-
- nonna = Series(np.random.randn(20)).to_sparse()
- _compare_all(nonna)
-
- nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
- _compare_all(nonna2)
-
- def test_dropna(self):
- sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)
-
- sp_valid = sp.dropna()
-
- expected = sp.to_dense().dropna()
- expected = expected[expected != 0]
- exp_arr = pd.SparseArray(expected.values, fill_value=0, kind="block")
- tm.assert_sp_array_equal(sp_valid.values, exp_arr)
- tm.assert_index_equal(sp_valid.index, expected.index)
- assert len(sp_valid.sp_values) == 2
-
- result = self.bseries.dropna()
- expected = self.bseries.to_dense().dropna()
- assert not isinstance(result, SparseSeries)
- tm.assert_series_equal(result, expected)
-
- def test_homogenize(self):
- def _check_matches(indices, expected):
- data = {
- i: SparseSeries(
- idx.to_int_index().indices, sparse_index=idx, fill_value=np.nan
- )
- for i, idx in enumerate(indices)
- }
-
- # homogenized is only valid with NaN fill values
- homogenized = spf.homogenize(data)
-
- for k, v in homogenized.items():
- assert v.sp_index.equals(expected)
-
- indices1 = [
- BlockIndex(10, [2], [7]),
- BlockIndex(10, [1, 6], [3, 4]),
- BlockIndex(10, [0], [10]),
- ]
- expected1 = BlockIndex(10, [2, 6], [2, 3])
- _check_matches(indices1, expected1)
-
- indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]
- expected2 = indices2[0]
- _check_matches(indices2, expected2)
-
- # must have NaN fill value
- data = {"a": SparseSeries(np.arange(7), sparse_index=expected2, fill_value=0)}
- with pytest.raises(TypeError, match="NaN fill value"):
- spf.homogenize(data)
-
- def test_fill_value_corner(self):
- cop = self.zbseries.copy()
- cop.fill_value = 0
- result = self.bseries / cop
-
- assert np.isnan(result.fill_value)
-
- cop2 = self.zbseries.copy()
- cop2.fill_value = 1
- result = cop2 / cop
- # 1 / 0 is inf
- assert np.isinf(result.fill_value)
-
- def test_fill_value_when_combine_const(self):
- # GH12723
- s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))
-
- exp = s.fillna(0).add(2)
- res = s.add(2, fill_value=0)
- tm.assert_series_equal(res, exp)
-
- def test_shift(self):
- series = SparseSeries([nan, 1.0, 2.0, 3.0, nan, nan], index=np.arange(6))
-
- shifted = series.shift(0)
- # assert shifted is not series
- tm.assert_sp_series_equal(shifted, series)
-
- f = lambda s: s.shift(1)
- _dense_series_compare(series, f)
-
- f = lambda s: s.shift(-2)
- _dense_series_compare(series, f)
-
- series = SparseSeries(
- [nan, 1.0, 2.0, 3.0, nan, nan], index=bdate_range("1/1/2000", periods=6)
- )
- f = lambda s: s.shift(2, freq="B")
- _dense_series_compare(series, f)
-
- f = lambda s: s.shift(2, freq=BDay())
- _dense_series_compare(series, f)
-
- def test_shift_nan(self):
- # GH 12908
- orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])
- sparse = orig.to_sparse()
-
- tm.assert_sp_series_equal(
- sparse.shift(0), orig.shift(0).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(1), orig.shift(1).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(2), orig.shift(2).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(3), orig.shift(3).to_sparse(), check_kind=False
- )
-
- tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
- tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
- tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
- tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
-
- sparse = orig.to_sparse(fill_value=0)
- tm.assert_sp_series_equal(
- sparse.shift(0), orig.shift(0).to_sparse(fill_value=sparse.fill_value)
- )
- tm.assert_sp_series_equal(
- sparse.shift(1), orig.shift(1).to_sparse(fill_value=0), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(2), orig.shift(2).to_sparse(fill_value=0), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(3), orig.shift(3).to_sparse(fill_value=0), check_kind=False
- )
-
- tm.assert_sp_series_equal(
- sparse.shift(-1), orig.shift(-1).to_sparse(fill_value=0), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-2), orig.shift(-2).to_sparse(fill_value=0), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-3), orig.shift(-3).to_sparse(fill_value=0), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-4), orig.shift(-4).to_sparse(fill_value=0), check_kind=False
- )
-
- def test_shift_dtype(self):
- # GH 12908
- orig = pd.Series([1, 2, 3, 4], dtype=np.int64)
-
- sparse = orig.to_sparse()
- tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
-
- sparse = orig.to_sparse(fill_value=np.nan)
- tm.assert_sp_series_equal(
- sparse.shift(0), orig.shift(0).to_sparse(fill_value=np.nan)
- )
- # shift(1) or more span changes dtype to float64
- # XXX: SparseSeries doesn't need to shift dtype here.
- # Do we want to astype in shift, for backwards compat?
- # If not, document it.
- tm.assert_sp_series_equal(
- sparse.shift(1).astype("f8"), orig.shift(1).to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.shift(2).astype("f8"), orig.shift(2).to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.shift(3).astype("f8"), orig.shift(3).to_sparse(kind="integer")
- )
-
- tm.assert_sp_series_equal(
- sparse.shift(-1).astype("f8"), orig.shift(-1).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-2).astype("f8"), orig.shift(-2).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-3).astype("f8"), orig.shift(-3).to_sparse(), check_kind=False
- )
- tm.assert_sp_series_equal(
- sparse.shift(-4).astype("f8"), orig.shift(-4).to_sparse(), check_kind=False
- )
-
- @pytest.mark.parametrize("fill_value", [0, 1, np.nan])
- @pytest.mark.parametrize("periods", [0, 1, 2, 3, -1, -2, -3, -4])
- def test_shift_dtype_fill_value(self, fill_value, periods):
- # GH 12908
- orig = pd.Series([1, 0, 0, 4], dtype=np.dtype("int64"))
-
- sparse = orig.to_sparse(fill_value=fill_value)
-
- result = sparse.shift(periods)
- expected = orig.shift(periods).to_sparse(fill_value=fill_value)
-
- tm.assert_sp_series_equal(
- result, expected, check_kind=False, consolidate_block_indices=True
- )
-
- def test_combine_first(self):
- s = self.bseries
-
- result = s[::2].combine_first(s)
- result2 = s[::2].combine_first(s.to_dense())
-
- expected = s[::2].to_dense().combine_first(s.to_dense())
- expected = expected.to_sparse(fill_value=s.fill_value)
-
- tm.assert_sp_series_equal(result, result2)
- tm.assert_sp_series_equal(result, expected)
-
- @pytest.mark.parametrize("deep", [True, False])
- @pytest.mark.parametrize("fill_value", [0, 1, np.nan, None])
- def test_memory_usage_deep(self, deep, fill_value):
- values = [1.0] + [fill_value] * 20
- sparse_series = SparseSeries(values, fill_value=fill_value)
- dense_series = Series(values)
- sparse_usage = sparse_series.memory_usage(deep=deep)
- dense_usage = dense_series.memory_usage(deep=deep)
-
- assert sparse_usage < dense_usage
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseHandlingMultiIndexes:
- def setup_method(self, method):
- miindex = pd.MultiIndex.from_product(
- [["x", "y"], ["10", "20"]], names=["row-foo", "row-bar"]
- )
- micol = pd.MultiIndex.from_product(
- [["a", "b", "c"], ["1", "2"]], names=["col-foo", "col-bar"]
- )
- dense_multiindex_frame = (
- pd.DataFrame(index=miindex, columns=micol).sort_index().sort_index(axis=1)
- )
- self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)
-
- def test_to_sparse_preserve_multiindex_names_columns(self):
- sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
- sparse_multiindex_frame = sparse_multiindex_frame.copy()
- tm.assert_index_equal(
- sparse_multiindex_frame.columns, self.dense_multiindex_frame.columns
- )
-
- def test_round_trip_preserve_multiindex_names(self):
- sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
- round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()
- tm.assert_frame_equal(
- self.dense_multiindex_frame,
- round_trip_multiindex_frame,
- check_column_type=True,
- check_names=True,
- )
-
-
-@td.skip_if_no_scipy
-@pytest.mark.filterwarnings("ignore:the matrix subclass:PendingDeprecationWarning")
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseSeriesScipyInteraction:
- # Issue 8048: add SparseSeries coo methods
-
- def setup_method(self, method):
- import scipy.sparse
-
- # SparseSeries inputs used in tests, the tests rely on the order
- self.sparse_series = []
- s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
- s.index = pd.MultiIndex.from_tuples(
- [
- (1, 2, "a", 0),
- (1, 2, "a", 1),
- (1, 1, "b", 0),
- (1, 1, "b", 1),
- (2, 1, "b", 0),
- (2, 1, "b", 1),
- ],
- names=["A", "B", "C", "D"],
- )
- self.sparse_series.append(s.to_sparse())
-
- ss = self.sparse_series[0].copy()
- ss.index.names = [3, 0, 1, 2]
- self.sparse_series.append(ss)
-
- ss = pd.Series(
- [nan] * 12, index=cartesian_product((range(3), range(4)))
- ).to_sparse()
- for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
- ss[k] = v
- self.sparse_series.append(ss)
-
- # results used in tests
- self.coo_matrices = []
- self.coo_matrices.append(
- scipy.sparse.coo_matrix(
- ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)
- )
- )
- self.coo_matrices.append(
- scipy.sparse.coo_matrix(
- ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
- )
- )
- self.coo_matrices.append(
- scipy.sparse.coo_matrix(
- ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)
- )
- )
- self.ils = [
- [(1, 2), (1, 1), (2, 1)],
- [(1, 1), (1, 2), (2, 1)],
- [(1, 2, "a"), (1, 1, "b"), (2, 1, "b")],
- ]
- self.jls = [[("a", 0), ("a", 1), ("b", 0), ("b", 1)], [0, 1]]
-
- def test_to_coo_text_names_integer_row_levels_nosort(self):
- ss = self.sparse_series[0]
- kwargs = {"row_levels": [0, 1], "column_levels": [2, 3]}
- result = (self.coo_matrices[0], self.ils[0], self.jls[0])
- self._run_test(ss, kwargs, result)
-
- def test_to_coo_text_names_integer_row_levels_sort(self):
- ss = self.sparse_series[0]
- kwargs = {"row_levels": [0, 1], "column_levels": [2, 3], "sort_labels": True}
- result = (self.coo_matrices[1], self.ils[1], self.jls[0])
- self._run_test(ss, kwargs, result)
-
- def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
- ss = self.sparse_series[0]
- kwargs = {
- "row_levels": ["A", "B", "C"],
- "column_levels": ["D"],
- "sort_labels": False,
- }
- result = (self.coo_matrices[2], self.ils[2], self.jls[1])
- self._run_test(ss, kwargs, result)
-
- def test_to_coo_integer_names_integer_row_levels_nosort(self):
- ss = self.sparse_series[1]
- kwargs = {"row_levels": [3, 0], "column_levels": [1, 2]}
- result = (self.coo_matrices[0], self.ils[0], self.jls[0])
- self._run_test(ss, kwargs, result)
-
- def test_to_coo_text_names_text_row_levels_nosort(self):
- ss = self.sparse_series[0]
- kwargs = {"row_levels": ["A", "B"], "column_levels": ["C", "D"]}
- result = (self.coo_matrices[0], self.ils[0], self.jls[0])
- self._run_test(ss, kwargs, result)
-
- def test_to_coo_bad_partition_nonnull_intersection(self):
- ss = self.sparse_series[0]
- msg = "Is not a partition because intersection is not null"
- with pytest.raises(ValueError, match=msg):
- ss.to_coo(["A", "B", "C"], ["C", "D"])
-
- def test_to_coo_bad_partition_small_union(self):
- ss = self.sparse_series[0]
- msg = "Is not a partition because union is not the whole"
- with pytest.raises(ValueError, match=msg):
- ss.to_coo(["A"], ["C", "D"])
-
- def test_to_coo_nlevels_less_than_two(self):
- ss = self.sparse_series[0]
- ss.index = np.arange(len(ss.index))
- msg = "to_coo requires MultiIndex with nlevels > 2"
- with pytest.raises(ValueError, match=msg):
- ss.to_coo()
-
- def test_to_coo_bad_ilevel(self):
- ss = self.sparse_series[0]
- with pytest.raises(KeyError, match="Level E not found"):
- ss.to_coo(["A", "B"], ["C", "D", "E"])
-
- def test_to_coo_duplicate_index_entries(self):
- ss = pd.concat([self.sparse_series[0], self.sparse_series[0]]).to_sparse()
- msg = "Duplicate index entries are not allowed in to_coo transformation"
- with pytest.raises(ValueError, match=msg):
- ss.to_coo(["A", "B"], ["C", "D"])
-
- def test_from_coo_dense_index(self):
- ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
- check = self.sparse_series[2]
- tm.assert_sp_series_equal(ss, check)
-
- def test_from_coo_nodense_index(self):
- ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
- check = self.sparse_series[2]
- check = check.dropna().to_sparse()
- tm.assert_sp_series_equal(ss, check)
-
- def test_from_coo_long_repr(self):
- # GH 13114
- # test it doesn't raise error. Formatting is tested in test_format
- import scipy.sparse
-
- sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))
- repr(sparse)
-
- def _run_test(self, ss, kwargs, check):
- results = ss.to_coo(**kwargs)
- self._check_results_to_coo(results, check)
- # for every test, also test symmetry property (transpose), switch
- # row_levels and column_levels
- d = kwargs.copy()
- d["row_levels"] = kwargs["column_levels"]
- d["column_levels"] = kwargs["row_levels"]
- results = ss.to_coo(**d)
- results = (results[0].T, results[2], results[1])
- self._check_results_to_coo(results, check)
-
- def _check_results_to_coo(self, results, check):
- (A, il, jl) = results
- (A_result, il_result, jl_result) = check
- # convert to dense and compare
- tm.assert_numpy_array_equal(A.todense(), A_result.todense())
- # or compare directly as difference of sparse
- # assert(abs(A - A_result).max() < 1e-12) # max is failing in python
- # 2.6
- assert il == il_result
- assert jl == jl_result
-
- def test_concat(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- for kind in ["integer", "block"]:
- sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, name="y", kind=kind)
-
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind=kind)
- tm.assert_sp_series_equal(res, exp)
-
- sparse1 = pd.SparseSeries(val1, fill_value=0, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, fill_value=0, name="y", kind=kind)
-
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
- tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
-
- def test_concat_axis1(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x")
- sparse2 = pd.SparseSeries(val2, name="y")
-
- res = pd.concat([sparse1, sparse2], axis=1)
- exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
- exp = pd.SparseDataFrame(exp)
- tm.assert_sp_frame_equal(res, exp)
-
- def test_concat_different_fill(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- for kind in ["integer", "block"]:
- sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, name="y", kind=kind, fill_value=0)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind=kind)
- tm.assert_sp_series_equal(res, exp)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse2, sparse1])
- exp = pd.concat([pd.Series(val2), pd.Series(val1)])
- exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- def test_concat_axis1_different_fill(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x")
- sparse2 = pd.SparseSeries(val2, name="y", fill_value=0)
-
- res = pd.concat([sparse1, sparse2], axis=1)
- exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- def test_concat_different_kind(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x", kind="integer")
- sparse2 = pd.SparseSeries(val2, name="y", kind="block", fill_value=0)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind="integer")
- tm.assert_sp_series_equal(res, exp)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse2, sparse1])
- exp = pd.concat([pd.Series(val2), pd.Series(val1)])
- exp = pd.SparseSeries(exp, kind="block", fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- def test_concat_sparse_dense(self):
- # use first input's fill_value
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- for kind in ["integer", "block"]:
- sparse = pd.SparseSeries(val1, name="x", kind=kind)
- dense = pd.Series(val2, name="y")
-
- res = pd.concat([sparse, dense])
- exp = pd.concat([pd.Series(val1), dense])
- exp = pd.SparseSeries(exp, kind=kind)
- tm.assert_sp_series_equal(res, exp)
-
- res = pd.concat([dense, sparse, dense])
- exp = pd.concat([dense, pd.Series(val1), dense])
- exp = exp.astype("Sparse")
- tm.assert_series_equal(res, exp)
-
- sparse = pd.SparseSeries(val1, name="x", kind=kind, fill_value=0)
- dense = pd.Series(val2, name="y")
-
- res = pd.concat([sparse, dense])
- exp = pd.concat([pd.Series(val1), dense])
- exp = exp.astype(SparseDtype(exp.dtype, 0))
- tm.assert_series_equal(res, exp)
-
- res = pd.concat([dense, sparse, dense])
- exp = pd.concat([dense, pd.Series(val1), dense])
- exp = exp.astype(SparseDtype(exp.dtype, 0))
- tm.assert_series_equal(res, exp)
-
- def test_value_counts(self):
- vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
- dense = pd.Series(vals, name="xx")
-
- sparse = pd.SparseSeries(vals, name="xx")
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- sparse = pd.SparseSeries(vals, name="xx", fill_value=0)
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- def test_value_counts_dup(self):
- vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
-
- # numeric op may cause sp_values to include the same value as
- # fill_value
- dense = pd.Series(vals, name="xx") / 0.0
- sparse = pd.SparseSeries(vals, name="xx") / 0.0
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]
-
- dense = pd.Series(vals, name="xx") * 0.0
- sparse = pd.SparseSeries(vals, name="xx") * 0.0
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- def test_value_counts_int(self):
- vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]
- dense = pd.Series(vals, name="xx")
-
- # fill_value is np.nan, but should not be included in the result
- sparse = pd.SparseSeries(vals, name="xx")
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- sparse = pd.SparseSeries(vals, name="xx", fill_value=0)
- tm.assert_series_equal(sparse.value_counts(), dense.value_counts())
- tm.assert_series_equal(
- sparse.value_counts(dropna=False), dense.value_counts(dropna=False)
- )
-
- def test_isna(self):
- # GH 8276
- s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name="xxx")
-
- res = s.isna()
- exp = pd.SparseSeries(
- [True, True, False, False, True], name="xxx", fill_value=True
- )
- tm.assert_sp_series_equal(res, exp)
-
- # if fill_value is not nan, True can be included in sp_values
- s = pd.SparseSeries([np.nan, 0.0, 1.0, 2.0, 0.0], name="xxx", fill_value=0.0)
- res = s.isna()
- assert isinstance(res, pd.SparseSeries)
- exp = pd.Series([True, False, False, False, False], name="xxx")
- tm.assert_series_equal(res.to_dense(), exp)
-
- def test_notna(self):
- # GH 8276
- s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name="xxx")
-
- res = s.notna()
- exp = pd.SparseSeries(
- [False, False, True, True, False], name="xxx", fill_value=False
- )
- tm.assert_sp_series_equal(res, exp)
-
- # if fill_value is not nan, True can be included in sp_values
- s = pd.SparseSeries([np.nan, 0.0, 1.0, 2.0, 0.0], name="xxx", fill_value=0.0)
- res = s.notna()
- assert isinstance(res, pd.SparseSeries)
- exp = pd.Series([False, True, True, True, True], name="xxx")
- tm.assert_series_equal(res.to_dense(), exp)
-
-
-def _dense_series_compare(s, f):
- result = f(s)
- assert isinstance(result, SparseSeries)
- dense_result = f(s.to_dense())
- tm.assert_series_equal(result.to_dense(), dense_result)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseSeriesAnalytics:
- def setup_method(self, method):
- arr, index = _test_data1()
- self.bseries = SparseSeries(arr, index=index, kind="block", name="bseries")
-
- arr, index = _test_data1_zero()
- self.zbseries = SparseSeries(
- arr, index=index, kind="block", fill_value=0, name="zbseries"
- )
-
- def test_cumsum(self):
- result = self.bseries.cumsum()
- expected = SparseSeries(self.bseries.to_dense().cumsum())
- tm.assert_sp_series_equal(result, expected)
-
- result = self.zbseries.cumsum()
- expected = self.zbseries.to_dense().cumsum().to_sparse()
- tm.assert_series_equal(result, expected)
-
- axis = 1 # Series is 1-D, so only axis = 0 is valid.
- msg = "No axis named {axis}".format(axis=axis)
- with pytest.raises(ValueError, match=msg):
- self.bseries.cumsum(axis=axis)
-
- def test_numpy_cumsum(self):
- result = np.cumsum(self.bseries)
- expected = SparseSeries(self.bseries.to_dense().cumsum())
- tm.assert_sp_series_equal(result, expected)
-
- result = np.cumsum(self.zbseries)
- expected = self.zbseries.to_dense().cumsum().to_sparse()
- tm.assert_series_equal(result, expected)
-
- msg = "the 'dtype' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.cumsum(self.bseries, dtype=np.int64)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.cumsum(self.zbseries, out=result)
-
- def test_numpy_func_call(self):
- # no exception should be raised even though
- # numpy passes in 'axis=None' or `axis=-1'
- funcs = [
- "sum",
- "cumsum",
- "var",
- "mean",
- "prod",
- "cumprod",
- "std",
- "argsort",
- "min",
- "max",
- ]
- for func in funcs:
- for series in ("bseries", "zbseries"):
- getattr(np, func)(getattr(self, series))
-
- def test_deprecated_numpy_func_call(self):
- # NOTE: These should be add to the 'test_numpy_func_call' test above
- # once the behavior of argmin/argmax is corrected.
- funcs = ["argmin", "argmax"]
- for func in funcs:
- for series in ("bseries", "zbseries"):
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
- ):
- getattr(np, func)(getattr(self, series))
-
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
- ):
- getattr(getattr(self, series), func)()
-
-
-@pytest.mark.parametrize(
- "datetime_type",
- (np.datetime64, pd.Timestamp, lambda x: datetime.strptime(x, "%Y-%m-%d")),
-)
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_constructor_dict_datetime64_index(datetime_type):
- # GH 9456
- dates = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
- values = [42544017.198965244, 1234565, 40512335.181958228, -1]
-
- result = SparseSeries(dict(zip(map(datetime_type, dates), values)))
- expected = SparseSeries(values, map(pd.Timestamp, dates))
-
- tm.assert_sp_series_equal(result, expected)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-def test_to_sparse():
- # https://github.com/pandas-dev/pandas/issues/22389
- arr = pd.SparseArray([1, 2, None, 3])
- result = pd.Series(arr).to_sparse()
- assert len(result) == 4
- tm.assert_sp_array_equal(result.values, arr, check_kind=False)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_deprecated_to_sparse():
- # GH 26557
- # Deprecated 0.25.0
-
- ser = Series([1, np.nan, 3])
- sparse_ser = pd.SparseSeries([1, np.nan, 3])
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = ser.to_sparse()
- tm.assert_series_equal(result, sparse_ser)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_constructor_mismatched_raises():
- msg = "Length of passed values is 2, index implies 3"
- with pytest.raises(ValueError, match=msg):
- SparseSeries([1, 2], index=[1, 2, 3])
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_block_deprecated():
- s = SparseSeries([1])
- with tm.assert_produces_warning(FutureWarning):
- s.block
diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py
deleted file mode 100644
index d7295c4bfe5f0..0000000000000
--- a/pandas/tests/sparse/test_combine_concat.py
+++ /dev/null
@@ -1,474 +0,0 @@
-import itertools
-
-import numpy as np
-import pytest
-
-from pandas.errors import PerformanceWarning
-
-import pandas as pd
-import pandas.util.testing as tm
-
-
-class TestSparseArrayConcat:
- @pytest.mark.parametrize("kind", ["integer", "block"])
- def test_basic(self, kind):
- a = pd.SparseArray([1, 0, 0, 2], kind=kind)
- b = pd.SparseArray([1, 0, 2, 2], kind=kind)
-
- result = pd.SparseArray._concat_same_type([a, b])
- # Can't make any assertions about the sparse index itself
- # since we aren't don't merge sparse blocs across arrays
- # in to_concat
- expected = np.array([1, 2, 1, 2, 2], dtype="int64")
- tm.assert_numpy_array_equal(result.sp_values, expected)
- assert result.kind == kind
-
- @pytest.mark.parametrize("kind", ["integer", "block"])
- def test_uses_first_kind(self, kind):
- other = "integer" if kind == "block" else "block"
- a = pd.SparseArray([1, 0, 0, 2], kind=kind)
- b = pd.SparseArray([1, 0, 2, 2], kind=other)
-
- result = pd.SparseArray._concat_same_type([a, b])
- expected = np.array([1, 2, 1, 2, 2], dtype="int64")
- tm.assert_numpy_array_equal(result.sp_values, expected)
- assert result.kind == kind
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-class TestSparseSeriesConcat:
- @pytest.mark.parametrize("kind", ["integer", "block"])
- def test_concat(self, kind):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, name="y", kind=kind)
-
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind=kind)
- tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
-
- sparse1 = pd.SparseSeries(val1, fill_value=0, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, fill_value=0, name="y", kind=kind)
-
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
- tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
-
- def test_concat_axis1(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x")
- sparse2 = pd.SparseSeries(val2, name="y")
-
- res = pd.concat([sparse1, sparse2], axis=1)
- exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
- exp = pd.SparseDataFrame(exp)
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- def test_concat_different_fill(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- for kind in ["integer", "block"]:
- sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
- sparse2 = pd.SparseSeries(val2, name="y", kind=kind, fill_value=0)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse1, sparse2])
-
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind=kind)
- tm.assert_sp_series_equal(res, exp)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse2, sparse1])
-
- exp = pd.concat([pd.Series(val2), pd.Series(val1)])
- exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- def test_concat_axis1_different_fill(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x")
- sparse2 = pd.SparseSeries(val2, name="y", fill_value=0)
-
- res = pd.concat([sparse1, sparse2], axis=1)
- exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- def test_concat_different_kind(self):
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse1 = pd.SparseSeries(val1, name="x", kind="integer")
- sparse2 = pd.SparseSeries(val2, name="y", kind="block")
-
- res = pd.concat([sparse1, sparse2])
- exp = pd.concat([pd.Series(val1), pd.Series(val2)])
- exp = pd.SparseSeries(exp, kind=sparse1.kind)
- tm.assert_sp_series_equal(res, exp)
-
- res = pd.concat([sparse2, sparse1])
- exp = pd.concat([pd.Series(val2), pd.Series(val1)])
- exp = pd.SparseSeries(exp, kind=sparse2.kind)
- tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
-
- @pytest.mark.parametrize("kind", ["integer", "block"])
- def test_concat_sparse_dense(self, kind):
- # use first input's fill_value
- val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
- val2 = np.array([3, np.nan, 4, 0, 0])
-
- sparse = pd.SparseSeries(val1, name="x", kind=kind)
- dense = pd.Series(val2, name="y")
-
- res = pd.concat([sparse, dense])
- exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
- tm.assert_sp_series_equal(res, exp)
-
- res = pd.concat([dense, sparse, dense])
- exp = pd.concat([dense, pd.Series(val1), dense])
- # XXX: changed from SparseSeries to Series[sparse]
- exp = pd.Series(pd.SparseArray(exp, kind=kind), index=exp.index, name=exp.name)
- tm.assert_series_equal(res, exp)
-
- sparse = pd.SparseSeries(val1, name="x", kind=kind, fill_value=0)
- dense = pd.Series(val2, name="y")
-
- res = pd.concat([sparse, dense])
- # XXX: changed from SparseSeries to Series[sparse]
- exp = pd.concat([pd.Series(val1), dense])
- exp = pd.Series(
- pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
- )
- tm.assert_series_equal(res, exp)
-
- res = pd.concat([dense, sparse, dense])
- exp = pd.concat([dense, pd.Series(val1), dense])
- # XXX: changed from SparseSeries to Series[sparse]
- exp = pd.Series(
- pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
- )
- tm.assert_series_equal(res, exp)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrameConcat:
- def setup_method(self, method):
-
- self.dense1 = pd.DataFrame(
- {
- "A": [0.0, 1.0, 2.0, np.nan],
- "B": [0.0, 0.0, 0.0, 0.0],
- "C": [np.nan, np.nan, np.nan, np.nan],
- "D": [1.0, 2.0, 3.0, 4.0],
- }
- )
-
- self.dense2 = pd.DataFrame(
- {
- "A": [5.0, 6.0, 7.0, 8.0],
- "B": [np.nan, 0.0, 7.0, 8.0],
- "C": [5.0, 6.0, np.nan, np.nan],
- "D": [np.nan, np.nan, np.nan, np.nan],
- }
- )
-
- self.dense3 = pd.DataFrame(
- {
- "E": [5.0, 6.0, 7.0, 8.0],
- "F": [np.nan, 0.0, 7.0, 8.0],
- "G": [5.0, 6.0, np.nan, np.nan],
- "H": [np.nan, np.nan, np.nan, np.nan],
- }
- )
-
- def test_concat(self):
- # fill_value = np.nan
- sparse = self.dense1.to_sparse()
- sparse2 = self.dense2.to_sparse()
-
- res = pd.concat([sparse, sparse])
- exp = pd.concat([self.dense1, self.dense1]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse2, sparse2])
- exp = pd.concat([self.dense2, self.dense2]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse, sparse2])
- exp = pd.concat([self.dense1, self.dense2]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse2, sparse])
- exp = pd.concat([self.dense2, self.dense1]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- # fill_value = 0
- sparse = self.dense1.to_sparse(fill_value=0)
- sparse2 = self.dense2.to_sparse(fill_value=0)
-
- res = pd.concat([sparse, sparse])
- exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse2, sparse2])
- exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse, sparse2])
- exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- res = pd.concat([sparse2, sparse])
- exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- def test_concat_different_fill_value(self):
- # 1st fill_value will be used
- sparse = self.dense1.to_sparse()
- sparse2 = self.dense2.to_sparse(fill_value=0)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse, sparse2])
- exp = pd.concat([self.dense1, self.dense2]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- with tm.assert_produces_warning(
- PerformanceWarning, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse2, sparse])
- exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
-
- def test_concat_different_columns_sort_warns(self):
- sparse = self.dense1.to_sparse()
- sparse3 = self.dense3.to_sparse()
-
- # stacklevel is wrong since we have two FutureWarnings,
- # one for depr, one for sorting.
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
- ):
- res = pd.concat([sparse, sparse3])
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
- ):
- exp = pd.concat([self.dense1, self.dense3])
-
- exp = exp.to_sparse()
- tm.assert_sp_frame_equal(res, exp, check_kind=False)
-
- def test_concat_different_columns(self):
- # fill_value = np.nan
- sparse = self.dense1.to_sparse()
- sparse3 = self.dense3.to_sparse()
-
- res = pd.concat([sparse, sparse3], sort=True)
- exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
- tm.assert_sp_frame_equal(res, exp, check_kind=False)
-
- res = pd.concat([sparse3, sparse], sort=True)
- exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp, check_kind=False)
-
- def test_concat_bug(self):
- from pandas.core.sparse.api import SparseDtype
-
- x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan], fill_value=0)})
- y = pd.SparseDataFrame({"B": []})
- res = pd.concat([x, y], sort=False)[["A"]]
- exp = pd.DataFrame(
- {"A": pd.SparseArray([np.nan, np.nan], dtype=SparseDtype(float, 0))}
- )
- tm.assert_frame_equal(res, exp)
-
- def test_concat_different_columns_buggy(self):
- sparse = self.dense1.to_sparse(fill_value=0)
- sparse3 = self.dense3.to_sparse(fill_value=0)
-
- res = pd.concat([sparse, sparse3], sort=True)
- exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
-
- tm.assert_sp_frame_equal(
- res, exp, check_kind=False, consolidate_block_indices=True
- )
-
- res = pd.concat([sparse3, sparse], sort=True)
- exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(
- res, exp, check_kind=False, consolidate_block_indices=True
- )
-
- # different fill values
- sparse = self.dense1.to_sparse()
- sparse3 = self.dense3.to_sparse(fill_value=0)
- # each columns keeps its fill_value, thus compare in dense
- res = pd.concat([sparse, sparse3], sort=True)
- exp = pd.concat([self.dense1, self.dense3], sort=True)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- res = pd.concat([sparse3, sparse], sort=True)
- exp = pd.concat([self.dense3, self.dense1], sort=True)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- def test_concat_series(self):
- # fill_value = np.nan
- sparse = self.dense1.to_sparse()
- sparse2 = self.dense2.to_sparse()
-
- for col in ["A", "D"]:
- res = pd.concat([sparse, sparse2[col]])
- exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, check_kind=False)
-
- res = pd.concat([sparse2[col], sparse])
- exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
- tm.assert_sp_frame_equal(res, exp, check_kind=False)
-
- # fill_value = 0
- sparse = self.dense1.to_sparse(fill_value=0)
- sparse2 = self.dense2.to_sparse(fill_value=0)
-
- for col in ["C", "D"]:
- res = pd.concat([sparse, sparse2[col]])
- exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(
- res, exp, check_kind=False, consolidate_block_indices=True
- )
-
- res = pd.concat([sparse2[col], sparse])
- exp = pd.concat([self.dense2[col], self.dense1]).to_sparse(fill_value=0)
- exp["C"] = res["C"]
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(
- res, exp, consolidate_block_indices=True, check_kind=False
- )
-
- def test_concat_axis1(self):
- # fill_value = np.nan
- sparse = self.dense1.to_sparse()
- sparse3 = self.dense3.to_sparse()
-
- res = pd.concat([sparse, sparse3], axis=1)
- exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
- tm.assert_sp_frame_equal(res, exp)
-
- res = pd.concat([sparse3, sparse], axis=1)
- exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp)
-
- # fill_value = 0
- sparse = self.dense1.to_sparse(fill_value=0)
- sparse3 = self.dense3.to_sparse(fill_value=0)
-
- res = pd.concat([sparse, sparse3], axis=1)
- exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp)
-
- res = pd.concat([sparse3, sparse], axis=1)
- exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(res, exp)
-
- # different fill values
- sparse = self.dense1.to_sparse()
- sparse3 = self.dense3.to_sparse(fill_value=0)
- # each columns keeps its fill_value, thus compare in dense
- res = pd.concat([sparse, sparse3], axis=1)
- exp = pd.concat([self.dense1, self.dense3], axis=1)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- res = pd.concat([sparse3, sparse], axis=1)
- exp = pd.concat([self.dense3, self.dense1], axis=1)
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- @pytest.mark.parametrize(
- "fill_value,sparse_idx,dense_idx",
- itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
- )
- def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
- frames = [self.dense1, self.dense2]
- sparse_frame = [
- frames[dense_idx],
- frames[sparse_idx].to_sparse(fill_value=fill_value),
- ]
- dense_frame = [frames[dense_idx], frames[sparse_idx]]
-
- # This will try both directions sparse + dense and dense + sparse
- for _ in range(2):
- res = pd.concat(sparse_frame)
- exp = pd.concat(dense_frame)
-
- assert isinstance(res, pd.SparseDataFrame)
- tm.assert_frame_equal(res.to_dense(), exp)
-
- sparse_frame = sparse_frame[::-1]
- dense_frame = dense_frame[::-1]
-
- @pytest.mark.parametrize(
- "fill_value,sparse_idx,dense_idx",
- itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
- )
- @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=False)
- def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
- # See GH16874, GH18914 and #18686 for why this should be a DataFrame
- from pandas.core.dtypes.common import is_sparse
-
- frames = [self.dense1, self.dense3]
-
- sparse_frame = [
- frames[dense_idx],
- frames[sparse_idx].to_sparse(fill_value=fill_value),
- ]
- dense_frame = [frames[dense_idx], frames[sparse_idx]]
-
- # This will try both directions sparse + dense and dense + sparse
- for _ in range(2):
- res = pd.concat(sparse_frame, axis=1)
- exp = pd.concat(dense_frame, axis=1)
- cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
-
- for col in cols:
- exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
-
- for column in frames[dense_idx].columns:
- if dense_idx == sparse_idx:
- tm.assert_frame_equal(res[column], exp[column])
- else:
- tm.assert_series_equal(res[column], exp[column])
-
- tm.assert_frame_equal(res, exp)
-
- sparse_frame = sparse_frame[::-1]
- dense_frame = dense_frame[::-1]
diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py
deleted file mode 100644
index cf8734910cd19..0000000000000
--- a/pandas/tests/sparse/test_format.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import warnings
-
-import numpy as np
-import pytest
-
-from pandas.compat import is_platform_32bit, is_platform_windows
-
-import pandas as pd
-from pandas import option_context
-import pandas.util.testing as tm
-
-use_32bit_repr = is_platform_windows() or is_platform_32bit()
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseSeriesFormatting:
- @property
- def dtype_format_for_platform(self):
- return "" if use_32bit_repr else ", dtype=int32"
-
- def test_sparse_max_row(self):
- s = pd.Series([1, np.nan, np.nan, 3, np.nan]).to_sparse()
- result = repr(s)
- dfm = self.dtype_format_for_platform
- exp = (
- "0 1.0\n1 NaN\n2 NaN\n3 3.0\n"
- "4 NaN\ndtype: Sparse[float64, nan]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dfm)
- )
- assert result == exp
-
- def test_sparsea_max_row_truncated(self):
- s = pd.Series([1, np.nan, np.nan, 3, np.nan]).to_sparse()
- dfm = self.dtype_format_for_platform
-
- with option_context("display.max_rows", 3):
- # GH 10560
- result = repr(s)
- exp = (
- "0 1.0\n ... \n4 NaN\n"
- "Length: 5, dtype: Sparse[float64, nan]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dfm)
- )
- assert result == exp
-
- def test_sparse_mi_max_row(self):
- idx = pd.MultiIndex.from_tuples(
- [("A", 0), ("A", 1), ("B", 0), ("C", 0), ("C", 1), ("C", 2)]
- )
- s = pd.Series([1, np.nan, np.nan, 3, np.nan, np.nan], index=idx).to_sparse()
- result = repr(s)
- dfm = self.dtype_format_for_platform
- exp = (
- "A 0 1.0\n 1 NaN\nB 0 NaN\n"
- "C 0 3.0\n 1 NaN\n 2 NaN\n"
- "dtype: Sparse[float64, nan]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dfm)
- )
- assert result == exp
-
- with option_context("display.max_rows", 3, "display.show_dimensions", False):
- # GH 13144
- result = repr(s)
- exp = (
- "A 0 1.0\n ... \nC 2 NaN\n"
- "dtype: Sparse[float64, nan]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dfm)
- )
- assert result == exp
-
- def test_sparse_bool(self):
- # GH 13110
- s = pd.SparseSeries([True, False, False, True, False, False], fill_value=False)
- result = repr(s)
- dtype = "" if use_32bit_repr else ", dtype=int32"
- exp = (
- "0 True\n1 False\n2 False\n"
- "3 True\n4 False\n5 False\n"
- "dtype: Sparse[bool, False]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dtype)
- )
- assert result == exp
-
- with option_context("display.max_rows", 3):
- result = repr(s)
- exp = (
- "0 True\n ... \n5 False\n"
- "Length: 6, dtype: Sparse[bool, False]\nBlockIndex\n"
- "Block locations: array([0, 3]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dtype)
- )
- assert result == exp
-
- def test_sparse_int(self):
- # GH 13110
- s = pd.SparseSeries([0, 1, 0, 0, 1, 0], fill_value=False)
-
- result = repr(s)
- dtype = "" if use_32bit_repr else ", dtype=int32"
- exp = (
- "0 0\n1 1\n2 0\n3 0\n4 1\n"
- "5 0\ndtype: Sparse[int64, False]\nBlockIndex\n"
- "Block locations: array([1, 4]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dtype)
- )
- assert result == exp
-
- with option_context("display.max_rows", 3, "display.show_dimensions", False):
- result = repr(s)
- exp = (
- "0 0\n ..\n5 0\n"
- "dtype: Sparse[int64, False]\nBlockIndex\n"
- "Block locations: array([1, 4]{0})\n"
- "Block lengths: array([1, 1]{0})".format(dtype)
- )
- assert result == exp
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseDataFrameFormatting:
- def test_sparse_frame(self):
- # GH 13110
- df = pd.DataFrame(
- {
- "A": [True, False, True, False, True],
- "B": [True, False, True, False, True],
- "C": [0, 0, 3, 0, 5],
- "D": [np.nan, np.nan, np.nan, 1, 2],
- }
- )
- sparse = df.to_sparse()
- assert repr(sparse) == repr(df)
-
- with option_context("display.max_rows", 3):
- assert repr(sparse) == repr(df)
-
- def test_sparse_repr_after_set(self):
- # GH 15488
- sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
- res = sdf.copy()
-
- # Ignore the warning
- with pd.option_context("mode.chained_assignment", None):
- sdf[0][1] = 2 # This line triggers the bug
-
- repr(sdf)
- tm.assert_sp_frame_equal(sdf, res)
-
-
-def test_repr_no_warning():
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", FutureWarning)
- df = pd.SparseDataFrame({"A": [1, 2]})
- s = df["A"]
-
- with tm.assert_produces_warning(None):
- repr(df)
- repr(s)
diff --git a/pandas/tests/sparse/test_groupby.py b/pandas/tests/sparse/test_groupby.py
deleted file mode 100644
index 04e49a272a77a..0000000000000
--- a/pandas/tests/sparse/test_groupby.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-import pandas.util.testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestSparseGroupBy:
- def setup_method(self, method):
- self.dense = pd.DataFrame(
- {
- "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
- "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
- "C": np.random.randn(8),
- "D": np.random.randn(8),
- "E": [np.nan, np.nan, 1, 2, np.nan, 1, np.nan, np.nan],
- }
- )
- self.sparse = self.dense.to_sparse()
-
- def test_first_last_nth(self):
- # tests for first / last / nth
- sparse_grouped = self.sparse.groupby("A")
- dense_grouped = self.dense.groupby("A")
-
- sparse_grouped_first = sparse_grouped.first()
- sparse_grouped_last = sparse_grouped.last()
- sparse_grouped_nth = sparse_grouped.nth(1)
-
- dense_grouped_first = pd.DataFrame(dense_grouped.first().to_sparse())
- dense_grouped_last = pd.DataFrame(dense_grouped.last().to_sparse())
- dense_grouped_nth = pd.DataFrame(dense_grouped.nth(1).to_sparse())
-
- tm.assert_frame_equal(sparse_grouped_first, dense_grouped_first)
- tm.assert_frame_equal(sparse_grouped_last, dense_grouped_last)
- tm.assert_frame_equal(sparse_grouped_nth, dense_grouped_nth)
-
- def test_aggfuncs(self):
- sparse_grouped = self.sparse.groupby("A")
- dense_grouped = self.dense.groupby("A")
-
- result = sparse_grouped.mean().to_sparse()
- expected = dense_grouped.mean().to_sparse()
-
- tm.assert_frame_equal(result, expected)
-
- # ToDo: sparse sum includes str column
- # tm.assert_frame_equal(sparse_grouped.sum(),
- # dense_grouped.sum())
-
- result = sparse_grouped.count().to_sparse()
- expected = dense_grouped.count().to_sparse()
-
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("fill_value", [0, np.nan])
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-def test_groupby_includes_fill_value(fill_value):
- # https://github.com/pandas-dev/pandas/issues/5078
- df = pd.DataFrame(
- {
- "a": [fill_value, 1, fill_value, fill_value],
- "b": [fill_value, 1, fill_value, fill_value],
- }
- )
- sdf = df.to_sparse(fill_value=fill_value)
- result = sdf.groupby("a").sum()
- expected = pd.DataFrame(df.groupby("a").sum().to_sparse(fill_value=fill_value))
- tm.assert_frame_equal(result, expected, check_index_type=False)
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
deleted file mode 100644
index ea5e939b57566..0000000000000
--- a/pandas/tests/sparse/test_indexing.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas.core.sparse.api import SparseDtype
-import pandas.util.testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseSeriesIndexing:
- def setup_method(self, method):
- self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
- self.sparse = self.orig.to_sparse()
-
- def test_getitem(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse[0] == 1
- assert np.isnan(sparse[1])
- assert sparse[3] == 3
-
- result = sparse[[1, 3, 4]]
- exp = orig[[1, 3, 4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse[orig % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse[sparse % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_getitem_slice(self):
- orig = self.orig
- sparse = self.sparse
-
- tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
- tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
- tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
- tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
-
- def test_getitem_int_dtype(self):
- # GH 8292
- s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name="xxx")
- res = s[::2]
- exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name="xxx")
- tm.assert_sp_series_equal(res, exp)
- assert res.dtype == SparseDtype(np.int64)
-
- s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name="xxx")
- res = s[::2]
- exp = pd.SparseSeries(
- [0, 2, 4, 6], index=[0, 2, 4, 6], fill_value=0, name="xxx"
- )
- tm.assert_sp_series_equal(res, exp)
- assert res.dtype == SparseDtype(np.int64)
-
- def test_getitem_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse(fill_value=0)
-
- assert sparse[0] == 1
- assert np.isnan(sparse[1])
- assert sparse[2] == 0
- assert sparse[3] == 3
-
- result = sparse[[1, 3, 4]]
- exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse[orig % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse[sparse % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_getitem_ellipsis(self):
- # GH 9467
- s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
- tm.assert_sp_series_equal(s[...], s)
-
- s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
- tm.assert_sp_series_equal(s[...], s)
-
- def test_getitem_slice_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse(fill_value=0)
- tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse(fill_value=0))
- tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse(fill_value=0))
- tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse(fill_value=0))
- tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse(fill_value=0))
-
- def test_loc(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse.loc[0] == 1
- assert np.isnan(sparse.loc[1])
-
- result = sparse.loc[[1, 3, 4]]
- exp = orig.loc[[1, 3, 4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # exceeds the bounds
- result = sparse.reindex([1, 3, 4, 5])
- exp = orig.reindex([1, 3, 4, 5]).to_sparse()
- tm.assert_sp_series_equal(result, exp)
- # padded with NaN
- assert np.isnan(result[-1])
-
- # dense array
- result = sparse.loc[orig % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_loc_index(self):
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
- sparse = orig.to_sparse()
-
- assert sparse.loc["A"] == 1
- assert np.isnan(sparse.loc["B"])
-
- result = sparse.loc[["A", "C", "D"]]
- exp = orig.loc[["A", "C", "D"]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse.loc[orig % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_loc_index_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- assert sparse.loc["A"] == 1
- assert np.isnan(sparse.loc["B"])
-
- result = sparse.loc[["A", "C", "D"]]
- exp = orig.loc[["A", "C", "D"]].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse.loc[orig % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- def test_loc_slice(self):
- orig = self.orig
- sparse = self.sparse
- tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
-
- def test_loc_slice_index_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- tm.assert_sp_series_equal(
- sparse.loc["C":], orig.loc["C":].to_sparse(fill_value=0)
- )
-
- def test_loc_slice_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse(fill_value=0)
- tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse(fill_value=0))
-
- def test_iloc(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse.iloc[3] == 3
- assert np.isnan(sparse.iloc[2])
-
- result = sparse.iloc[[1, 3, 4]]
- exp = orig.iloc[[1, 3, 4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- result = sparse.iloc[[1, -2, -4]]
- exp = orig.iloc[[1, -2, -4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- with pytest.raises(IndexError):
- sparse.iloc[[1, 3, 5]]
-
- def test_iloc_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse(fill_value=0)
-
- assert sparse.iloc[3] == 3
- assert np.isnan(sparse.iloc[1])
- assert sparse.iloc[4] == 0
-
- result = sparse.iloc[[1, 3, 4]]
- exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
- tm.assert_sp_series_equal(result, exp)
-
- def test_iloc_slice(self):
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
- sparse = orig.to_sparse()
- tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
-
- def test_iloc_slice_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse(fill_value=0)
- tm.assert_sp_series_equal(
- sparse.iloc[2:], orig.iloc[2:].to_sparse(fill_value=0)
- )
-
- def test_at(self):
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
- sparse = orig.to_sparse()
- assert sparse.at[0] == orig.at[0]
- assert np.isnan(sparse.at[1])
- assert np.isnan(sparse.at[2])
- assert sparse.at[3] == orig.at[3]
- assert np.isnan(sparse.at[4])
-
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("abcde"))
- sparse = orig.to_sparse()
- assert sparse.at["a"] == orig.at["a"]
- assert np.isnan(sparse.at["b"])
- assert np.isnan(sparse.at["c"])
- assert sparse.at["d"] == orig.at["d"]
- assert np.isnan(sparse.at["e"])
-
- def test_at_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("abcde"))
- sparse = orig.to_sparse(fill_value=0)
- assert sparse.at["a"] == orig.at["a"]
- assert np.isnan(sparse.at["b"])
- assert sparse.at["c"] == orig.at["c"]
- assert sparse.at["d"] == orig.at["d"]
- assert sparse.at["e"] == orig.at["e"]
-
- def test_iat(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse.iat[0] == orig.iat[0]
- assert np.isnan(sparse.iat[1])
- assert np.isnan(sparse.iat[2])
- assert sparse.iat[3] == orig.iat[3]
- assert np.isnan(sparse.iat[4])
-
- assert np.isnan(sparse.iat[-1])
- assert sparse.iat[-5] == orig.iat[-5]
-
- def test_iat_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0])
- sparse = orig.to_sparse()
- assert sparse.iat[0] == orig.iat[0]
- assert np.isnan(sparse.iat[1])
- assert sparse.iat[2] == orig.iat[2]
- assert sparse.iat[3] == orig.iat[3]
- assert sparse.iat[4] == orig.iat[4]
-
- assert sparse.iat[-1] == orig.iat[-1]
- assert sparse.iat[-5] == orig.iat[-5]
-
- def test_get(self):
- s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
- assert s.get(0) == 1
- assert np.isnan(s.get(1))
- assert s.get(5) is None
-
- s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- assert s.get("A") == 1
- assert np.isnan(s.get("B"))
- assert s.get("C") == 0
- assert s.get("XX") is None
-
- s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"), fill_value=0)
- assert s.get("A") == 1
- assert np.isnan(s.get("B"))
- assert s.get("C") == 0
- assert s.get("XX") is None
-
- def test_take(self):
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
- sparse = orig.to_sparse()
-
- tm.assert_sp_series_equal(sparse.take([0]), orig.take([0]).to_sparse())
- tm.assert_sp_series_equal(
- sparse.take([0, 1, 3]), orig.take([0, 1, 3]).to_sparse()
- )
- tm.assert_sp_series_equal(
- sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse()
- )
-
- def test_take_fill_value(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- tm.assert_sp_series_equal(
- sparse.take([0]), orig.take([0]).to_sparse(fill_value=0)
- )
-
- exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
-
- exp = orig.take([-1, -2]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
-
- def test_reindex(self):
- orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
- sparse = orig.to_sparse()
-
- res = sparse.reindex(["A", "E", "C", "D"])
- exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- # all missing & fill_value
- res = sparse.reindex(["B", "E", "C"])
- exp = orig.reindex(["B", "E", "C"]).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
- sparse = orig.to_sparse()
-
- res = sparse.reindex(["A", "E", "C", "D"])
- exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- def test_fill_value_reindex(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "E", "C", "D"])
- exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- # includes missing and fill_value
- res = sparse.reindex(["A", "B", "C"])
- exp = orig.reindex(["A", "B", "C"]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- # all missing
- orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "E", "C", "D"])
- exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- # all fill_value
- orig = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- def test_fill_value_reindex_coerces_float_int(self):
- orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "E", "C", "D"])
- exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
- tm.assert_sp_series_equal(res, exp)
-
- def test_reindex_fill_value(self):
- floats = pd.Series([1.0, 2.0, 3.0]).to_sparse()
- result = floats.reindex([1, 2, 3], fill_value=0)
- expected = pd.Series([2.0, 3.0, 0], index=[1, 2, 3]).to_sparse()
- tm.assert_sp_series_equal(result, expected)
-
- def test_reindex_nearest(self):
- s = pd.Series(np.arange(10, dtype="float64")).to_sparse()
- target = [0.1, 0.9, 1.5, 2.0]
- actual = s.reindex(target, method="nearest")
- expected = pd.Series(np.around(target), target).to_sparse()
- tm.assert_sp_series_equal(expected, actual)
-
- actual = s.reindex(target, method="nearest", tolerance=0.2)
- expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
- tm.assert_sp_series_equal(expected, actual)
-
- actual = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
- expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
- tm.assert_sp_series_equal(expected, actual)
-
- @pytest.mark.parametrize("kind", ["integer", "block"])
- @pytest.mark.parametrize("fill", [True, False, np.nan])
- def tests_indexing_with_sparse(self, kind, fill):
- # see gh-13985
- arr = pd.SparseArray([1, 2, 3], kind=kind)
- indexer = pd.SparseArray([True, False, True], fill_value=fill, dtype=bool)
-
- expected = arr[indexer]
- result = pd.SparseArray([1, 3], kind=kind)
- tm.assert_sp_array_equal(result, expected)
-
- s = pd.SparseSeries(arr, index=["a", "b", "c"], dtype=np.float64)
- expected = pd.SparseSeries(
- [1, 3],
- index=["a", "c"],
- kind=kind,
- dtype=SparseDtype(np.float64, s.fill_value),
- )
-
- tm.assert_sp_series_equal(s[indexer], expected)
- tm.assert_sp_series_equal(s.loc[indexer], expected)
- tm.assert_sp_series_equal(s.iloc[indexer], expected)
-
- indexer = pd.SparseSeries(indexer, index=["a", "b", "c"])
- tm.assert_sp_series_equal(s[indexer], expected)
- tm.assert_sp_series_equal(s.loc[indexer], expected)
-
- msg = "iLocation based boolean indexing cannot use an indexable as a mask"
- with pytest.raises(ValueError, match=msg):
- s.iloc[indexer]
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
- def setup_method(self, method):
- # Mi with duplicated values
- idx = pd.MultiIndex.from_tuples(
- [("A", 0), ("A", 1), ("B", 0), ("C", 0), ("C", 1)]
- )
- self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
- self.sparse = self.orig.to_sparse()
-
- def test_getitem_multi(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse[0] == orig[0]
- assert np.isnan(sparse[1])
- assert sparse[3] == orig[3]
-
- tm.assert_sp_series_equal(sparse["A"], orig["A"].to_sparse())
- tm.assert_sp_series_equal(sparse["B"], orig["B"].to_sparse())
-
- result = sparse[[1, 3, 4]]
- exp = orig[[1, 3, 4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse[orig % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse[sparse % 2 == 1]
- exp = orig[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_getitem_multi_tuple(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse["C", 0] == orig["C", 0]
- assert np.isnan(sparse["A", 1])
- assert np.isnan(sparse["B", 0])
-
- def test_getitems_slice_multi(self):
- orig = self.orig
- sparse = self.sparse
-
- tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
- tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
- tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
-
- tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
- tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
-
- def test_loc(self):
- # need to be override to use different label
- orig = self.orig
- sparse = self.sparse
-
- tm.assert_sp_series_equal(sparse.loc["A"], orig.loc["A"].to_sparse())
- tm.assert_sp_series_equal(sparse.loc["B"], orig.loc["B"].to_sparse())
-
- result = sparse.loc[[1, 3, 4]]
- exp = orig.loc[[1, 3, 4]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # exceeds the bounds
- result = sparse.loc[[1, 3, 4, 5]]
- exp = orig.loc[[1, 3, 4, 5]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # single element list (GH 15447)
- result = sparse.loc[["A"]]
- exp = orig.loc[["A"]].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # dense array
- result = sparse.loc[orig % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse % 2 == 1]
- exp = orig.loc[orig % 2 == 1].to_sparse()
- tm.assert_sp_series_equal(result, exp)
-
- # sparse array
- result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
- tm.assert_sp_series_equal(result, exp)
-
- def test_loc_multi_tuple(self):
- orig = self.orig
- sparse = self.sparse
-
- assert sparse.loc["C", 0] == orig.loc["C", 0]
- assert np.isnan(sparse.loc["A", 1])
- assert np.isnan(sparse.loc["B", 0])
-
- def test_loc_slice(self):
- orig = self.orig
- sparse = self.sparse
- tm.assert_sp_series_equal(sparse.loc["A":], orig.loc["A":].to_sparse())
- tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
- tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
-
- tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
- tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
-
- def test_reindex(self):
- # GH 15447
- orig = self.orig
- sparse = self.sparse
-
- res = sparse.reindex([("A", 0), ("C", 1)])
- exp = orig.reindex([("A", 0), ("C", 1)]).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- # On specific level:
- res = sparse.reindex(["A", "C", "B"], level=0)
- exp = orig.reindex(["A", "C", "B"], level=0).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- # single element list (GH 15447)
- res = sparse.reindex(["A"], level=0)
- exp = orig.reindex(["A"], level=0).to_sparse()
- tm.assert_sp_series_equal(res, exp)
-
- with pytest.raises(TypeError):
- # Incomplete keys are not accepted for reindexing:
- sparse.reindex(["A", "C"])
-
- # "copy" argument:
- res = sparse.reindex(sparse.index, copy=True)
- exp = orig.reindex(orig.index, copy=True).to_sparse()
- tm.assert_sp_series_equal(res, exp)
- assert sparse is not res
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-class TestSparseDataFrameIndexing:
- def test_getitem(self):
- orig = pd.DataFrame(
- [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4], [0, np.nan, 5]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- tm.assert_sp_series_equal(sparse["x"], orig["x"].to_sparse())
- tm.assert_sp_frame_equal(sparse[["x"]], orig[["x"]].to_sparse())
- tm.assert_sp_frame_equal(sparse[["z", "x"]], orig[["z", "x"]].to_sparse())
-
- tm.assert_sp_frame_equal(
- sparse[[True, False, True, True]],
- orig[[True, False, True, True]].to_sparse(),
- )
-
- tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], orig.iloc[[1, 2]].to_sparse())
-
- def test_getitem_fill_value(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
-
- result = sparse[["z"]]
- expected = orig[["z"]].to_sparse(fill_value=0)
- tm.assert_sp_frame_equal(result, expected, check_fill_value=False)
-
- tm.assert_sp_series_equal(sparse["y"], orig["y"].to_sparse(fill_value=0))
-
- exp = orig[["x"]].to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse[["x"]], exp)
-
- exp = orig[["z", "x"]].to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse[["z", "x"]], exp)
-
- indexer = [True, False, True, True]
- exp = orig[indexer].to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse[indexer], exp)
-
- exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)
-
- def test_loc(self):
- orig = pd.DataFrame(
- [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- assert sparse.loc[0, "x"] == 1
- assert np.isnan(sparse.loc[1, "z"])
- assert sparse.loc[2, "z"] == 4
-
- # have to specify `kind='integer'`, since we construct a
- # new SparseArray here, and the default sparse type is
- # integer there, but block in SparseSeries
- tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse(kind="integer"))
- tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse(kind="integer"))
- tm.assert_sp_series_equal(
- sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
- tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
-
- result = sparse.loc[[1, 2]]
- exp = orig.loc[[1, 2]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[[1, 2], :]
- exp = orig.loc[[1, 2], :].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[:, ["x", "z"]]
- exp = orig.loc[:, ["x", "z"]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[[0, 2], ["x", "z"]]
- exp = orig.loc[[0, 2], ["x", "z"]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # exceeds the bounds
- result = sparse.reindex([1, 3, 4, 5])
- exp = orig.reindex([1, 3, 4, 5]).to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # dense array
- result = sparse.loc[orig.x % 2 == 1]
- exp = orig.loc[orig.x % 2 == 1].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse.x % 2 == 1]
- exp = orig.loc[orig.x % 2 == 1].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # sparse array
- result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
- tm.assert_sp_frame_equal(result, exp)
-
- def test_loc_index(self):
- orig = pd.DataFrame(
- [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
- index=list("abc"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- assert sparse.loc["a", "x"] == 1
- assert np.isnan(sparse.loc["b", "z"])
- assert sparse.loc["c", "z"] == 4
-
- tm.assert_sp_series_equal(
- sparse.loc["a"], orig.loc["a"].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.loc["b"], orig.loc["b"].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
- )
-
- tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
- tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
-
- result = sparse.loc[["a", "b"]]
- exp = orig.loc[["a", "b"]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[["a", "b"], :]
- exp = orig.loc[["a", "b"], :].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[:, ["x", "z"]]
- exp = orig.loc[:, ["x", "z"]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.loc[["c", "a"], ["x", "z"]]
- exp = orig.loc[["c", "a"], ["x", "z"]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # dense array
- result = sparse.loc[orig.x % 2 == 1]
- exp = orig.loc[orig.x % 2 == 1].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # sparse array (actuary it coerces to normal Series)
- result = sparse.loc[sparse.x % 2 == 1]
- exp = orig.loc[orig.x % 2 == 1].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- # sparse array
- result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
- tm.assert_sp_frame_equal(result, exp)
-
- def test_loc_slice(self):
- orig = pd.DataFrame(
- [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
- tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
-
- def test_iloc(self):
- orig = pd.DataFrame([[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]])
- sparse = orig.to_sparse()
-
- assert sparse.iloc[1, 1] == 3
- assert np.isnan(sparse.iloc[2, 0])
-
- tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse(kind="integer"))
- tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse(kind="integer"))
- tm.assert_sp_series_equal(
- sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(
- sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
- )
- tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
- tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
-
- result = sparse.iloc[[1, 2]]
- exp = orig.iloc[[1, 2]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.iloc[[1, 2], :]
- exp = orig.iloc[[1, 2], :].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.iloc[:, [1, 0]]
- exp = orig.iloc[:, [1, 0]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- result = sparse.iloc[[2], [1, 0]]
- exp = orig.iloc[[2], [1, 0]].to_sparse()
- tm.assert_sp_frame_equal(result, exp)
-
- with pytest.raises(IndexError):
- sparse.iloc[[1, 3, 5]]
-
- def test_iloc_slice(self):
- orig = pd.DataFrame(
- [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
- tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
-
- def test_at(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
- assert sparse.at["A", "x"] == orig.at["A", "x"]
- assert np.isnan(sparse.at["B", "z"])
- assert np.isnan(sparse.at["C", "y"])
- assert sparse.at["D", "x"] == orig.at["D", "x"]
-
- def test_at_fill_value(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
- assert sparse.at["A", "x"] == orig.at["A", "x"]
- assert np.isnan(sparse.at["B", "z"])
- assert np.isnan(sparse.at["C", "y"])
- assert sparse.at["D", "x"] == orig.at["D", "x"]
-
- def test_iat(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
- assert sparse.iat[0, 0] == orig.iat[0, 0]
- assert np.isnan(sparse.iat[1, 2])
- assert np.isnan(sparse.iat[2, 1])
- assert sparse.iat[2, 0] == orig.iat[2, 0]
-
- assert np.isnan(sparse.iat[-1, -2])
- assert sparse.iat[-1, -1] == orig.iat[-1, -1]
-
- def test_iat_fill_value(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
- assert sparse.iat[0, 0] == orig.iat[0, 0]
- assert np.isnan(sparse.iat[1, 2])
- assert np.isnan(sparse.iat[2, 1])
- assert sparse.iat[2, 0] == orig.iat[2, 0]
-
- assert np.isnan(sparse.iat[-1, -2])
- assert sparse.iat[-1, -1] == orig.iat[-1, -1]
-
- def test_take(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- tm.assert_sp_frame_equal(sparse.take([0]), orig.take([0]).to_sparse())
- tm.assert_sp_frame_equal(sparse.take([0, 1]), orig.take([0, 1]).to_sparse())
- tm.assert_sp_frame_equal(sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse())
-
- def test_take_fill_value(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
-
- exp = orig.take([0]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse.take([0]), exp)
-
- exp = orig.take([0, 1]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
-
- exp = orig.take([-1, -2]).to_sparse(fill_value=0)
- exp._default_fill_value = np.nan
- tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
-
- def test_reindex(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- res = sparse.reindex(["A", "C", "B"])
- exp = orig.reindex(["A", "C", "B"]).to_sparse()
- tm.assert_sp_frame_equal(res, exp)
-
- orig = pd.DataFrame(
- [
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- ],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse()
-
- res = sparse.reindex(["A", "C", "B"])
- exp = orig.reindex(["A", "C", "B"]).to_sparse()
- tm.assert_sp_frame_equal(res, exp)
-
- def test_reindex_fill_value(self):
- orig = pd.DataFrame(
- [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "C", "B"])
- exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
- tm.assert_sp_frame_equal(res, exp)
-
- # all missing
- orig = pd.DataFrame(
- [
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- ],
- index=list("ABCD"),
- columns=list("xyz"),
- )
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "C", "B"])
- exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
- tm.assert_sp_frame_equal(res, exp)
-
- # all fill_value
- orig = pd.DataFrame(
- [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
- index=list("ABCD"),
- columns=list("xyz"),
- dtype=np.int,
- )
- sparse = orig.to_sparse(fill_value=0)
-
- res = sparse.reindex(["A", "C", "B"])
- exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
- tm.assert_sp_frame_equal(res, exp)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-class TestMultitype:
- def setup_method(self, method):
- self.cols = ["string", "int", "float", "object"]
-
- self.string_series = pd.SparseSeries(["a", "b", "c"])
- self.int_series = pd.SparseSeries([1, 2, 3])
- self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
- self.object_series = pd.SparseSeries([[], {}, set()])
- self.sdf = pd.SparseDataFrame(
- {
- "string": self.string_series,
- "int": self.int_series,
- "float": self.float_series,
- "object": self.object_series,
- }
- )
- self.sdf = self.sdf[self.cols]
- self.ss = pd.SparseSeries(["a", 1, 1.1, []], index=self.cols)
-
- def test_frame_basic_dtypes(self):
- for _, row in self.sdf.iterrows():
- assert row.dtype == SparseDtype(object)
- tm.assert_sp_series_equal(
- self.sdf["string"], self.string_series, check_names=False
- )
- tm.assert_sp_series_equal(self.sdf["int"], self.int_series, check_names=False)
- tm.assert_sp_series_equal(
- self.sdf["float"], self.float_series, check_names=False
- )
- tm.assert_sp_series_equal(
- self.sdf["object"], self.object_series, check_names=False
- )
-
- def test_frame_indexing_single(self):
- tm.assert_sp_series_equal(
- self.sdf.iloc[0],
- pd.SparseSeries(["a", 1, 1.1, []], index=self.cols),
- check_names=False,
- )
- tm.assert_sp_series_equal(
- self.sdf.iloc[1],
- pd.SparseSeries(["b", 2, 1.2, {}], index=self.cols),
- check_names=False,
- )
- tm.assert_sp_series_equal(
- self.sdf.iloc[2],
- pd.SparseSeries(["c", 3, 1.3, set()], index=self.cols),
- check_names=False,
- )
-
- def test_frame_indexing_multiple(self):
- tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
- tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
- tm.assert_sp_frame_equal(
- self.sdf.iloc[[1, 2]],
- pd.SparseDataFrame(
- {
- "string": self.string_series.iloc[[1, 2]],
- "int": self.int_series.iloc[[1, 2]],
- "float": self.float_series.iloc[[1, 2]],
- "object": self.object_series.iloc[[1, 2]],
- },
- index=[1, 2],
- )[self.cols],
- )
- tm.assert_sp_frame_equal(
- self.sdf[["int", "string"]],
- pd.SparseDataFrame({"int": self.int_series, "string": self.string_series}),
- )
-
- def test_series_indexing_single(self):
- for i, idx in enumerate(self.cols):
- assert self.ss.iloc[i] == self.ss[idx]
- tm.assert_class_equal(self.ss.iloc[i], self.ss[idx], obj="series index")
-
- assert self.ss["string"] == "a"
- assert self.ss["int"] == 1
- assert self.ss["float"] == 1.1
- assert self.ss["object"] == []
-
- def test_series_indexing_multiple(self):
- tm.assert_sp_series_equal(
- self.ss.loc[["string", "int"]],
- pd.SparseSeries(["a", 1], index=["string", "int"]),
- )
- tm.assert_sp_series_equal(
- self.ss.loc[["string", "object"]],
- pd.SparseSeries(["a", []], index=["string", "object"]),
- )
diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py
deleted file mode 100644
index 85b899dfe76d5..0000000000000
--- a/pandas/tests/sparse/test_pivot.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import _np_version_under1p17
-import pandas.util.testing as tm
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
-@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
-class TestPivotTable:
- def setup_method(self, method):
- rs = np.random.RandomState(0)
- self.dense = pd.DataFrame(
- {
- "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
- "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
- "C": rs.randn(8),
- "D": rs.randn(8),
- "E": [np.nan, np.nan, 1, 2, np.nan, 1, np.nan, np.nan],
- }
- )
- self.sparse = self.dense.to_sparse()
-
- def test_pivot_table(self):
- res_sparse = pd.pivot_table(self.sparse, index="A", columns="B", values="C")
- res_dense = pd.pivot_table(self.dense, index="A", columns="B", values="C")
- tm.assert_frame_equal(res_sparse, res_dense)
-
- res_sparse = pd.pivot_table(self.sparse, index="A", columns="B", values="E")
- res_dense = pd.pivot_table(self.dense, index="A", columns="B", values="E")
- tm.assert_frame_equal(res_sparse, res_dense)
-
- res_sparse = pd.pivot_table(
- self.sparse, index="A", columns="B", values="E", aggfunc="mean"
- )
- res_dense = pd.pivot_table(
- self.dense, index="A", columns="B", values="E", aggfunc="mean"
- )
- tm.assert_frame_equal(res_sparse, res_dense)
-
- def test_pivot_table_with_nans(self):
- res_sparse = pd.pivot_table(
- self.sparse, index="A", columns="B", values="E", aggfunc="sum"
- )
- res_dense = pd.pivot_table(
- self.dense, index="A", columns="B", values="E", aggfunc="sum"
- )
- tm.assert_frame_equal(res_sparse, res_dense)
-
- @pytest.mark.xfail(
- not _np_version_under1p17,
- reason="failing occasionally on numpy > 1.17",
- strict=False,
- )
- def test_pivot_table_multi(self):
- res_sparse = pd.pivot_table(
- self.sparse, index="A", columns="B", values=["D", "E"]
- )
- res_dense = pd.pivot_table(
- self.dense, index="A", columns="B", values=["D", "E"]
- )
- res_dense = res_dense.apply(lambda x: x.astype("Sparse[float64]"))
- tm.assert_frame_equal(res_sparse, res_dense)
diff --git a/pandas/tests/sparse/test_reshape.py b/pandas/tests/sparse/test_reshape.py
deleted file mode 100644
index bb5232f065a04..0000000000000
--- a/pandas/tests/sparse/test_reshape.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-import pandas.util.testing as tm
-
-
-@pytest.fixture
-def sparse_df():
- return pd.SparseDataFrame({0: {0: 1}, 1: {1: 1}, 2: {2: 1}}) # eye
-
-
-@pytest.fixture
-def multi_index3():
- return pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_sparse_frame_stack(sparse_df, multi_index3):
- ss = sparse_df.stack()
- expected = pd.SparseSeries(np.ones(3), index=multi_index3)
- tm.assert_sp_series_equal(ss, expected)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_sparse_frame_unstack(sparse_df):
- mi = pd.MultiIndex.from_tuples([(0, 0), (1, 0), (1, 2)])
- sparse_df.index = mi
- arr = np.array([[1, np.nan, np.nan], [np.nan, 1, np.nan], [np.nan, np.nan, 1]])
- unstacked_df = pd.DataFrame(arr, index=mi).unstack()
- unstacked_sdf = sparse_df.unstack()
-
- tm.assert_numpy_array_equal(unstacked_df.values, unstacked_sdf.values)
-
-
-@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
-def test_sparse_series_unstack(sparse_df, multi_index3):
- frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()
-
- arr = np.array([1, np.nan, np.nan])
- arrays = {i: pd.SparseArray(np.roll(arr, i)) for i in range(3)}
- expected = pd.DataFrame(arrays)
- tm.assert_frame_equal(frame, expected)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c0d73821020b5..d81ee79418e9c 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1685,12 +1685,6 @@ def test_pad_backfill_object_segfault():
tm.assert_numpy_array_equal(result, expected)
-def test_arrmap():
- values = np.array(["foo", "foo", "bar", "bar", "baz", "qux"], dtype="O")
- result = libalgos.arrmap_object(values, lambda x: x in ["foo", "bar"])
- assert result.dtype == np.bool_
-
-
class TestTseriesUtil:
def test_combineFunc(self):
pass
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index d75016824d6cf..c760c75e44f6b 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -418,7 +418,7 @@ def test_value_counts_unique_nunique_null(self, null_obj):
values = o._shallow_copy(v)
else:
o = o.copy()
- o[0:2] = iNaT
+ o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index d96f806bc383f..65b2dab1b02a8 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -1,4 +1,5 @@
import collections
+from distutils.version import LooseVersion
from functools import partial
import string
@@ -33,14 +34,14 @@ def __call__(self):
def test_any_none():
- assert com._any_none(1, 2, 3, None)
- assert not com._any_none(1, 2, 3, 4)
+ assert com.any_none(1, 2, 3, None)
+ assert not com.any_none(1, 2, 3, 4)
def test_all_not_none():
- assert com._all_not_none(1, 2, 3, 4)
- assert not com._all_not_none(1, 2, 3, None)
- assert not com._all_not_none(None, None, None, None)
+ assert com.all_not_none(1, 2, 3, 4)
+ assert not com.all_not_none(1, 2, 3, None)
+ assert not com.all_not_none(None, None, None, None)
def test_random_state():
@@ -117,3 +118,13 @@ def test_git_version():
git_version = pd.__git_version__
assert len(git_version) == 40
assert all(c in string.hexdigits for c in git_version)
+
+
+def test_version_tag():
+ version = pd.__version__
+ try:
+ version > LooseVersion("0.0.1")
+ except TypeError:
+ raise ValueError(
+ "No git tags exist, please sync tags between upstream and your repo"
+ )
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 93baafddedeb4..b4d575682ffca 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -50,6 +50,7 @@ def test_dask(df):
assert ddf.compute() is not None
+@pytest.mark.filterwarnings("ignore:Panel class is removed")
def test_xarray(df):
xarray = import_module("xarray") # noqa
@@ -145,6 +146,7 @@ def _getitem_tuple(self, tup):
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
+@pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
def test_pyarrow(df):
pyarrow = import_module("pyarrow") # noqa
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 4070624985068..6edd3125331b9 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -8,13 +8,7 @@
from pandas.core.api import DataFrame
from pandas.core.computation import expressions as expr
import pandas.util.testing as tm
-from pandas.util.testing import (
- assert_almost_equal,
- assert_frame_equal,
- assert_series_equal,
-)
-
-from pandas.io.formats.printing import pprint_thing
+from pandas.util.testing import assert_frame_equal
_frame = DataFrame(randn(10000, 4), columns=list("ABCD"), dtype="float64")
_frame2 = DataFrame(randn(100, 4), columns=list("ABCD"), dtype="float64")
@@ -50,57 +44,35 @@ def setup_method(self, method):
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
- self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def teardown_method(self, method):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
- def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True):
+ def run_arithmetic(self, df, other):
expr._MIN_ELEMENTS = 0
operations = ["add", "sub", "mul", "mod", "truediv", "floordiv"]
- for arith in operations:
-
- operator_name = arith
- if arith == "div":
- operator_name = "truediv"
-
- if test_flex:
- op = lambda x, y: getattr(df, arith)(y)
- op.__name__ = arith
- else:
- op = getattr(operator, operator_name)
- expr.set_use_numexpr(False)
- expected = op(df, other)
- expr.set_use_numexpr(True)
+ for test_flex in [True, False]:
+ for arith in operations:
+ # TODO: share with run_binary
+ if test_flex:
+ op = lambda x, y: getattr(x, arith)(y)
+ op.__name__ = arith
+ else:
+ op = getattr(operator, arith)
+ expr.set_use_numexpr(False)
+ expected = op(df, other)
+ expr.set_use_numexpr(True)
- result = op(df, other)
- try:
- if check_dtype:
- if arith == "truediv":
+ result = op(df, other)
+ if arith == "truediv":
+ if expected.ndim == 1:
assert expected.dtype.kind == "f"
- assert_func(expected, result)
- except Exception:
- pprint_thing("Failed test with operator {op.__name__!r}".format(op=op))
- raise
-
- def test_integer_arithmetic(self):
- self.run_arithmetic(self.integer, self.integer, assert_frame_equal)
- self.run_arithmetic(
- self.integer.iloc[:, 0],
- self.integer.iloc[:, 0],
- assert_series_equal,
- check_dtype=True,
- )
+ else:
+ assert all(x.kind == "f" for x in expected.dtypes.values)
+ tm.assert_equal(expected, result)
- def run_binary(
- self,
- df,
- other,
- assert_func,
- test_flex=False,
- numexpr_ops={"gt", "lt", "ge", "le", "eq", "ne"},
- ):
+ def run_binary(self, df, other):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
@@ -110,97 +82,59 @@ def run_binary(
expr.set_test_mode(True)
operations = ["gt", "lt", "ge", "le", "eq", "ne"]
- for arith in operations:
- if test_flex:
- op = lambda x, y: getattr(df, arith)(y)
- op.__name__ = arith
- else:
- op = getattr(operator, arith)
- expr.set_use_numexpr(False)
- expected = op(df, other)
- expr.set_use_numexpr(True)
- expr.get_test_result()
- result = op(df, other)
- used_numexpr = expr.get_test_result()
- try:
- if arith in numexpr_ops:
- assert used_numexpr, "Did not use numexpr as expected."
+ for test_flex in [True, False]:
+ for arith in operations:
+ if test_flex:
+ op = lambda x, y: getattr(x, arith)(y)
+ op.__name__ = arith
else:
- assert not used_numexpr, "Used numexpr unexpectedly."
- assert_func(expected, result)
- except Exception:
- pprint_thing("Failed test with operation {arith!r}".format(arith=arith))
- pprint_thing("test_flex was {test_flex!r}".format(test_flex=test_flex))
- raise
-
- def run_frame(self, df, other, binary_comp=None, run_binary=True, **kwargs):
- self.run_arithmetic(df, other, assert_frame_equal, test_flex=False, **kwargs)
- self.run_arithmetic(df, other, assert_frame_equal, test_flex=True, **kwargs)
- if run_binary:
- if binary_comp is None:
+ op = getattr(operator, arith)
expr.set_use_numexpr(False)
- binary_comp = other + 1
+ expected = op(df, other)
expr.set_use_numexpr(True)
- self.run_binary(
- df, binary_comp, assert_frame_equal, test_flex=False, **kwargs
- )
- self.run_binary(
- df, binary_comp, assert_frame_equal, test_flex=True, **kwargs
- )
-
- def run_series(self, ser, other, binary_comp=None, **kwargs):
- self.run_arithmetic(ser, other, assert_series_equal, test_flex=False, **kwargs)
- self.run_arithmetic(ser, other, assert_almost_equal, test_flex=True, **kwargs)
- # series doesn't uses vec_compare instead of numexpr...
- # if binary_comp is None:
- # binary_comp = other + 1
- # self.run_binary(ser, binary_comp, assert_frame_equal,
- # test_flex=False, **kwargs)
- # self.run_binary(ser, binary_comp, assert_frame_equal,
- # test_flex=True, **kwargs)
-
- def test_integer_arithmetic_frame(self):
- self.run_frame(self.integer, self.integer)
-
- def test_integer_arithmetic_series(self):
- self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])
-
- def test_float_arithemtic_frame(self):
- self.run_frame(self.frame2, self.frame2)
-
- def test_float_arithmetic_series(self):
- self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])
-
- def test_mixed_arithmetic_frame(self):
- # TODO: FIGURE OUT HOW TO GET IT TO WORK...
+
+ expr.get_test_result()
+ result = op(df, other)
+ used_numexpr = expr.get_test_result()
+ assert used_numexpr, "Did not use numexpr as expected."
+ tm.assert_equal(expected, result)
+
+ def run_frame(self, df, other, run_binary=True):
+ self.run_arithmetic(df, other)
+ if run_binary:
+ expr.set_use_numexpr(False)
+ binary_comp = other + 1
+ expr.set_use_numexpr(True)
+ self.run_binary(df, binary_comp)
+
+ for i in range(len(df.columns)):
+ self.run_arithmetic(df.iloc[:, i], other.iloc[:, i])
+ # FIXME: dont leave commented-out
+ # series doesn't uses vec_compare instead of numexpr...
+ # binary_comp = other.iloc[:, i] + 1
+ # self.run_binary(df.iloc[:, i], binary_comp)
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ _integer,
+ _integer2,
+ # randint to get a case with zeros
+ _integer * np.random.randint(0, 2, size=np.shape(_integer)),
+ _frame,
+ _frame2,
+ _mixed,
+ _mixed2,
+ ],
+ )
+ def test_arithmetic(self, df):
+ # TODO: FIGURE OUT HOW TO GET RUN_BINARY TO WORK WITH MIXED=...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
- self.run_frame(self.mixed2, self.mixed2, run_binary=False)
-
- def test_mixed_arithmetic_series(self):
- for col in self.mixed2.columns:
- self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
-
- def test_float_arithemtic(self):
- self.run_arithmetic(self.frame, self.frame, assert_frame_equal)
- self.run_arithmetic(
- self.frame.iloc[:, 0],
- self.frame.iloc[:, 0],
- assert_series_equal,
- check_dtype=True,
- )
-
- def test_mixed_arithmetic(self):
- self.run_arithmetic(self.mixed, self.mixed, assert_frame_equal)
- for col in self.mixed.columns:
- self.run_arithmetic(self.mixed[col], self.mixed[col], assert_series_equal)
+ kinds = {x.kind for x in df.dtypes.values}
+ should = len(kinds) == 1
- def test_integer_with_zeros(self):
- self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
- self.run_arithmetic(self.integer, self.integer, assert_frame_equal)
- self.run_arithmetic(
- self.integer.iloc[:, 0], self.integer.iloc[:, 0], assert_series_equal
- )
+ self.run_frame(df, df, run_binary=should)
def test_invalid(self):
@@ -228,40 +162,33 @@ def test_invalid(self):
)
assert result
- def test_binary_ops(self):
+ @pytest.mark.parametrize(
+ "opname,op_str",
+ [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
+ )
+ @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)])
+ def test_binary_ops(self, opname, op_str, left, right):
def testit():
- for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
-
- for op, op_str in [
- ("add", "+"),
- ("sub", "-"),
- ("mul", "*"),
- ("div", "/"),
- ("pow", "**"),
- ]:
+ if opname == "pow":
+ # TODO: get this working
+ return
- if op == "pow":
- continue
+ op = getattr(operator, opname)
- if op == "div":
- op = getattr(operator, "truediv", None)
- else:
- op = getattr(operator, op, None)
- if op is not None:
- result = expr._can_use_numexpr(op, op_str, f, f, "evaluate")
- assert result != f._is_mixed_type
+ result = expr._can_use_numexpr(op, op_str, left, left, "evaluate")
+ assert result != left._is_mixed_type
- result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
- expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
+ result = expr.evaluate(op, op_str, left, left, use_numexpr=True)
+ expected = expr.evaluate(op, op_str, left, left, use_numexpr=False)
- if isinstance(result, DataFrame):
- tm.assert_frame_equal(result, expected)
- else:
- tm.assert_numpy_array_equal(result, expected.values)
+ if isinstance(result, DataFrame):
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_numpy_array_equal(result, expected.values)
- result = expr._can_use_numexpr(op, op_str, f2, f2, "evaluate")
- assert not result
+ result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
+ assert not result
expr.set_use_numexpr(False)
testit()
@@ -271,39 +198,37 @@ def testit():
expr.set_numexpr_threads()
testit()
- def test_boolean_ops(self):
+ @pytest.mark.parametrize(
+ "opname,op_str",
+ [
+ ("gt", ">"),
+ ("lt", "<"),
+ ("ge", ">="),
+ ("le", "<="),
+ ("eq", "=="),
+ ("ne", "!="),
+ ],
+ )
+ @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)])
+ def test_comparison_ops(self, opname, op_str, left, right):
def testit():
- for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
-
- f11 = f
- f12 = f + 1
+ f12 = left + 1
+ f22 = right + 1
- f21 = f2
- f22 = f2 + 1
+ op = getattr(operator, opname)
- for op, op_str in [
- ("gt", ">"),
- ("lt", "<"),
- ("ge", ">="),
- ("le", "<="),
- ("eq", "=="),
- ("ne", "!="),
- ]:
+ result = expr._can_use_numexpr(op, op_str, left, f12, "evaluate")
+ assert result != left._is_mixed_type
- op = getattr(operator, op)
-
- result = expr._can_use_numexpr(op, op_str, f11, f12, "evaluate")
- assert result != f11._is_mixed_type
-
- result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
- expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
- if isinstance(result, DataFrame):
- tm.assert_frame_equal(result, expected)
- else:
- tm.assert_numpy_array_equal(result, expected.values)
+ result = expr.evaluate(op, op_str, left, f12, use_numexpr=True)
+ expected = expr.evaluate(op, op_str, left, f12, use_numexpr=False)
+ if isinstance(result, DataFrame):
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_numpy_array_equal(result, expected.values)
- result = expr._can_use_numexpr(op, op_str, f21, f22, "evaluate")
- assert not result
+ result = expr._can_use_numexpr(op, op_str, right, f22, "evaluate")
+ assert not result
expr.set_use_numexpr(False)
testit()
@@ -313,17 +238,15 @@ def testit():
expr.set_numexpr_threads()
testit()
- def test_where(self):
+ @pytest.mark.parametrize("cond", [True, False])
+ @pytest.mark.parametrize("df", [_frame, _frame2, _mixed, _mixed2])
+ def test_where(self, cond, df):
def testit():
- for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
-
- for cond in [True, False]:
-
- c = np.empty(f.shape, dtype=np.bool_)
- c.fill(cond)
- result = expr.where(c, f.values, f.values + 1)
- expected = np.where(c, f.values, f.values + 1)
- tm.assert_numpy_array_equal(result, expected)
+ c = np.empty(df.shape, dtype=np.bool_)
+ c.fill(cond)
+ result = expr.where(c, df.values, df.values + 1)
+ expected = np.where(c, df.values, df.values + 1)
+ tm.assert_numpy_array_equal(result, expected)
expr.set_use_numexpr(False)
testit()
@@ -333,78 +256,81 @@ def testit():
expr.set_numexpr_threads()
testit()
- def test_bool_ops_raise_on_arithmetic(self):
+ @pytest.mark.parametrize(
+ "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")]
+ )
+ def test_bool_ops_raise_on_arithmetic(self, op_str, opname):
df = DataFrame({"a": np.random.rand(10) > 0.5, "b": np.random.rand(10) > 0.5})
- names = "truediv", "floordiv", "pow"
- ops = "/", "//", "**"
+
msg = "operator %r not implemented for bool dtypes"
- for op, name in zip(ops, names):
- f = getattr(operator, name)
- err_msg = re.escape(msg % op)
+ f = getattr(operator, opname)
+ err_msg = re.escape(msg % op_str)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(df, df)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(df, df)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(df.a, df.b)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(df.a, df.b)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(df.a, True)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(df.a, True)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(False, df.a)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(False, df.a)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(False, df)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(False, df)
- with pytest.raises(NotImplementedError, match=err_msg):
- f(df, True)
+ with pytest.raises(NotImplementedError, match=err_msg):
+ f(df, True)
- def test_bool_ops_warn_on_arithmetic(self):
+ @pytest.mark.parametrize(
+ "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")]
+ )
+ def test_bool_ops_warn_on_arithmetic(self, op_str, opname):
n = 10
df = DataFrame({"a": np.random.rand(n) > 0.5, "b": np.random.rand(n) > 0.5})
- names = "add", "mul", "sub"
- ops = "+", "*", "-"
+
subs = {"+": "|", "*": "&", "-": "^"}
sub_funcs = {"|": "or_", "&": "and_", "^": "xor"}
- for op, name in zip(ops, names):
- f = getattr(operator, name)
- fe = getattr(operator, sub_funcs[subs[op]])
-
- if op == "-":
- # raises TypeError
- continue
-
- with tm.use_numexpr(True, min_elements=5):
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(df, df)
- e = fe(df, df)
- tm.assert_frame_equal(r, e)
-
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(df.a, df.b)
- e = fe(df.a, df.b)
- tm.assert_series_equal(r, e)
-
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(df.a, True)
- e = fe(df.a, True)
- tm.assert_series_equal(r, e)
-
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(False, df.a)
- e = fe(False, df.a)
- tm.assert_series_equal(r, e)
-
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(False, df)
- e = fe(False, df)
- tm.assert_frame_equal(r, e)
-
- with tm.assert_produces_warning(check_stacklevel=False):
- r = f(df, True)
- e = fe(df, True)
- tm.assert_frame_equal(r, e)
+
+ f = getattr(operator, opname)
+ fe = getattr(operator, sub_funcs[subs[op_str]])
+
+ if op_str == "-":
+ # raises TypeError
+ return
+
+ with tm.use_numexpr(True, min_elements=5):
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(df, df)
+ e = fe(df, df)
+ tm.assert_frame_equal(r, e)
+
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(df.a, df.b)
+ e = fe(df.a, df.b)
+ tm.assert_series_equal(r, e)
+
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(df.a, True)
+ e = fe(df.a, True)
+ tm.assert_series_equal(r, e)
+
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(False, df.a)
+ e = fe(False, df.a)
+ tm.assert_series_equal(r, e)
+
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(False, df)
+ e = fe(False, df)
+ tm.assert_frame_equal(r, e)
+
+ with tm.assert_produces_warning(check_stacklevel=False):
+ r = f(df, True)
+ e = fe(df, True)
+ tm.assert_frame_equal(r, e)
@pytest.mark.parametrize(
"test_input,expected",
@@ -431,3 +357,29 @@ def test_bool_ops_column_name_dtype(self, test_input, expected):
# GH 22383 - .ne fails if columns containing column name 'dtype'
result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]])
assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv")
+ )
+ @pytest.mark.parametrize("axis", (0, 1))
+ def test_frame_series_axis(self, axis, arith):
+ # GH#26736 Dataframe.floordiv(Series, axis=1) fails
+ if axis == 1 and arith == "floordiv":
+ pytest.xfail("'floordiv' does not succeed with axis=1 #27636")
+
+ df = self.frame
+ if axis == 1:
+ other = self.frame.iloc[0, :]
+ else:
+ other = self.frame.iloc[:, 0]
+
+ expr._MIN_ELEMENTS = 0
+
+ op_func = getattr(df, arith)
+
+ expr.set_use_numexpr(False)
+ expected = op_func(other, axis=axis)
+ expr.set_use_numexpr(True)
+
+ result = op_func(other, axis=axis)
+ assert_frame_equal(expected, result)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index c97c69c323b56..dc4db6e7902a8 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -524,6 +524,22 @@ def test_stack_unstack_preserve_names(self):
restacked = unstacked.stack()
assert restacked.index.names == self.frame.index.names
+ @pytest.mark.parametrize("method", ["stack", "unstack"])
+ def test_stack_unstack_wrong_level_name(self, method):
+ # GH 18303 - wrong level name should raise
+
+ # A DataFrame with flat axes:
+ df = self.frame.loc["foo"]
+
+ with pytest.raises(KeyError, match="does not match index name"):
+ getattr(df, method)("mistake")
+
+ if method == "unstack":
+ # Same on a Series:
+ s = df.iloc[:, 0]
+ with pytest.raises(KeyError, match="does not match index name"):
+ getattr(s, method)("mistake")
+
def test_unstack_level_name(self):
result = self.frame.unstack("second")
expected = self.frame.unstack(level=1)
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 21ab28c94c978..49d1777df0751 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1,4 +1,5 @@
from functools import partial
+import operator
import warnings
import numpy as np
@@ -15,6 +16,7 @@
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
+has_c16 = hasattr(np, "complex128")
class TestnanopsDataFrame:
@@ -131,14 +133,9 @@ def _coerce_tds(targ, res):
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
- try:
- res = res.astype("c16")
- except RuntimeError:
- res = res.astype("f8")
- try:
- targ = targ.astype("c16")
- except RuntimeError:
- targ = targ.astype("f8")
+ cast_dtype = "c16" if has_c16 else "f8"
+ res = res.astype(cast_dtype)
+ targ = targ.astype(cast_dtype)
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
@@ -152,93 +149,62 @@ def check_fun_data(
targfunc,
testarval,
targarval,
- targarnanval,
check_dtype=True,
empty_targfunc=None,
**kwargs
):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
- targartempval = targarval if skipna else targarnanval
+ targartempval = targarval if skipna else testarval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
- try:
- res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
+ res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if skipna:
+ res = testfunc(testarval, axis=axis, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if axis is None:
+ res = testfunc(testarval, skipna=skipna, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if skipna and axis is None:
+ res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
- if skipna:
- res = testfunc(testarval, axis=axis, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- if axis is None:
- res = testfunc(testarval, skipna=skipna, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- if skipna and axis is None:
- res = testfunc(testarval, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- except BaseException as exc:
- exc.args += (
- "axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1),
- "skipna: {skipna}".format(skipna=skipna),
- "kwargs: {kwargs}".format(kwargs=kwargs),
- )
- raise
if testarval.ndim <= 1:
return
- try:
- testarval2 = np.take(testarval, 0, axis=-1)
- targarval2 = np.take(targarval, 0, axis=-1)
- targarnanval2 = np.take(targarnanval, 0, axis=-1)
- except ValueError:
- return
+ # Recurse on lower-dimension
+ testarval2 = np.take(testarval, 0, axis=-1)
+ targarval2 = np.take(targarval, 0, axis=-1)
self.check_fun_data(
testfunc,
targfunc,
testarval2,
targarval2,
- targarnanval2,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs
)
- def check_fun(
- self,
- testfunc,
- targfunc,
- testar,
- targar=None,
- targarnan=None,
- empty_targfunc=None,
- **kwargs
- ):
- if targar is None:
- targar = testar
- if targarnan is None:
- targarnan = testar
+ def check_fun(self, testfunc, targfunc, testar, empty_targfunc=None, **kwargs):
+
+ targar = testar
+ if testar.endswith("_nan") and hasattr(self, testar[:-4]):
+ targar = testar[:-4]
+
testarval = getattr(self, testar)
targarval = getattr(self, targar)
- targarnanval = getattr(self, targarnan)
- try:
- self.check_fun_data(
- testfunc,
- targfunc,
- testarval,
- targarval,
- targarnanval,
- empty_targfunc=empty_targfunc,
- **kwargs
- )
- except BaseException as exc:
- exc.args += (
- "testar: {testar}".format(testar=testar),
- "targar: {targar}".format(targar=targar),
- "targarnan: {targarnan}".format(targarnan=targarnan),
- )
- raise
+ self.check_fun_data(
+ testfunc,
+ targfunc,
+ testarval,
+ targarval,
+ empty_targfunc=empty_targfunc,
+ **kwargs
+ )
def check_funs(
self,
@@ -246,14 +212,13 @@ def check_funs(
targfunc,
allow_complex=True,
allow_all_nan=True,
- allow_str=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs
):
self.check_fun(testfunc, targfunc, "arr_float", **kwargs)
- self.check_fun(testfunc, targfunc, "arr_float_nan", "arr_float", **kwargs)
+ self.check_fun(testfunc, targfunc, "arr_float_nan", **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", **kwargs)
objs = [
@@ -267,26 +232,15 @@ def check_funs(
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", **kwargs)
- self.check_fun(
- testfunc, targfunc, "arr_complex_nan", "arr_complex", **kwargs
- )
+ self.check_fun(testfunc, targfunc, "arr_complex_nan", **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", **kwargs)
objs += [self.arr_complex.astype("O")]
- if allow_str:
- self.check_fun(testfunc, targfunc, "arr_str", **kwargs)
- self.check_fun(testfunc, targfunc, "arr_utf", **kwargs)
- objs += [self.arr_str.astype("O"), self.arr_utf.astype("O")]
-
if allow_date:
- try:
- targfunc(self.arr_date)
- except TypeError:
- pass
- else:
- self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
- objs += [self.arr_date.astype("O")]
+ targfunc(self.arr_date)
+ self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
+ objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
@@ -316,33 +270,20 @@ def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
value = value.astype("f8")
return func(value, **kwargs)
- def test_nanany(self):
- self.check_funs(
- nanops.nanany,
- np.any,
- allow_all_nan=False,
- allow_str=False,
- allow_date=False,
- allow_tdelta=False,
- )
-
- def test_nanall(self):
+ @pytest.mark.parametrize(
+ "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)]
+ )
+ def test_nan_funcs(self, nan_op, np_op):
+ # TODO: allow tdelta, doesn't break tests
self.check_funs(
- nanops.nanall,
- np.all,
- allow_all_nan=False,
- allow_str=False,
- allow_date=False,
- allow_tdelta=False,
+ nan_op, np_op, allow_all_nan=False, allow_date=False, allow_tdelta=False
)
def test_nansum(self):
self.check_funs(
nanops.nansum,
np.sum,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
check_dtype=False,
empty_targfunc=np.nansum,
)
@@ -351,11 +292,9 @@ def test_nanmean(self):
self.check_funs(
nanops.nanmean,
np.mean,
- allow_complex=False,
+ allow_complex=False, # TODO: allow this, doesn't break test
allow_obj=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
)
def test_nanmean_overflow(self):
@@ -371,22 +310,31 @@ def test_nanmean_overflow(self):
assert result == np_result
assert result.dtype == np.float64
- def test_returned_dtype(self):
-
- dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
- if hasattr(np, "float128"):
- dtypes.append(np.float128)
+ @pytest.mark.parametrize(
+ "dtype",
+ [
+ np.int16,
+ np.int32,
+ np.int64,
+ np.float32,
+ np.float64,
+ getattr(np, "float128", None),
+ ],
+ )
+ def test_returned_dtype(self, dtype):
+ if dtype is None:
+ # no float128 available
+ return
- for dtype in dtypes:
- s = Series(range(10), dtype=dtype)
- group_a = ["mean", "std", "var", "skew", "kurt"]
- group_b = ["min", "max"]
- for method in group_a + group_b:
- result = getattr(s, method)()
- if is_integer_dtype(dtype) and method in group_a:
- assert result.dtype == np.float64
- else:
- assert result.dtype == dtype
+ s = Series(range(10), dtype=dtype)
+ group_a = ["mean", "std", "var", "skew", "kurt"]
+ group_b = ["min", "max"]
+ for method in group_a + group_b:
+ result = getattr(s, method)()
+ if is_integer_dtype(dtype) and method in group_a:
+ assert result.dtype == np.float64
+ else:
+ assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
@@ -395,9 +343,7 @@ def test_nanmedian(self):
nanops.nanmedian,
np.median,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
)
@@ -407,9 +353,7 @@ def test_nanvar(self, ddof):
nanops.nanvar,
np.var,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@@ -420,9 +364,7 @@ def test_nanstd(self, ddof):
nanops.nanstd,
np.std,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@@ -437,32 +379,19 @@ def test_nansem(self, ddof):
nanops.nansem,
sem,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
- def _minmax_wrap(self, value, axis=None, func=None):
-
- # numpy warns if all nan
- res = func(value, axis)
- if res.dtype.kind == "m":
- res = np.atleast_1d(res)
- return res
-
- def test_nanmin(self):
+ @pytest.mark.parametrize(
+ "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
+ )
+ def test_nanops_with_warnings(self, nan_op, np_op):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._minmax_wrap, func=np.min)
- self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
-
- def test_nanmax(self):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._minmax_wrap, func=np.max)
- self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
+ self.check_funs(nan_op, np_op, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -483,20 +412,13 @@ def test_nanargmax(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmax)
- self.check_funs(
- nanops.nanargmax,
- func,
- allow_str=False,
- allow_obj=False,
- allow_date=True,
- allow_tdelta=True,
- )
+ self.check_funs(nanops.nanargmax, func, allow_obj=False)
def test_nanargmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmin)
- self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False)
+ self.check_funs(nanops.nanargmin, func, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
@@ -520,7 +442,6 @@ def test_nanskew(self):
nanops.nanskew,
func,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@@ -536,7 +457,6 @@ def test_nankurt(self):
nanops.nankurt,
func,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@@ -545,7 +465,6 @@ def test_nanprod(self):
self.check_funs(
nanops.nanprod,
np.prod,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
@@ -697,77 +616,58 @@ def check_nancomp(self, checkfun, targ0):
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
- try:
- res0 = checkfun(arr_float, arr_float1)
- tm.assert_almost_equal(targ0, res0)
-
- if targ0.ndim > 1:
- targ1 = np.vstack([targ0, arr_nan])
- else:
- targ1 = np.hstack([targ0, arr_nan])
- res1 = checkfun(arr_float_nan, arr_float1_nan)
- tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
-
- targ2 = arr_nan_nan
- res2 = checkfun(arr_float_nan, arr_nan_float1)
- tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
- except Exception as exc:
- exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),)
- raise
-
- try:
- arr_float = np.take(arr_float, 0, axis=-1)
- arr_float1 = np.take(arr_float1, 0, axis=-1)
- arr_nan = np.take(arr_nan, 0, axis=-1)
- arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
- arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
- arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
- arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
- targ0 = np.take(targ0, 0, axis=-1)
- except ValueError:
- break
-
- def test_nangt(self):
- targ0 = self.arr_float > self.arr_float1
- self.check_nancomp(nanops.nangt, targ0)
-
- def test_nange(self):
- targ0 = self.arr_float >= self.arr_float1
- self.check_nancomp(nanops.nange, targ0)
-
- def test_nanlt(self):
- targ0 = self.arr_float < self.arr_float1
- self.check_nancomp(nanops.nanlt, targ0)
-
- def test_nanle(self):
- targ0 = self.arr_float <= self.arr_float1
- self.check_nancomp(nanops.nanle, targ0)
-
- def test_naneq(self):
- targ0 = self.arr_float == self.arr_float1
- self.check_nancomp(nanops.naneq, targ0)
+ res0 = checkfun(arr_float, arr_float1)
+ tm.assert_almost_equal(targ0, res0)
- def test_nanne(self):
- targ0 = self.arr_float != self.arr_float1
- self.check_nancomp(nanops.nanne, targ0)
-
- def check_bool(self, func, value, correct, *args, **kwargs):
+ if targ0.ndim > 1:
+ targ1 = np.vstack([targ0, arr_nan])
+ else:
+ targ1 = np.hstack([targ0, arr_nan])
+ res1 = checkfun(arr_float_nan, arr_float1_nan)
+ tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
+
+ targ2 = arr_nan_nan
+ res2 = checkfun(arr_float_nan, arr_nan_float1)
+ tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
+
+ # Lower dimension for next step in the loop
+ arr_float = np.take(arr_float, 0, axis=-1)
+ arr_float1 = np.take(arr_float1, 0, axis=-1)
+ arr_nan = np.take(arr_nan, 0, axis=-1)
+ arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
+ arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
+ arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
+ arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
+ targ0 = np.take(targ0, 0, axis=-1)
+
+ @pytest.mark.parametrize(
+ "op,nanop",
+ [
+ (operator.eq, nanops.naneq),
+ (operator.ne, nanops.nanne),
+ (operator.gt, nanops.nangt),
+ (operator.ge, nanops.nange),
+ (operator.lt, nanops.nanlt),
+ (operator.le, nanops.nanle),
+ ],
+ )
+ def test_nan_comparison(self, op, nanop):
+ targ0 = op(self.arr_float, self.arr_float1)
+ self.check_nancomp(nanop, targ0)
+
+ def check_bool(self, func, value, correct):
while getattr(value, "ndim", True):
- try:
- res0 = func(value, *args, **kwargs)
- if correct:
- assert res0
- else:
- assert not res0
- except BaseException as exc:
- exc.args += ("dim: {}".format(getattr(value, "ndim", value)),)
- raise
+ res0 = func(value)
+ if correct:
+ assert res0
+ else:
+ assert not res0
+
if not hasattr(value, "ndim"):
break
- try:
- value = np.take(value, 0, axis=-1)
- except ValueError:
- break
+
+ # Reduce dimension for next step in the loop
+ value = np.take(value, 0, axis=-1)
def test__has_infs(self):
pairs = [
@@ -796,21 +696,13 @@ def test__has_infs(self):
for arr, correct in pairs:
val = getattr(self, arr)
- try:
- self.check_bool(nanops._has_infs, val, correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(nanops._has_infs, val, correct)
for arr, correct in pairs_float:
val = getattr(self, arr)
- try:
- self.check_bool(nanops._has_infs, val, correct)
- self.check_bool(nanops._has_infs, val.astype("f4"), correct)
- self.check_bool(nanops._has_infs, val.astype("f2"), correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(nanops._has_infs, val, correct)
+ self.check_bool(nanops._has_infs, val.astype("f4"), correct)
+ self.check_bool(nanops._has_infs, val.astype("f2"), correct)
def test__isfinite(self):
pairs = [
@@ -844,21 +736,13 @@ def test__isfinite(self):
for arr, correct in pairs:
val = getattr(self, arr)
- try:
- self.check_bool(func1, val, correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(func1, val, correct)
for arr, correct in pairs_float:
val = getattr(self, arr)
- try:
- self.check_bool(func1, val, correct)
- self.check_bool(func1, val.astype("f4"), correct)
- self.check_bool(func1, val.astype("f2"), correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(func1, val, correct)
+ self.check_bool(func1, val.astype("f4"), correct)
+ self.check_bool(func1, val.astype("f2"), correct)
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
@@ -911,10 +795,11 @@ def test_non_convertable_values(self):
msg = "Could not convert foo to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric("foo")
- msg = "Could not convert {} to numeric"
+
+ # with the wrong type, python raises TypeError for us
+ msg = "argument must be a string or a number"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric({})
- msg = r"Could not convert \[\] to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric([])
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index bc848a528f2fd..bc8dc7272a83a 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -384,7 +384,7 @@ def test_str_cat_name(self, box, other):
other = other(values)
else:
other = values
- result = box(values, name="name").str.cat(other, sep=",", join="left")
+ result = box(values, name="name").str.cat(other, sep=",")
assert result.name == "name"
@pytest.mark.parametrize("box", [Series, Index])
@@ -418,12 +418,9 @@ def test_str_cat(self, box):
assert_series_or_index_equal(result, expected)
# errors for incorrect lengths
- rgx = "All arrays must be same length, except those having an index.*"
+ rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
z = Series(["1", "2", "3"])
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(z)
-
with pytest.raises(ValueError, match=rgx):
s.str.cat(z.values)
@@ -452,14 +449,12 @@ def test_str_cat_categorical(self, box, dtype_caller, dtype_target, sep):
expected = Index(["ab", "aa", "bb", "ac"])
expected = expected if box == Index else Series(expected, index=s)
- # Series/Index with unaligned Index
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # FutureWarning to switch to alignment by default
- result = s.str.cat(t, sep=sep)
- assert_series_or_index_equal(result, expected)
+ # Series/Index with unaligned Index -> t.values
+ result = s.str.cat(t.values, sep=sep)
+ assert_series_or_index_equal(result, expected)
# Series/Index with Series having matching Index
- t = Series(t, index=s)
+ t = Series(t.values, index=s)
result = s.str.cat(t, sep=sep)
assert_series_or_index_equal(result, expected)
@@ -468,11 +463,14 @@ def test_str_cat_categorical(self, box, dtype_caller, dtype_target, sep):
assert_series_or_index_equal(result, expected)
# Series/Index with Series having different Index
- t = Series(t.values, index=t)
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # FutureWarning to switch to alignment by default
- result = s.str.cat(t, sep=sep)
- assert_series_or_index_equal(result, expected)
+ t = Series(t.values, index=t.values)
+ expected = Index(["aa", "aa", "aa", "bb", "bb"])
+ expected = (
+ expected if box == Index else Series(expected, index=expected.str[:1])
+ )
+
+ result = s.str.cat(t, sep=sep)
+ assert_series_or_index_equal(result, expected)
# test integer/float dtypes (inferred by constructor) and mixed
@pytest.mark.parametrize(
@@ -523,55 +521,33 @@ def test_str_cat_mixed_inputs(self, box):
result = s.str.cat([t, s.values])
assert_series_or_index_equal(result, expected)
- # Series/Index with list of list-likes
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # nested list-likes will be deprecated
- result = s.str.cat([t.values, list(s)])
- assert_series_or_index_equal(result, expected)
-
# Series/Index with list of Series; different indexes
t.index = ["b", "c", "d", "a"]
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # FutureWarning to switch to alignment by default
- result = s.str.cat([t, s])
- assert_series_or_index_equal(result, expected)
+ expected = box(["aDa", "bAb", "cBc", "dCd"])
+ expected = expected if box == Index else Series(expected.values, index=s.values)
+ result = s.str.cat([t, s])
+ assert_series_or_index_equal(result, expected)
- # Series/Index with mixed list; different indexes
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # FutureWarning to switch to alignment by default
- result = s.str.cat([t, s.values])
- assert_series_or_index_equal(result, expected)
+ # Series/Index with mixed list; different index
+ result = s.str.cat([t, s.values])
+ assert_series_or_index_equal(result, expected)
# Series/Index with DataFrame; different indexes
d.index = ["b", "c", "d", "a"]
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # FutureWarning to switch to alignment by default
- result = s.str.cat(d)
- assert_series_or_index_equal(result, expected)
-
- # Series/Index with iterator of list-likes
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # nested list-likes will be deprecated
- result = s.str.cat(iter([t.values, list(s)]))
- assert_series_or_index_equal(result, expected)
+ expected = box(["aDd", "bAa", "cBb", "dCc"])
+ expected = expected if box == Index else Series(expected.values, index=s.values)
+ result = s.str.cat(d)
+ assert_series_or_index_equal(result, expected)
# errors for incorrect lengths
- rgx = "All arrays must be same length, except those having an index.*"
+ rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
z = Series(["1", "2", "3"])
e = concat([z, z], axis=1)
- # DataFrame
- with pytest.raises(ValueError, match=rgx):
- s.str.cat(e)
-
# two-dimensional ndarray
with pytest.raises(ValueError, match=rgx):
s.str.cat(e.values)
- # list of Series
- with pytest.raises(ValueError, match=rgx):
- s.str.cat([z, s])
-
# list of list-likes
with pytest.raises(ValueError, match=rgx):
s.str.cat([z.values, s.values])
@@ -615,6 +591,10 @@ def test_str_cat_mixed_inputs(self, box):
with pytest.raises(TypeError, match=rgx):
s.str.cat(1)
+ # nested list-likes
+ with pytest.raises(TypeError, match=rgx):
+ s.str.cat(iter([t.values, list(s)]))
+
@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"])
@pytest.mark.parametrize("box", [Series, Index])
def test_str_cat_align_indexed(self, box, join):
@@ -660,10 +640,9 @@ def test_str_cat_align_mixed_inputs(self, join):
result = s.str.cat([t, u], join=join, na_rep="-")
tm.assert_series_equal(result, expected)
- with tm.assert_produces_warning(expected_warning=FutureWarning):
- # nested list-likes will be deprecated
- result = s.str.cat([t, list(u)], join=join, na_rep="-")
- tm.assert_series_equal(result, expected)
+ with pytest.raises(TypeError, match="others must be Series,.*"):
+ # nested lists are forbidden
+ s.str.cat([t, list(u)], join=join)
# errors for incorrect lengths
rgx = r"If `others` contains arrays or lists \(or other list-likes.*"
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 1abc8aece5ec9..ddf2c6e65b474 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -132,10 +132,7 @@ def _get_offset(self, klass, value=1, normalize=False):
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
- try:
- klass = klass(value, normalize=normalize)
- except Exception:
- klass = klass(normalize=normalize)
+ klass = klass(value, normalize=normalize)
return klass
def test_apply_out_of_range(self, tz_naive_fixture):
@@ -4351,3 +4348,12 @@ def test_last_week_of_month_on_offset():
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
+
+
+def test_week_add_invalid():
+ # Week with weekday should raise TypeError and _not_ AttributeError
+ # when adding invalid offset
+ offset = Week(weekday=1)
+ other = Day()
+ with pytest.raises(TypeError, match="Cannot add"):
+ offset + other
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index 47e398dfe3d16..7a8a6d511aa69 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -29,6 +29,7 @@ def test_namespace():
"NaTType",
"iNaT",
"is_null_datetimelike",
+ "NullFrequencyError",
"OutOfBoundsDatetime",
"Period",
"IncompatibleFrequency",
diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py
new file mode 100644
index 0000000000000..cd729956a027c
--- /dev/null
+++ b/pandas/tests/tslibs/test_fields.py
@@ -0,0 +1,31 @@
+import numpy as np
+
+from pandas._libs.tslibs import fields
+
+import pandas.util.testing as tm
+
+
+def test_fields_readonly():
+ # https://github.com/vaexio/vaex/issues/357
+ # fields functions should't raise when we pass read-only data
+ dtindex = np.arange(5, dtype=np.int64) * 10 ** 9 * 3600 * 24 * 32
+ dtindex.flags.writeable = False
+
+ result = fields.get_date_name_field(dtindex, "month_name")
+ expected = np.array(
+ ["January", "February", "March", "April", "May"], dtype=np.object
+ )
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = fields.get_date_field(dtindex, "Y")
+ expected = np.array([1970, 1970, 1970, 1970, 1970], dtype=np.int32)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = fields.get_start_end_field(dtindex, "is_month_start", None)
+ expected = np.array([True, False, False, False, False], dtype=np.bool_)
+ tm.assert_numpy_array_equal(result, expected)
+
+ # treat dtindex as timedeltas for this next one
+ result = fields.get_timedelta_field(dtindex, "days")
+ expected = np.arange(5, dtype=np.int32) * 32
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index a05b567adad7a..1683fda500f85 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -4,7 +4,7 @@
from pandas.errors import UnsupportedFunctionCall
from pandas import DataFrame, Series
-import pandas.core.window as rwindow
+from pandas.core.window import EWM
from pandas.tests.window.common import Base
@@ -60,7 +60,7 @@ def test_constructor(self, which):
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(self, method):
# see gh-12811
- e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)
+ e = EWM(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 1e92c981964c5..098acdff93ac6 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -5,7 +5,7 @@
import pandas as pd
from pandas import DataFrame, Series
-import pandas.core.window as rwindow
+from pandas.core.window import Expanding
from pandas.tests.window.common import Base
import pandas.util.testing as tm
@@ -42,7 +42,7 @@ def test_constructor(self, which):
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(self, method):
# see gh-12811
- e = rwindow.Expanding(Series([2, 4, 6]), window=2)
+ e = Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py
index d860859958254..3d6cd7d10bd10 100644
--- a/pandas/tests/window/test_moments.py
+++ b/pandas/tests/window/test_moments.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series, concat, isna, notna
-import pandas.core.window as rwindow
+from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import Base
import pandas.util.testing as tm
@@ -1878,7 +1878,7 @@ def test_flex_binary_moment(self):
" np.ndarray/Series/DataFrame"
)
with pytest.raises(TypeError, match=msg):
- rwindow._flex_binary_moment(5, 6, None)
+ _flex_binary_moment(5, 6, None)
def test_corr_sanity(self):
# GH 3155
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index c7177e1d3914f..70ba85120af3c 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -8,7 +8,7 @@
import pandas as pd
from pandas import DataFrame, Series
-import pandas.core.window as rwindow
+from pandas.core.window import Rolling
from pandas.tests.window.common import Base
import pandas.util.testing as tm
@@ -101,7 +101,7 @@ def test_constructor_timedelta_window_and_minperiods(self, window, raw):
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(self, method):
# see gh-12811
- r = rwindow.Rolling(Series([2, 4, 6]), window=2)
+ r = Rolling(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
@@ -326,3 +326,38 @@ def test_rolling_axis_count(self, axis_frame):
result = df.rolling(2, axis=axis_frame).count()
tm.assert_frame_equal(result, expected)
+
+ def test_readonly_array(self):
+ # GH-27766
+ arr = np.array([1, 3, np.nan, 3, 5])
+ arr.setflags(write=False)
+ result = pd.Series(arr).rolling(2).mean()
+ expected = pd.Series([np.nan, 2, np.nan, np.nan, 4])
+ tm.assert_series_equal(result, expected)
+
+ def test_rolling_datetime(self, axis_frame, tz_naive_fixture):
+ # GH-28192
+ tz = tz_naive_fixture
+ df = pd.DataFrame(
+ {
+ i: [1] * 2
+ for i in pd.date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)
+ }
+ )
+ if axis_frame in [0, "index"]:
+ result = df.T.rolling("2D", axis=axis_frame).sum().T
+ else:
+ result = df.rolling("2D", axis=axis_frame).sum()
+ expected = pd.DataFrame(
+ {
+ **{
+ i: [1.0] * 2
+ for i in pd.date_range("2019-8-01", periods=1, freq="D", tz=tz)
+ },
+ **{
+ i: [2.0] * 2
+ for i in pd.date_range("2019-8-02", "2019-8-03", freq="D", tz=tz)
+ },
+ }
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py
index a6a56c98a9377..5692404205012 100644
--- a/pandas/tests/window/test_window.py
+++ b/pandas/tests/window/test_window.py
@@ -6,7 +6,7 @@
import pandas as pd
from pandas import Series
-import pandas.core.window as rwindow
+from pandas.core.window import Window
from pandas.tests.window.common import Base
@@ -50,7 +50,7 @@ def test_constructor_with_win_type(self, which, win_types):
@pytest.mark.parametrize("method", ["sum", "mean"])
def test_numpy_compat(self, method):
# see gh-12811
- w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])
+ w = Window(Series([2, 4, 6]), window=[0, 2])
msg = "numpy operations are not valid with window objects"
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index dfe91b514bbe1..4491e6ad9ac7e 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -138,7 +138,7 @@ def to_offset(freq):
delta = offset
else:
delta = delta + offset
- except Exception:
+ except ValueError:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
else:
@@ -170,7 +170,7 @@ def to_offset(freq):
delta = offset
else:
delta = delta + offset
- except Exception:
+ except (ValueError, TypeError):
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
if delta is None:
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index ac3e92c772517..82cbfa831bf32 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -9,6 +9,7 @@
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
+ Period,
Timedelta,
Timestamp,
ccalendar,
@@ -33,7 +34,6 @@
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
-from pandas.core.dtypes.generic import ABCPeriod
from pandas.core.dtypes.inference import is_list_like
from pandas.core.tools.datetimes import to_datetime
@@ -204,8 +204,7 @@ def __add__(date):
normalize : bool, default False
Whether to round the result of a DateOffset addition down to the
previous midnight.
- **kwds
- Temporal parameter that add to or replace the offset value.
+ **kwds : Temporal parameters that add to or replace the offset value.
Parameters that **add** to the offset (like Timedelta):
@@ -233,16 +232,19 @@ def __add__(date):
See Also
--------
- dateutil.relativedelta.relativedelta
+ dateutil.relativedelta.relativedelta : The relativedelta type is designed
+ to be applied to an existing datetime and can replace specific components of
+ that datetime, or represents an interval of time.
Examples
--------
+ >>> from pandas.tseries.offsets import DateOffset
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
>>> ts + DateOffset(months=3)
Timestamp('2017-04-01 09:10:11')
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
- >>> ts + DateOffset(month=3)
+ >>> ts + DateOffset(months=2)
Timestamp('2017-03-01 09:10:11')
"""
@@ -603,7 +605,7 @@ def apply(self, other):
return BDay(self.n, offset=self.offset + other, normalize=self.normalize)
else:
raise ApplyTypeError(
- "Only know how to combine business day with " "datetime or timedelta."
+ "Only know how to combine business day with datetime or timedelta."
)
@apply_index_wraps
@@ -1075,8 +1077,6 @@ def onOffset(self, dt):
class CustomBusinessHour(_CustomMixin, BusinessHourMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n custom business days.
-
- .. versionadded:: 0.18.1
"""
_prefix = "CBH"
@@ -1398,8 +1398,6 @@ class SemiMonthEnd(SemiMonthOffset):
Two DateOffset's per month repeating on the last
day of the month and day_of_month.
- .. versionadded:: 0.19.0
-
Parameters
----------
n : int
@@ -1459,8 +1457,6 @@ class SemiMonthBegin(SemiMonthOffset):
Two DateOffset's per month repeating on the first
day of the month and day_of_month.
- .. versionadded:: 0.19.0
-
Parameters
----------
n : int
@@ -1549,6 +1545,13 @@ def apply(self, other):
if self.weekday is None:
return other + self.n * self._inc
+ if not isinstance(other, datetime):
+ raise TypeError(
+ "Cannot add {typ} to {cls}".format(
+ typ=type(other).__name__, cls=type(self).__name__
+ )
+ )
+
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
@@ -2539,7 +2542,7 @@ def __add__(self, other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
- elif isinstance(other, ABCPeriod):
+ elif isinstance(other, Period):
return other + self
try:
return self.apply(other)
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 5c7d481ff2586..8a25e511b5fc4 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -1,21 +1,35 @@
from functools import wraps
import inspect
from textwrap import dedent
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+)
import warnings
from pandas._libs.properties import cache_readonly # noqa
+FuncType = Callable[..., Any]
+F = TypeVar("F", bound=FuncType)
+
def deprecate(
name: str,
- alternative: Callable,
+ alternative: Callable[..., Any],
version: str,
alt_name: Optional[str] = None,
klass: Optional[Type[Warning]] = None,
stacklevel: int = 2,
msg: Optional[str] = None,
-) -> Callable:
+) -> Callable[..., Any]:
"""
Return a new function that emits a deprecation warning on use.
@@ -47,7 +61,7 @@ def deprecate(
warning_msg = msg or "{} is deprecated, use {} instead".format(name, alt_name)
@wraps(alternative)
- def wrapper(*args, **kwargs):
+ def wrapper(*args, **kwargs) -> Callable[..., Any]:
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
@@ -90,9 +104,9 @@ def wrapper(*args, **kwargs):
def deprecate_kwarg(
old_arg_name: str,
new_arg_name: Optional[str],
- mapping: Optional[Union[Dict, Callable[[Any], Any]]] = None,
+ mapping: Optional[Union[Dict[Any, Any], Callable[[Any], Any]]] = None,
stacklevel: int = 2,
-) -> Callable:
+) -> Callable[..., Any]:
"""
Decorator to deprecate a keyword argument of a function.
@@ -160,27 +174,27 @@ def deprecate_kwarg(
"mapping from old to new argument values " "must be dict or callable!"
)
- def _deprecate_kwarg(func):
+ def _deprecate_kwarg(func: F) -> F:
@wraps(func)
- def wrapper(*args, **kwargs):
+ def wrapper(*args, **kwargs) -> Callable[..., Any]:
old_arg_value = kwargs.pop(old_arg_name, None)
- if new_arg_name is None and old_arg_value is not None:
- msg = (
- "the '{old_name}' keyword is deprecated and will be "
- "removed in a future version. "
- "Please take steps to stop the use of '{old_name}'"
- ).format(old_name=old_arg_name)
- warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
- kwargs[old_arg_name] = old_arg_value
- return func(*args, **kwargs)
-
if old_arg_value is not None:
- if mapping is not None:
- if hasattr(mapping, "get"):
- new_arg_value = mapping.get(old_arg_value, old_arg_value)
- else:
+ if new_arg_name is None:
+ msg = (
+ "the '{old_name}' keyword is deprecated and will be "
+ "removed in a future version. "
+ "Please take steps to stop the use of '{old_name}'"
+ ).format(old_name=old_arg_name)
+ warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
+ kwargs[old_arg_name] = old_arg_value
+ return func(*args, **kwargs)
+
+ elif mapping is not None:
+ if callable(mapping):
new_arg_value = mapping(old_arg_value)
+ else:
+ new_arg_value = mapping.get(old_arg_value, old_arg_value)
msg = (
"the {old_name}={old_val!r} keyword is deprecated, "
"use {new_name}={new_val!r} instead"
@@ -198,7 +212,7 @@ def wrapper(*args, **kwargs):
).format(old_name=old_arg_name, new_name=new_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
- if kwargs.get(new_arg_name, None) is not None:
+ if kwargs.get(new_arg_name) is not None:
msg = (
"Can only specify '{old_name}' or '{new_name}', " "not both"
).format(old_name=old_arg_name, new_name=new_arg_name)
@@ -207,17 +221,17 @@ def wrapper(*args, **kwargs):
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
- return wrapper
+ return cast(F, wrapper)
return _deprecate_kwarg
def rewrite_axis_style_signature(
name: str, extra_params: List[Tuple[str, Any]]
-) -> Callable:
- def decorate(func):
+) -> Callable[..., Any]:
+ def decorate(func: F) -> F:
@wraps(func)
- def wrapper(*args, **kwargs):
+ def wrapper(*args, **kwargs) -> Callable[..., Any]:
return func(*args, **kwargs)
kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
@@ -234,8 +248,9 @@ def wrapper(*args, **kwargs):
sig = inspect.Signature(params)
- func.__signature__ = sig
- return wrapper
+ # https://github.com/python/typing/issues/598
+ func.__signature__ = sig # type: ignore
+ return cast(F, wrapper)
return decorate
@@ -279,18 +294,17 @@ def __init__(self, *args, **kwargs):
self.params = args or kwargs
- def __call__(self, func: Callable) -> Callable:
+ def __call__(self, func: F) -> F:
func.__doc__ = func.__doc__ and func.__doc__ % self.params
return func
def update(self, *args, **kwargs) -> None:
"""
Update self.params with supplied args.
-
- If called, we assume self.params is a dict.
"""
- self.params.update(*args, **kwargs)
+ if isinstance(self.params, dict):
+ self.params.update(*args, **kwargs)
class Appender:
@@ -320,7 +334,7 @@ def __init__(self, addendum: Optional[str], join: str = "", indents: int = 0):
self.addendum = addendum
self.join = join
- def __call__(self, func: Callable) -> Callable:
+ def __call__(self, func: F) -> F:
func.__doc__ = func.__doc__ if func.__doc__ else ""
self.addendum = self.addendum if self.addendum else ""
docitems = [func.__doc__, self.addendum]
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 3de4e5d66d577..0e07b9f5fe9f7 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -25,9 +25,8 @@ def test_foo():
"""
from distutils.version import LooseVersion
import locale
-from typing import Optional
+from typing import Callable, Optional
-from _pytest.mark.structures import MarkDecorator
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
@@ -103,7 +102,7 @@ def _skip_if_no_scipy():
)
-def skip_if_installed(package: str,) -> MarkDecorator:
+def skip_if_installed(package: str) -> Callable:
"""
Skip a test if a package is installed.
@@ -117,7 +116,7 @@ def skip_if_installed(package: str,) -> MarkDecorator:
)
-def skip_if_no(package: str, min_version: Optional[str] = None) -> MarkDecorator:
+def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable:
"""
Generic function to help skip tests when required packages are not
present on the testing system.
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index cf8452cdd0c59..aee58f808d9e6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -4,14 +4,11 @@
from datetime import datetime
from functools import wraps
import gzip
-import http.client
-import lzma
import os
import re
from shutil import rmtree
import string
import tempfile
-import traceback
from typing import Union, cast
import warnings
import zipfile
@@ -26,7 +23,7 @@
)
import pandas._libs.testing as _testing
-from pandas.compat import raise_with_traceback
+from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback
from pandas.core.dtypes.common import (
is_bool,
@@ -70,6 +67,8 @@
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
+lzma = _import_lzma()
+
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
@@ -211,7 +210,7 @@ def decompress_file(path, compression):
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
- f = lzma.LZMAFile(path, "rb")
+ f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
@@ -264,9 +263,7 @@ def write_to_compressed(compression, path, data, dest="test"):
compress_method = bz2.BZ2File
elif compression == "xz":
- import lzma
-
- compress_method = lzma.LZMAFile
+ compress_method = _get_lzma_file(lzma)
else:
msg = "Unrecognized compression type: {}".format(compression)
raise ValueError(msg)
@@ -513,7 +510,7 @@ def ensure_clean(filename=None, return_filelike=False):
finally:
try:
os.close(fd)
- except Exception:
+ except OSError:
print(
"Couldn't close file descriptor: {fdesc} (file: {fname})".format(
fdesc=fd, fname=filename
@@ -522,7 +519,7 @@ def ensure_clean(filename=None, return_filelike=False):
try:
if os.path.exists(filename):
os.remove(filename)
- except Exception as e:
+ except OSError as e:
print("Exception on removing file: {error}".format(error=e))
@@ -541,7 +538,7 @@ def ensure_clean_dir():
finally:
try:
rmtree(directory_name)
- except Exception:
+ except OSError:
pass
@@ -581,7 +578,8 @@ def assert_index_equal(
check_categorical: bool = True,
obj: str = "Index",
) -> None:
- """Check that left and right Index are equal.
+ """
+ Check that left and right Index are equal.
Parameters
----------
@@ -1082,7 +1080,8 @@ def assert_series_equal(
check_categorical=True,
obj="Series",
):
- """Check that left and right Series are equal.
+ """
+ Check that left and right Series are equal.
Parameters
----------
@@ -1332,8 +1331,6 @@ def assert_frame_equal(
_check_isinstance(left, right, DataFrame)
if check_frame_type:
- # ToDo: There are some tests using rhs is SparseDataFrame
- # lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
@@ -1557,142 +1554,6 @@ def assert_sp_array_equal(
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
-def assert_sp_series_equal(
- left,
- right,
- check_dtype=True,
- exact_indices=True,
- check_series_type=True,
- check_names=True,
- check_kind=True,
- check_fill_value=True,
- consolidate_block_indices=False,
- obj="SparseSeries",
-):
- """Check that the left and right SparseSeries are equal.
-
- Parameters
- ----------
- left : SparseSeries
- right : SparseSeries
- check_dtype : bool, default True
- Whether to check the Series dtype is identical.
- exact_indices : bool, default True
- check_series_type : bool, default True
- Whether to check the SparseSeries class is identical.
- check_names : bool, default True
- Whether to check the SparseSeries name attribute.
- check_kind : bool, default True
- Whether to just the kind of the sparse index for each column.
- check_fill_value : bool, default True
- Whether to check that left.fill_value matches right.fill_value
- consolidate_block_indices : bool, default False
- Whether to consolidate contiguous blocks for sparse arrays with
- a BlockIndex. Some operations, e.g. concat, will end up with
- block indices that could be consolidated. Setting this to true will
- create a new BlockIndex for that array, with consolidated
- block indices.
- obj : str, default 'SparseSeries'
- Specify the object name being compared, internally used to show
- the appropriate assertion message.
- """
- _check_isinstance(left, right, pd.SparseSeries)
-
- if check_series_type:
- assert_class_equal(left, right, obj=obj)
-
- assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))
-
- assert_sp_array_equal(
- left.values,
- right.values,
- check_kind=check_kind,
- check_fill_value=check_fill_value,
- consolidate_block_indices=consolidate_block_indices,
- )
-
- if check_names:
- assert_attr_equal("name", left, right)
- if check_dtype:
- assert_attr_equal("dtype", left, right)
-
- assert_numpy_array_equal(np.asarray(left.values), np.asarray(right.values))
-
-
-def assert_sp_frame_equal(
- left,
- right,
- check_dtype=True,
- exact_indices=True,
- check_frame_type=True,
- check_kind=True,
- check_fill_value=True,
- consolidate_block_indices=False,
- obj="SparseDataFrame",
-):
- """Check that the left and right SparseDataFrame are equal.
-
- Parameters
- ----------
- left : SparseDataFrame
- right : SparseDataFrame
- check_dtype : bool, default True
- Whether to check the Series dtype is identical.
- exact_indices : bool, default True
- SparseSeries SparseIndex objects must be exactly the same,
- otherwise just compare dense representations.
- check_frame_type : bool, default True
- Whether to check the SparseDataFrame class is identical.
- check_kind : bool, default True
- Whether to just the kind of the sparse index for each column.
- check_fill_value : bool, default True
- Whether to check that left.fill_value matches right.fill_value
- consolidate_block_indices : bool, default False
- Whether to consolidate contiguous blocks for sparse arrays with
- a BlockIndex. Some operations, e.g. concat, will end up with
- block indices that could be consolidated. Setting this to true will
- create a new BlockIndex for that array, with consolidated
- block indices.
- obj : str, default 'SparseDataFrame'
- Specify the object name being compared, internally used to show
- the appropriate assertion message.
- """
- _check_isinstance(left, right, pd.SparseDataFrame)
-
- if check_frame_type:
- assert_class_equal(left, right, obj=obj)
-
- assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))
- assert_index_equal(left.columns, right.columns, obj="{obj}.columns".format(obj=obj))
-
- if check_fill_value:
- assert_attr_equal("default_fill_value", left, right, obj=obj)
-
- for col, series in left.items():
- assert col in right
- # trade-off?
-
- if exact_indices:
- assert_sp_series_equal(
- series,
- right[col],
- check_dtype=check_dtype,
- check_kind=check_kind,
- check_fill_value=check_fill_value,
- consolidate_block_indices=consolidate_block_indices,
- )
- else:
- assert_series_equal(
- series.to_dense(), right[col].to_dense(), check_dtype=check_dtype
- )
-
- # do I care?
- # assert(left.default_kind == right.default_kind)
-
- for col in right:
- assert col in left
-
-
# -----------------------------------------------------------------------------
# Others
@@ -1854,10 +1715,10 @@ def makeStringSeries(name=None):
def makeObjectSeries(name=None):
- dateIndex = makeDateIndex(N)
- dateIndex = Index(dateIndex, dtype=object)
+ data = makeStringIndex(N)
+ data = Index(data, dtype=object)
index = makeStringIndex(N)
- return Series(dateIndex, index=index, name=name)
+ return Series(data, index=index, name=name)
def getSeriesData():
@@ -2274,11 +2135,17 @@ def dec(f):
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
-# and conditionally raise on these exception types
-_network_error_classes = (IOError, http.client.HTTPException, TimeoutError)
+# and conditionally raise on exception types in _get_default_network_errors
+
+
+def _get_default_network_errors():
+ # Lazy import for http.client because it imports many things from the stdlib
+ import http.client
+ return (IOError, http.client.HTTPException, TimeoutError)
-def can_connect(url, error_classes=_network_error_classes):
+
+def can_connect(url, error_classes=None):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
@@ -2293,6 +2160,10 @@ def can_connect(url, error_classes=_network_error_classes):
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
+
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
try:
with urlopen(url):
pass
@@ -2308,7 +2179,7 @@ def network(
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
- error_classes=_network_error_classes,
+ error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
@@ -2396,6 +2267,9 @@ def network(
"""
from pytest import skip
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
t.network = True
@wraps(t)
@@ -2416,10 +2290,7 @@ def wrapper(*args, **kwargs):
" and error {error}".format(error=e)
)
- try:
- e_str = traceback.format_exc(e)
- except Exception:
- e_str = str(e)
+ e_str = str(e)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
@@ -2662,7 +2533,8 @@ class for all warnings. To check that no warning is returned,
for m in clear:
try:
m.__warningregistry__.clear()
- except Exception:
+ except AttributeError:
+ # module may not have __warningregistry__
pass
saw_warning = False
@@ -2862,30 +2734,6 @@ def _constructor_sliced(self):
return SubclassedSeries
-class SubclassedSparseSeries(pd.SparseSeries):
- _metadata = ["testattr"]
-
- @property
- def _constructor(self):
- return SubclassedSparseSeries
-
- @property
- def _constructor_expanddim(self):
- return SubclassedSparseDataFrame
-
-
-class SubclassedSparseDataFrame(pd.SparseDataFrame):
- _metadata = ["testattr"]
-
- @property
- def _constructor(self):
- return SubclassedSparseDataFrame
-
- @property
- def _constructor_sliced(self):
- return SubclassedSparseSeries
-
-
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000..2ec4739c2f7f8
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,14 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+# See https://github.com/scipy/scipy/pull/10431 for the AIX issue.
+requires = [
+ "setuptools",
+ "wheel",
+ "Cython>=0.29.13", # Note: sync with setup.py
+ "numpy==1.13.3; python_version=='3.5' and platform_system!='AIX'",
+ "numpy==1.13.3; python_version=='3.6' and platform_system!='AIX'",
+ "numpy==1.14.5; python_version>='3.7' and platform_system!='AIX'",
+ "numpy==1.16.0; python_version=='3.5' and platform_system=='AIX'",
+ "numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'",
+ "numpy==1.16.0; python_version>='3.7' and platform_system=='AIX'",
+]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index e49ad10bfc99d..fd8e6378240b4 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,7 +2,7 @@ numpy>=1.15
python-dateutil>=2.6.1
pytz
asv
-cython>=0.28.2
+cython>=0.29.13
black
cpplint
flake8
@@ -17,6 +17,10 @@ numpydoc>=0.9.0
nbconvert>=5.4.1
nbsphinx
pandoc
+markdown
+feedparser
+pyyaml
+requests
boto3
botocore>=1.11
hypothesis>=3.82
@@ -45,7 +49,7 @@ html5lib
lxml
openpyxl
pyarrow>=0.9.0
-pyqt
+pyqt5>=5.9.2
tables>=3.4.2
python-snappy
s3fs
@@ -54,4 +58,5 @@ xarray
xlrd
xlsxwriter
xlwt
+odfpy
pyreadstat
\ No newline at end of file
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index 1075a257d4270..95a892b822cff 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -10,11 +10,11 @@
Usage::
$ ./find_commits_touching_func.py (see arguments below)
"""
-import logging
-import re
-import os
import argparse
from collections import namedtuple
+import logging
+import os
+import re
from dateutil.parser import parse
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index ac73859b22598..29fe8bf84c12b 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -16,11 +16,11 @@
import os
import re
import sys
-import yaml
+import yaml
EXCLUDE = {"python=3"}
-RENAME = {"pytables": "tables"}
+RENAME = {"pytables": "tables", "pyqt": "pyqt5"}
def conda_package_to_pip(package):
diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py
deleted file mode 100755
index 95352751a23c6..0000000000000
--- a/scripts/merge-pr.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Utility for creating well-formed pull request merges and pushing them to
-# Apache.
-# usage: ./apache-pr-merge.py (see config env vars below)
-#
-# Lightly modified from version of this script in incubator-parquet-format
-from subprocess import check_output
-from requests.auth import HTTPBasicAuth
-import requests
-
-import os
-import sys
-import textwrap
-
-PANDAS_HOME = "."
-PROJECT_NAME = "pandas"
-print("PANDAS_HOME = " + PANDAS_HOME)
-
-# Remote name with the PR
-PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
-
-# Remote name where results pushed
-PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
-
-GITHUB_BASE = "https://github.com/pandas-dev/" + PROJECT_NAME + "/pull"
-GITHUB_API_BASE = "https://api.github.com/repos/pandas-dev/" + PROJECT_NAME
-
-# Prefix added to temporary branches
-BRANCH_PREFIX = "PR_TOOL"
-
-os.chdir(PANDAS_HOME)
-
-auth_required = False
-
-if auth_required:
- GITHUB_USERNAME = os.environ["GITHUB_USER"]
- import getpass
-
- GITHUB_PASSWORD = getpass.getpass(
- "Enter github.com password for %s:" % GITHUB_USERNAME
- )
-
- def get_json_auth(url):
- auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
- req = requests.get(url, auth=auth)
- return req.json()
-
- get_json = get_json_auth
-else:
-
- def get_json_no_auth(url):
- req = requests.get(url)
- return req.json()
-
- get_json = get_json_no_auth
-
-
-def fail(msg):
- print(msg)
- clean_up()
- sys.exit(-1)
-
-
-def run_cmd(cmd):
- if isinstance(cmd, str):
- cmd = cmd.split(" ")
-
- output = check_output(cmd)
-
- if isinstance(output, bytes):
- output = output.decode("utf-8")
- return output
-
-
-def continue_maybe(prompt):
- result = input("\n%s (y/n): " % prompt)
- if result.lower() != "y":
- fail("Okay, exiting")
-
-
-def continue_maybe2(prompt):
- result = input("\n%s (y/n): " % prompt)
- if result.lower() != "y":
- return False
- else:
- return True
-
-
-original_head = run_cmd("git rev-parse HEAD")[:8]
-
-
-def clean_up():
- print("Restoring head pointer to %s" % original_head)
- run_cmd("git checkout %s" % original_head)
-
- branches = run_cmd("git branch").replace(" ", "").split("\n")
-
- for branch in [b for b in branches if b.startswith(BRANCH_PREFIX)]:
- print("Deleting local branch %s" % branch)
- run_cmd("git branch -D %s" % branch)
-
-
-# Merge the requested PR and return the merge hash
-def merge_pr(pr_num, target_ref):
-
- pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
- target_branch_name = "%s_MERGE_PR_%s_%s" % (
- BRANCH_PREFIX,
- pr_num,
- target_ref.upper(),
- )
- run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
- run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
- run_cmd("git checkout %s" % target_branch_name)
-
- had_conflicts = False
- try:
- run_cmd(["git", "merge", pr_branch_name, "--squash"])
- except Exception as e:
- msg = "Error merging: %s\nWould you like to manually fix-up " "this merge?" % e
- continue_maybe(msg)
- msg = (
- "Okay, please fix any conflicts and 'git add' "
- "conflicting files... Finished?"
- )
- continue_maybe(msg)
- had_conflicts = True
-
- commit_authors = run_cmd(
- ["git", "log", "HEAD..%s" % pr_branch_name, "--pretty=format:%an <%ae>"]
- ).split("\n")
- distinct_authors = sorted(
- set(commit_authors), key=lambda x: commit_authors.count(x), reverse=True
- )
- primary_author = distinct_authors[0]
- commits = run_cmd(
- ["git", "log", "HEAD..%s" % pr_branch_name, "--pretty=format:%h [%an] %s"]
- ).split("\n\n")
-
- merge_message_flags = []
-
- merge_message_flags += ["-m", title]
- if body is not None:
- merge_message_flags += ["-m", "\n".join(textwrap.wrap(body))]
-
- authors = "\n".join("Author: %s" % a for a in distinct_authors)
-
- merge_message_flags += ["-m", authors]
-
- if had_conflicts:
- committer_name = run_cmd("git config --get user.name").strip()
- committer_email = run_cmd("git config --get user.email").strip()
- message = (
- "This patch had conflicts when merged, "
- "resolved by\nCommitter: %s <%s>" % (committer_name, committer_email)
- )
- merge_message_flags += ["-m", message]
-
- # The string "Closes #%s" string is required for GitHub to correctly close
- # the PR
- merge_message_flags += [
- "-m",
- "Closes #%s from %s and squashes the following commits:"
- % (pr_num, pr_repo_desc),
- ]
- for c in commits:
- merge_message_flags += ["-m", c]
-
- run_cmd(["git", "commit", '--author="%s"' % primary_author] + merge_message_flags)
-
- continue_maybe(
- "Merge complete (local ref %s). Push to %s?"
- % (target_branch_name, PUSH_REMOTE_NAME)
- )
-
- try:
- run_cmd(
- "git push %s %s:%s" % (PUSH_REMOTE_NAME, target_branch_name, target_ref)
- )
- except Exception as e:
- clean_up()
- fail("Exception while pushing: %s" % e)
-
- merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
- clean_up()
- print("Pull request #%s merged!" % pr_num)
- print("Merge hash: %s" % merge_hash)
- return merge_hash
-
-
-def update_pr(pr_num, user_login, base_ref):
-
- pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
-
- run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
- run_cmd("git checkout %s" % pr_branch_name)
-
- continue_maybe(
- "Update ready (local ref %s)? Push to %s/%s?"
- % (pr_branch_name, user_login, base_ref)
- )
-
- push_user_remote = "https://github.com/%s/pandas.git" % user_login
-
- try:
- run_cmd("git push %s %s:%s" % (push_user_remote, pr_branch_name, base_ref))
- except Exception as e:
-
- if continue_maybe2("Force push?"):
- try:
- run_cmd(
- "git push -f %s %s:%s"
- % (push_user_remote, pr_branch_name, base_ref)
- )
- except Exception as e:
- fail("Exception while pushing: %s" % e)
- clean_up()
- else:
- fail("Exception while pushing: %s" % e)
- clean_up()
-
- clean_up()
- print("Pull request #%s updated!" % pr_num)
-
-
-def cherry_pick(pr_num, merge_hash, default_branch):
- pick_ref = input("Enter a branch name [%s]: " % default_branch)
- if pick_ref == "":
- pick_ref = default_branch
-
- pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
-
- run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
- run_cmd("git checkout %s" % pick_branch_name)
- run_cmd("git cherry-pick -sx %s" % merge_hash)
-
- continue_maybe(
- "Pick complete (local ref %s). Push to %s?"
- % (pick_branch_name, PUSH_REMOTE_NAME)
- )
-
- try:
- run_cmd("git push %s %s:%s" % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
- except Exception as e:
- clean_up()
- fail("Exception while pushing: %s" % e)
-
- pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
- clean_up()
-
- print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
- print("Pick hash: %s" % pick_hash)
- return pick_ref
-
-
-def fix_version_from_branch(branch, versions):
- # Note: Assumes this is a sorted (newest->oldest) list of un-released
- # versions
- if branch == "master":
- return versions[0]
- else:
- branch_ver = branch.replace("branch-", "")
- return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
-
-
-pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
-pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
-
-url = pr["url"]
-title = pr["title"]
-body = pr["body"]
-target_ref = pr["base"]["ref"]
-user_login = pr["user"]["login"]
-base_ref = pr["head"]["ref"]
-pr_repo_desc = "%s/%s" % (user_login, base_ref)
-
-if pr["merged"] is True:
- print(
- "Pull request {0} has already been merged, please backport manually".format(
- pr_num
- )
- )
- sys.exit(0)
-
-if not bool(pr["mergeable"]):
- msg = (
- "Pull request {0} is not mergeable in its current form.\n"
- "Continue? (experts only!)".format(pr_num)
- )
- continue_maybe(msg)
-
-print("\n=== Pull Request #%s ===" % pr_num)
-
-# we may have un-printable unicode in our title
-try:
- title = title.encode("raw_unicode_escape")
-except Exception:
- pass
-
-print(
- "title\t{title}\nsource\t{source}\ntarget\t{target}\nurl\t{url}".format(
- title=title, source=pr_repo_desc, target=target_ref, url=url
- )
-)
-
-
-merged_refs = [target_ref]
-
-print("\nProceed with updating or merging pull request #%s?" % pr_num)
-update = input(
- "Update PR and push to remote (r), merge locally (l), " "or do nothing (n) ?"
-)
-update = update.lower()
-
-if update == "r":
- merge_hash = update_pr(pr_num, user_login, base_ref)
-elif update == "l":
- merge_hash = merge_pr(pr_num, target_ref)
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index f3364e6725a20..85e5bf239cbfa 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -2,12 +2,13 @@
import random
import string
import textwrap
-import pytest
-import numpy as np
-import pandas as pd
+import numpy as np
+import pytest
import validate_docstrings
+import pandas as pd
+
validate_one = validate_docstrings.validate_one
@@ -200,7 +201,7 @@ def contains(self, pat, case=True, na=np.nan):
def mode(self, axis, numeric_only):
"""
- Ensure sphinx directives don't affect checks for trailing periods.
+ Ensure reST directives don't affect checks for leading periods.
Parameters
----------
@@ -447,6 +448,27 @@ def deprecation_in_wrong_order(self):
def method_wo_docstrings(self):
pass
+ def directives_without_two_colons(self, first, second):
+ """
+ Ensure reST directives have trailing colons.
+
+ Parameters
+ ----------
+ first : str
+ Sentence ending in period, followed by single directive w/o colons.
+
+ .. versionchanged 0.1.2
+
+ second : bool
+ Sentence ending in period, followed by multiple directives w/o
+ colons.
+
+ .. versionadded 0.1.2
+ .. deprecated 0.00.0
+
+ """
+ pass
+
class BadSummaries:
def wrong_line(self):
@@ -840,6 +862,7 @@ def test_bad_class(self, capsys):
"plot",
"method",
"private_classes",
+ "directives_without_two_colons",
],
)
def test_bad_generic_functions(self, capsys, func):
@@ -879,6 +902,14 @@ def test_bad_generic_functions(self, capsys, func):
"deprecation_in_wrong_order",
("Deprecation warning should precede extended summary",),
),
+ (
+ "BadGenericDocStrings",
+ "directives_without_two_colons",
+ (
+ "reST directives ['versionchanged', 'versionadded', "
+ "'deprecated'] must be followed by two colons",
+ ),
+ ),
(
"BadSeeAlso",
"desc_no_period",
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 37623d32db685..401eaf8ff5ed5 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -13,20 +13,20 @@
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
-import os
-import sys
-import json
-import re
-import glob
-import functools
-import collections
import argparse
-import pydoc
-import inspect
-import importlib
+import ast
+import collections
import doctest
+import functools
+import glob
+import importlib
+import inspect
+import json
+import os
+import pydoc
+import re
+import sys
import tempfile
-import ast
import textwrap
import flake8.main.application
@@ -41,24 +41,25 @@
# script. Setting here before matplotlib is loaded.
# We don't warn for the number of open plots, as none is actually being opened
os.environ["MPLBACKEND"] = "Template"
-import matplotlib
+import matplotlib # noqa: E402 isort:skip
matplotlib.rc("figure", max_open_warning=10000)
-import numpy
+import numpy # noqa: E402 isort:skip
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_PATH))
-import pandas
+import pandas # noqa: E402 isort:skip
sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
-from numpydoc.docscrape import NumpyDocString
-from pandas.io.formats.printing import pprint_thing
+from numpydoc.docscrape import NumpyDocString # noqa: E402 isort:skip
+from pandas.io.formats.printing import pprint_thing # noqa: E402 isort:skip
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
DIRECTIVES = ["versionadded", "versionchanged", "deprecated"]
+DIRECTIVE_PATTERN = re.compile(rf"^\s*\.\. ({'|'.join(DIRECTIVES)})(?!::)", re.I | re.M)
ALLOWED_SECTIONS = [
"Parameters",
"Attributes",
@@ -93,6 +94,7 @@
"GL07": "Sections are in the wrong order. Correct order is: " "{correct_sections}",
"GL08": "The object does not have a docstring",
"GL09": "Deprecation warning should precede extended summary",
+ "GL10": "reST directives {directives} must be followed by two colons",
"SS01": "No summary found (a short summary in a single line should be "
"present at the beginning of the docstring)",
"SS02": "Summary does not start with a capital letter",
@@ -478,6 +480,10 @@ def parameter_mismatches(self):
def correct_parameters(self):
return not bool(self.parameter_mismatches)
+ @property
+ def directives_without_two_colons(self):
+ return DIRECTIVE_PATTERN.findall(self.raw_doc)
+
def parameter_type(self, param):
return self.doc_parameters[param][0]
@@ -697,6 +703,10 @@ def get_validation_data(doc):
if doc.deprecated and not doc.extended_summary.startswith(".. deprecated:: "):
errs.append(error("GL09"))
+ directives_without_two_colons = doc.directives_without_two_colons
+ if directives_without_two_colons:
+ errs.append(error("GL10", directives=directives_without_two_colons))
+
if not doc.summary:
errs.append(error("SS01"))
else:
diff --git a/setup.cfg b/setup.cfg
index 7f0062428c442..43dbac15f5cfe 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -77,7 +77,9 @@ filterwarnings =
[coverage:run]
branch = False
-omit = */tests/*
+omit =
+ */tests/*
+ pandas/_typing.py
plugins = Cython.Coverage
[coverage:report]
@@ -108,68 +110,25 @@ directory = coverage_html_report
# To be kept consistent with "Import Formatting" section in contributing.rst
[isort]
-known_pre_libs=pandas._config
-known_pre_core=pandas._libs,pandas.util._*,pandas.compat,pandas.errors
-known_dtypes=pandas.core.dtypes
-known_post_core=pandas.tseries,pandas.io,pandas.plotting
-sections=FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER
-
-known_first_party=pandas
-known_third_party=Cython,numpy,dateutil,matplotlib,python-dateutil,pytz,pyarrow,pytest
-
-multi_line_output=3
-include_trailing_comma=True
-force_grid_wrap=0
-combine_as_imports=True
-line_length=88
-force_sort_within_sections=True
-skip_glob=env,
-skip=
- pandas/__init__.py
- pandas/core/api.py,
- pandas/io/msgpack/__init__.py
- asv_bench/benchmarks/attrs_caching.py,
- asv_bench/benchmarks/binary_ops.py,
- asv_bench/benchmarks/categoricals.py,
- asv_bench/benchmarks/ctors.py,
- asv_bench/benchmarks/eval.py,
- asv_bench/benchmarks/frame_ctor.py,
- asv_bench/benchmarks/frame_methods.py,
- asv_bench/benchmarks/gil.py,
- asv_bench/benchmarks/groupby.py,
- asv_bench/benchmarks/index_object.py,
- asv_bench/benchmarks/indexing.py,
- asv_bench/benchmarks/inference.py,
- asv_bench/benchmarks/io/csv.py,
- asv_bench/benchmarks/io/excel.py,
- asv_bench/benchmarks/io/hdf.py,
- asv_bench/benchmarks/io/json.py,
- asv_bench/benchmarks/io/msgpack.py,
- asv_bench/benchmarks/io/pickle.py,
- asv_bench/benchmarks/io/sql.py,
- asv_bench/benchmarks/io/stata.py,
- asv_bench/benchmarks/join_merge.py,
- asv_bench/benchmarks/multiindex_object.py,
- asv_bench/benchmarks/panel_ctor.py,
- asv_bench/benchmarks/panel_methods.py,
- asv_bench/benchmarks/plotting.py,
- asv_bench/benchmarks/reindex.py,
- asv_bench/benchmarks/replace.py,
- asv_bench/benchmarks/reshape.py,
- asv_bench/benchmarks/rolling.py,
- asv_bench/benchmarks/series_methods.py,
- asv_bench/benchmarks/sparse.py,
- asv_bench/benchmarks/stat_ops.py,
- asv_bench/benchmarks/timeseries.py
- asv_bench/benchmarks/pandas_vb_common.py
- asv_bench/benchmarks/offset.py
- asv_bench/benchmarks/dtypes.py
- asv_bench/benchmarks/strings.py
- asv_bench/benchmarks/period.py
+known_pre_libs = pandas._config
+known_pre_core = pandas._libs,pandas.util._*,pandas.compat,pandas.errors
+known_dtypes = pandas.core.dtypes
+known_post_core = pandas.tseries,pandas.io,pandas.plotting
+sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER
+known_first_party = pandas
+known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,yaml
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 0
+combine_as_imports = True
+line_length = 88
+force_sort_within_sections = True
+skip_glob = env,
+skip = pandas/__init__.py,pandas/core/api.py
[mypy]
ignore_missing_imports=True
no_implicit_optional=True
[mypy-pandas.conftest,pandas.tests.*]
-ignore_errors=True
\ No newline at end of file
+ignore_errors=True
diff --git a/setup.py b/setup.py
index 53e12da53cdeb..7040147c2b741 100755
--- a/setup.py
+++ b/setup.py
@@ -6,16 +6,16 @@
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
+from distutils.sysconfig import get_config_vars
+from distutils.version import LooseVersion
import os
from os.path import join as pjoin
-
-import pkg_resources
import platform
-from distutils.sysconfig import get_config_vars
-import sys
import shutil
-from distutils.version import LooseVersion
-from setuptools import setup, Command, find_packages
+import sys
+
+import pkg_resources
+from setuptools import Command, find_packages, setup
# versioning
import versioneer
@@ -32,6 +32,8 @@ def is_platform_mac():
min_numpy_ver = "1.13.3"
+min_cython_ver = "0.29.13" # note: sync with pyproject.toml
+
setuptools_kwargs = {
"install_requires": [
"python-dateutil >= 2.6.1",
@@ -43,7 +45,6 @@ def is_platform_mac():
}
-min_cython_ver = "0.28.2"
try:
import Cython
@@ -58,8 +59,8 @@ def is_platform_mac():
# The import of Extension must be after the import of Cython, otherwise
# we do not get the appropriately patched class.
# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
-from distutils.extension import Extension # noqa:E402
-from distutils.command.build import build # noqa:E402
+from distutils.extension import Extension # noqa: E402 isort:skip
+from distutils.command.build import build # noqa: E402 isort:skip
try:
if not _CYTHON_INSTALLED:
@@ -277,6 +278,7 @@ def initialize_options(self):
".pyo",
".pyd",
".c",
+ ".cpp",
".orig",
):
self._clean_me.append(filepath)
@@ -300,12 +302,12 @@ def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
- except Exception:
+ except OSError:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
- except Exception:
+ except OSError:
pass
@@ -520,17 +522,14 @@ def run(self):
# re-compile.
def maybe_cythonize(extensions, *args, **kwargs):
"""
- Render tempita templates before calling cythonize
+ Render tempita templates before calling cythonize. This is skipped for
+
+ * clean
+ * sdist
"""
- if len(sys.argv) > 1 and "clean" in sys.argv:
- # Avoid running cythonize on `python setup.py clean`
+ if "clean" in sys.argv or "sdist" in sys.argv:
# See https://github.com/cython/cython/issues/1495
return extensions
- if not cython:
- # Avoid trying to look up numpy when installing from sdist
- # https://github.com/pandas-dev/pandas/issues/25193
- # TODO: See if this can be removed after pyproject.toml added.
- return extensions
numpy_incl = pkg_resources.resource_filename("numpy", "core/include")
# TODO: Is this really necessary here?
@@ -830,5 +829,8 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
"hypothesis>=3.58",
]
},
+ entry_points={
+ "pandas_plotting_backends": ["matplotlib = pandas:plotting._matplotlib"]
+ },
**setuptools_kwargs
)
diff --git a/web/README.md b/web/README.md
new file mode 100644
index 0000000000000..7396fbd0833a1
--- /dev/null
+++ b/web/README.md
@@ -0,0 +1,12 @@
+Directory containing the pandas website (hosted at https://pandas.io).
+
+The website sources are in `web/pandas/`, which also include a `config.yml` file
+containing the settings to build the website. The website is generated with the
+command `./pandas_web.py pandas`. See `./pandas_web.py --help` and the header of
+the script for more information and options.
+
+After building the website, to navigate it you need to serve it with an http
+server (and not open the local files directly in the browser, since the links
+and the image sources are absolute to where they are served from). The easiest
+way to run an http server locally is to run `python -m http.server` from the
+`web/build/` directory.
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
new file mode 100644
index 0000000000000..fe3e4d1245d93
--- /dev/null
+++ b/web/pandas/_templates/layout.html
@@ -0,0 +1,107 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <script type="text/javascript">
+ var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-27880019-2']); _gaq.push(['_trackPageview']);
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+ </script>
+ <title>pandas - Python Data Analysis Library</title>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
+ <link rel="stylesheet"
+ href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
+ integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm"
+ crossorigin="anonymous">
+ {% for stylesheet in static.css %}
+ <link rel="stylesheet"
+ href="{{ base_url }}{{ stylesheet }}">
+ {% endfor %}
+ <script src="https://kit.fontawesome.com/79e5369384.js" crossorigin="anonymous"></script>
+ </head>
+ <body>
+ <header>
+ <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
+ <div class="container">
+ <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation">
+ <span class="navbar-toggler-icon"></span>
+ </button>
+
+ {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %}
+
+ <div class="collapse navbar-collapse" id="nav-content">
+ <ul class="navbar-nav ml-auto">
+ {% for item in navbar %}
+ {% if not item.has_subitems %}
+ <li class="nav-item">
+ <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a>
+ </li>
+ {% else %}
+ <li class="nav-item dropdown">
+ <a class="nav-link dropdown-toggle"
+ data-toggle="dropdown"
+ id="{{ item.slug }}"
+ href="#"
+ role="button"
+ aria-haspopup="true"
+ aria-expanded="false">{{ item.name }}</a>
+ <div class="dropdown-menu" aria-labelledby="{{ item.slug }}">
+ {% for subitem in item.target %}
+ <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ subitem.target }}">{{ subitem.name }}</a>
+ {% endfor %}
+ </div>
+ </li>
+ {% endif %}
+ {% endfor %}
+ </ul>
+ </div>
+ </div>
+ </nav>
+ </header>
+ <main role="main">
+ <div class="container">
+ {% block body %}{% endblock %}
+ </div>
+ </main>
+ <footer class="container pt-4 pt-md-5 border-top">
+ <ul class="list-inline social-buttons float-right">
+ <li class="list-inline-item">
+ <a href="https://twitter.com/pandas_dev/">
+ <i class="fab fa-twitter"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://github.com/pandas-dev/pandas/">
+ <i class="fab fa-github"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://stackoverflow.com/questions/tagged/pandas">
+ <i class="fab fa-stack-overflow"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://pandas.discourse.group">
+ <i class="fab fa-discourse"></i>
+ </a>
+ </li>
+ </ul>
+ <p>
+ pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>
+ </p>
+ </footer>
+
+ <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
+ integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
+ crossorigin="anonymous"></script>
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
+ integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
+ crossorigin="anonymous"></script>
+ <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
+ integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
+ crossorigin="anonymous"></script>
+ </body>
+</html>
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md
new file mode 100644
index 0000000000000..77b79c41aa4d1
--- /dev/null
+++ b/web/pandas/about/citing.md
@@ -0,0 +1,46 @@
+# Citing and logo
+
+## Citing pandas
+
+If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers:
+
+- [Data structures for statistical computing in python](http://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf),
+ McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010.
+
+ @inproceedings{mckinney2010data,
+ title={Data structures for statistical computing in python},
+ author={Wes McKinney},
+ booktitle={Proceedings of the 9th Python in Science Conference},
+ volume={445},
+ pages={51--56},
+ year={2010},
+ organization={Austin, TX}
+ }
+
+
+- [pandas: a foundational Python library for data analysis and statistics](https://www.scribd.com/document/71048089/pandas-a-Foundational-Python-Library-for-Data-Analysis-and-Statistics),
+ McKinney, Python for High Performance and Scientific Computing, Volume 14, 2011.
+
+ @article{mckinney2011pandas,
+ title={pandas: a foundational Python library for data analysis and statistics},
+ author={Wes McKinney},
+ journal={Python for High Performance and Scientific Computing},
+ volume={14},
+ year={2011}
+ }
+
+## Brand and logo
+
+When using the project name _pandas_, please use it in lower case, even at the beginning of a sentence.
+
+The official logo of _pandas_ is:
+
+
+
+You can download a `svg` version of the logo [here]({{ base_url }}/static/img/pandas.svg).
+
+When using the logo, please follow these guidelines:
+
+- Leave enough margin around the logo
+- Do not distort the logo by changing its proportions
+- Do not place text or other elements on top of the logo
diff --git a/web/pandas/about/index.html b/web/pandas/about/index.html
new file mode 100644
index 0000000000000..4e50d280d2a10
--- /dev/null
+++ b/web/pandas/about/index.html
@@ -0,0 +1,86 @@
+# About pandas
+
+## History of development
+
+In 2008, _pandas_ development began at [AQR Capital Management](http://www.aqr.com).
+By the end of 2009 it had been [open sourced](http://en.wikipedia.org/wiki/Open_source),
+and is actively supported today by a community of like-minded individuals around the world who
+contribute their valuable time and energy to help make open source _pandas_
+possible. Thank you to [all of our contributors](team.html).
+
+Since 2015, _pandas_ is a [NumFOCUS sponsored project](https://numfocus.org/sponsored-projects).
+This will help ensure the success of development of _pandas_ as a world-class open-source project.
+
+### Timeline
+
+- **2008**: Development of _pandas_ started
+- **2009**: _pandas_ becomes open source
+- **2012**: First edition of _Python for Data Analysis_ is published
+- **2015**: _pandas_ becomes a [NumFOCUS sponsored project](https://numfocus.org/sponsored-projects)
+- **2018**: First in-person core developer sprint
+
+## Library Highlights
+
+- A fast and efficient **DataFrame** object for data manipulation with
+ integrated indexing;
+
+- Tools for **reading and writing data** between in-memory data structures and
+ different formats: CSV and text files, Microsoft Excel, SQL databases, and
+ the fast HDF5 format;
+
+- Intelligent **data alignment** and integrated handling of **missing data**:
+ gain automatic label-based alignment in computations and easily manipulate
+ messy data into an orderly form;
+
+- Flexible **reshaping** and pivoting of data sets;
+
+- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
+ of large data sets;
+
+- Columns can be inserted and deleted from data structures for **size
+ mutability**;
+
+- Aggregating or transforming data with a powerful **group by** engine
+ allowing split-apply-combine operations on data sets;
+
+- High performance **merging and joining** of data sets;
+
+- **Hierarchical axis indexing** provides an intuitive way of working with
+ high-dimensional data in a lower-dimensional data structure;
+
+- **Time series**-functionality: date range generation and frequency
+ conversion, moving window statistics, moving window linear regressions, date
+ shifting and lagging. Even create domain-specific time offsets and join time
+ series without losing data;
+
+- Highly **optimized for performance**, with critical code paths written in
+ [Cython](http://www.cython.org/) or C.
+
+- Python with *pandas* is in use in a wide variety of **academic and
+ commercial** domains, including Finance, Neuroscience, Economics,
+ Statistics, Advertising, Web Analytics, and more.
+
+## Mission
+
+_pandas_ aims to be the fundamental high-level building block for doing practical,
+real world data analysis in Python.
+Additionally, it has the broader goal of becoming the most powerful and flexible
+open source data analysis / manipulation tool available in any language.
+
+## Vision
+
+A world where data analytics and manipulation software is:
+
+- Accessible to everyone
+- Free for users to use and modify
+- Flexible
+- Powerful
+- Easy to use
+- Fast
+
+## Values
+
+It is at the core of _pandas_ to be respectful and welcoming to everybody —
+users, contributors and the broader community — regardless of level of
+experience, gender, gender identity and expression, sexual orientation,
+disability, personal appearance, body size, race, ethnicity, age, religion,
+or nationality.
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
new file mode 100644
index 0000000000000..8a5c2735b3d93
--- /dev/null
+++ b/web/pandas/about/roadmap.md
@@ -0,0 +1,195 @@
+# Roadmap
+
+This page provides an overview of the major themes in pandas'
+development. Each of these items requires a relatively large amount of
+effort to implement. These may be achieved more quickly with dedicated
+funding or interest from contributors.
+
+An item being on the roadmap does not mean that it will *necessarily*
+happen, even with unlimited funding. During the implementation period we
+may discover issues preventing the adoption of the feature.
+
+Additionally, an item *not* being on the roadmap does not exclude it
+from inclusion in pandas. The roadmap is intended for larger,
+fundamental changes to the project that are likely to take months or
+years of developer time. Smaller-scoped items will continue to be
+tracked on our [issue tracker](https://github.com/pandas-dev/pandas/issues).
+
+See [Roadmap evolution](#roadmap-evolution) for proposing
+changes to this document.
+
+## Extensibility
+
+Pandas `extending.extension-types` allow
+for extending NumPy types with custom data types and array storage.
+Pandas uses extension types internally, and provides an interface for
+3rd-party libraries to define their own custom data types.
+
+Many parts of pandas still unintentionally convert data to a NumPy
+array. These problems are especially pronounced for nested data.
+
+We'd like to improve the handling of extension arrays throughout the
+library, making their behavior more consistent with the handling of
+NumPy arrays. We'll do this by cleaning up pandas' internals and
+adding new methods to the extension array interface.
+
+## String data type
+
+Currently, pandas stores text data in an `object` -dtype NumPy array.
+The current implementation has two primary drawbacks: First, `object`
+-dtype is not specific to strings: any Python object can be stored in an
+`object` -dtype array, not just strings. Second: this is not efficient.
+The NumPy memory model isn't especially well-suited to variable width
+text data.
+
+To solve the first issue, we propose a new extension type for string
+data. This will initially be opt-in, with users explicitly requesting
+`dtype="string"`. The array backing this string dtype may initially be
+the current implementation: an `object` -dtype NumPy array of Python
+strings.
+
+To solve the second issue (performance), we'll explore alternative
+in-memory array libraries (for example, Apache Arrow). As part of the
+work, we may need to implement certain operations expected by pandas
+users (for example the algorithm used in, `Series.str.upper`). That work
+may be done outside of pandas.
+
+## Apache Arrow interoperability
+
+[Apache Arrow](https://arrow.apache.org) is a cross-language development
+platform for in-memory data. The Arrow logical types are closely aligned
+with typical pandas use cases.
+
+We'd like to provide better-integrated support for Arrow memory and
+data types within pandas. This will let us take advantage of its I/O
+capabilities and provide for better interoperability with other
+languages and libraries using Arrow.
+
+## Block manager rewrite
+
+We'd like to replace pandas current internal data structures (a
+collection of 1 or 2-D arrays) with a simpler collection of 1-D arrays.
+
+Pandas internal data model is quite complex. A DataFrame is made up of
+one or more 2-dimensional "blocks", with one or more blocks per dtype.
+This collection of 2-D arrays is managed by the BlockManager.
+
+The primary benefit of the BlockManager is improved performance on
+certain operations (construction from a 2D array, binary operations,
+reductions across the columns), especially for wide DataFrames. However,
+the BlockManager substantially increases the complexity and maintenance
+burden of pandas.
+
+By replacing the BlockManager we hope to achieve
+
+- Substantially simpler code
+- Easier extensibility with new logical types
+- Better user control over memory use and layout
+- Improved micro-performance
+- Option to provide a C / Cython API to pandas' internals
+
+See [these design
+documents](https://dev.pandas.io/pandas2/internal-architecture.html#removal-of-blockmanager-new-dataframe-internals)
+for more.
+
+## Decoupling of indexing and internals
+
+The code for getting and setting values in pandas' data structures
+needs refactoring. In particular, we must clearly separate code that
+converts keys (e.g., the argument to `DataFrame.loc`) to positions from
+code that uses these positions to get or set values. This is related to
+the proposed BlockManager rewrite. Currently, the BlockManager sometimes
+uses label-based, rather than position-based, indexing. We propose that
+it should only work with positional indexing, and the translation of
+keys to positions should be entirely done at a higher level.
+
+Indexing is a complicated API with many subtleties. This refactor will
+require care and attention. More details are discussed at
+<https://github.com/pandas-dev/pandas/wiki/(Tentative)-rules-for-restructuring-indexing-code>
+
+## Numba-accelerated operations
+
+[Numba](https://numba.pydata.org) is a JIT compiler for Python code.
+We'd like to provide ways for users to apply their own Numba-jitted
+functions where pandas accepts user-defined functions (for example,
+`Series.apply`,
+`DataFrame.apply`,
+`DataFrame.applymap`, and in groupby and
+window contexts). This will improve the performance of
+user-defined-functions in these operations by staying within compiled
+code.
+
+## Documentation improvements
+
+We'd like to improve the content, structure, and presentation of the
+pandas documentation. Some specific goals include
+
+- Overhaul the HTML theme with a modern, responsive design
+ (`15556`)
+- Improve the "Getting Started" documentation, designing and writing
+  learning paths for users of different backgrounds (e.g. brand new to
+ programming, familiar with other languages like R, already familiar
+ with Python).
+- Improve the overall organization of the documentation and specific
+ subsections of the documentation to make navigation and finding
+ content easier.
+
+## Package docstring validation
+
+To improve the quality and consistency of pandas docstrings, we've
+developed tooling to check docstrings in a variety of ways.
+<https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py>
+contains the checks.
+
+Like many other projects, pandas uses the
+[numpydoc](https://numpydoc.readthedocs.io/en/latest/) style for writing
+docstrings. With the collaboration of the numpydoc maintainers, we'd
+like to move the checks to a package other than pandas so that other
+projects can easily use them as well.
+
+## Performance monitoring
+
+Pandas uses [airspeed velocity](https://asv.readthedocs.io/en/stable/)
+to monitor for performance regressions. ASV itself is a fabulous tool,
+but requires some additional work to be integrated into an open source
+project's workflow.
+
+The [asv-runner](https://github.com/asv-runner) organization, currently
+made up of pandas maintainers, provides tools built on top of ASV. We
+have a physical machine for running a number of projects' benchmarks,
+and tools for managing the benchmark runs and reporting on results.
+
+We'd like to fund improvements and maintenance of these tools to
+
+- Be more stable. Currently, they're maintained on the nights and
+ weekends when a maintainer has free time.
+- Tune the system for benchmarks to improve stability, following
+ <https://pyperf.readthedocs.io/en/latest/system.html>
+- Build a GitHub bot to request ASV runs *before* a PR is merged.
+ Currently, the benchmarks are only run nightly.
+
+## Roadmap Evolution
+
+Pandas continues to evolve. The direction is primarily determined by
+community interest. Everyone is welcome to review existing items on the
+roadmap and to propose a new item.
+
+Each item on the roadmap should be a short summary of a larger design
+proposal. The proposal should include
+
+1. Short summary of the changes, which would be appropriate for
+ inclusion in the roadmap if accepted.
+2. Motivation for the changes.
+3. An explanation of why the change is in scope for pandas.
+4. Detailed design: Preferably with example-usage (even if not
+ implemented yet) and API documentation
+5. API Change: Any API changes that may result from the proposal.
+
+That proposal may then be submitted as a GitHub issue, where the pandas
+maintainers can review and comment on the design. The [pandas mailing
+list](https://mail.python.org/mailman/listinfo/pandas-dev) should be
+notified of the proposal.
+
+When there's agreement that an implementation would be welcome, the
+roadmap should be updated to include the summary and a link to the
+discussion issue.
diff --git a/web/pandas/about/sponsors.md b/web/pandas/about/sponsors.md
new file mode 100644
index 0000000000000..dcc6e367e5d64
--- /dev/null
+++ b/web/pandas/about/sponsors.md
@@ -0,0 +1,41 @@
+# Sponsors
+
+## NumFOCUS
+
+
+
+_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States.
+NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the
+health and sustainability of the project. Visit numfocus.org for more information.
+
+Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible
+to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.
+
+## Tidelift
+
+_pandas_ is part of the [Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme).
+You can support pandas by becoming a Tidelift subscriber.
+
+## Institutional partners
+
+Institutional Partners are companies and universities that support the project by employing contributors.
+Current Institutional Partners include:
+
+<ul>
+ {% for company in partners.active if company.employs %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li>
+ {% endfor %}
+</ul>
+
+## In-kind sponsors
+
+- [OVH](https://us.ovhcloud.com/): Hosting
+- [Indeed](https://opensource.indeedeng.io/): Logo and website design
+
+## Past institutional partners
+
+<ul>
+ {% for company in partners.past %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a></li>
+ {% endfor %}
+</ul>
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
new file mode 100644
index 0000000000000..41da3a0e82bdb
--- /dev/null
+++ b/web/pandas/about/team.md
@@ -0,0 +1,67 @@
+# Team
+
+## Contributors
+
+_pandas_ is made with love by more than [1,500 volunteer contributors](https://github.com/pandas-dev/pandas/graphs/contributors).
+
+If you want to support pandas development, you can find information in the [donations page](../donate.html).
+
+## Maintainers
+
+<div class="row maintainers">
+ {% for row in maintainers.people | batch(6, "") %}
+ <div class="card-group maintainers">
+ {% for person in row %}
+ {% if person %}
+ <div class="card">
+ <img class="card-img-top" alt="" src="{{ person.avatar_url }}"/>
+ <div class="card-body">
+ <h6 class="card-title">
+ {% if person.blog %}
+ <a href="{{ person.blog }}">
+ {{ person.name or person.login }}
+ </a>
+ {% else %}
+ {{ person.name or person.login }}
+ {% endif %}
+ </h6>
+ <p class="card-text small"><a href="{{ person.html_url }}">{{ person.login }}</a></p>
+ </div>
+ </div>
+ {% else %}
+ <div class="card border-0"></div>
+ {% endif %}
+ {% endfor %}
+ </div>
+ {% endfor %}
+</div>
+
+## Governance
+
+Wes McKinney is the Benevolent Dictator for Life (BDFL).
+
+The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance).
+
+## Code of conduct committee
+
+<ul>
+ {% for person in maintainers.coc %}
+ <li>{{ person }}</li>
+ {% endfor %}
+</ul>
+
+## NumFOCUS committee
+
+<ul>
+ {% for person in maintainers.numfocus %}
+ <li>{{ person }}</li>
+ {% endfor %}
+</ul>
+
+## Emeritus maintainers
+
+<ul>
+ {% for person in maintainers.emeritus %}
+ <li>{{ person }}</li>
+ {% endfor %}
+</ul>
diff --git a/web/pandas/community/blog.html b/web/pandas/community/blog.html
new file mode 100644
index 0000000000000..ffe6f97d679e4
--- /dev/null
+++ b/web/pandas/community/blog.html
@@ -0,0 +1,14 @@
+{% extends "layout.html" %}
+
+{% block body %}
+ {% for post in blog.posts %}
+ <div class="card">
+ <div class="card-body">
+ <h3 class="card-title"><a href="{{post.link }}" target="_blank">{{ post.title }}</a></h3>
+ <h6 class="card-subtitle">Source: {{ post.feed }} | Author: {{ post.author }} | Published: {{ post.published.strftime("%b %d, %Y") }}</h6>
+ <div class="card-text">{{ post.summary }}</div>
+ <a class="card-link" href="{{post.link }}" target="_blank">Read</a>
+ </div>
+ </div>
+ {% endfor %}
+{% endblock %}
diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md
new file mode 100644
index 0000000000000..de0e8120f7eee
--- /dev/null
+++ b/web/pandas/community/coc.md
@@ -0,0 +1,63 @@
+# Code of conduct
+
+As contributors and maintainers of this project, and in the interest of
+fostering an open and welcoming community, we pledge to respect all people who
+contribute through reporting issues, posting feature requests, updating
+documentation, submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free
+experience for everyone, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as physical or electronic
+ addresses, without explicit permission
+* Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to
+fairly and consistently applying these principles to every aspect of managing
+this project. Project maintainers who do not follow or enforce the Code of
+Conduct may be permanently removed from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+A working group of community members is committed to promptly addressing any
+reported issues. The working group is made up of pandas contributors and users.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the working group by e-mail (pandas-coc@googlegroups.com).
+Messages sent to this e-mail address will not be publicly visible but only to
+the working group members. The working group currently includes
+
+<ul>
+ {% for person in maintainers.coc %}
+ <li>{{ person }}</li>
+ {% endfor %}
+</ul>
+
+All complaints will be reviewed and investigated and will result in a response
+that is deemed necessary and appropriate to the circumstances. Maintainers are
+obligated to maintain confidentiality with regard to the reporter of an
+incident.
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.3.0, available at
+[http://contributor-covenant.org/version/1/3/0/][version],
+and the [Swift Code of Conduct][swift].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/3/0/
+[swift]: https://swift.org/community/#code-of-conduct
+
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
new file mode 100644
index 0000000000000..cf242e86f879f
--- /dev/null
+++ b/web/pandas/community/ecosystem.md
@@ -0,0 +1,370 @@
+# Ecosystem
+
+Increasingly, packages are being built on top of pandas to address
+specific needs in data preparation, analysis and visualization. This is
+encouraging because it means pandas is not only helping users to handle
+their data tasks but also that it provides a better starting point for
+developers to build powerful and more focused data tools. The creation
+of libraries that complement pandas' functionality also allows pandas
+development to remain focused around its original requirements.
+
+This is an inexhaustive list of projects that build on pandas in order
+to provide tools in the PyData space. For a list of projects that depend
+on pandas, see the [libraries.io usage page for
+pandas](https://libraries.io/pypi/pandas/usage) or [search pypi for
+pandas](https://pypi.org/search/?q=pandas).
+
+We'd like to make it easier for users to find these projects. If you
+know of other substantial projects that you feel should be on this list,
+please let us know.
+
+## Statistics and machine learning
+
+### [Statsmodels](https://www.statsmodels.org/)
+
+Statsmodels is the prominent Python "statistics and econometrics
+library" and it has a long-standing special relationship with pandas.
+Statsmodels provides powerful statistics, econometrics, analysis and
+modeling functionality that is out of pandas' scope. Statsmodels
+leverages pandas objects as the underlying data container for
+computation.
+
+### [sklearn-pandas](https://github.com/paulgb/sklearn-pandas)
+
+Use pandas DataFrames in your [scikit-learn](https://scikit-learn.org/)
+ML pipeline.
+
+### [Featuretools](https://github.com/featuretools/featuretools/)
+
+Featuretools is a Python library for automated feature engineering built
+on top of pandas. It excels at transforming temporal and relational
+datasets into feature matrices for machine learning using reusable
+feature engineering "primitives". Users can contribute their own
+primitives in Python and share them with the rest of the community.
+
+## Visualization
+
+### [Altair](https://altair-viz.github.io/)
+
+Altair is a declarative statistical visualization library for Python.
+With Altair, you can spend more time understanding your data and its
+meaning. Altair's API is simple, friendly and consistent and built on
+top of the powerful Vega-Lite JSON specification. This elegant
+simplicity produces beautiful and effective visualizations with a
+minimal amount of code. Altair works with Pandas DataFrames.
+
+### [Bokeh](https://bokeh.pydata.org)
+
+Bokeh is a Python interactive visualization library for large datasets
+that natively uses the latest web technologies. Its goal is to provide
+elegant, concise construction of novel graphics in the style of
+Protovis/D3, while delivering high-performance interactivity over large
+data to thin clients.
+
+[Pandas-Bokeh](https://github.com/PatrikHlobil/Pandas-Bokeh) provides a
+high level API for Bokeh that can be loaded as a native Pandas plotting
+backend via
+
+```
+pd.set_option("plotting.backend", "pandas_bokeh")
+```
+
+It is very similar to the matplotlib plotting backend, but provides
+interactive web-based charts and maps.
+
+### [seaborn](https://seaborn.pydata.org)
+
+Seaborn is a Python visualization library based on
+[matplotlib](https://matplotlib.org). It provides a high-level,
+dataset-oriented interface for creating attractive statistical graphics.
+The plotting functions in seaborn understand pandas objects and leverage
+pandas grouping operations internally to support concise specification
+of complex visualizations. Seaborn also goes beyond matplotlib and
+pandas with the option to perform statistical estimation while plotting,
+aggregating across observations and visualizing the fit of statistical
+models to emphasize patterns in a dataset.
+
+### [yhat/ggpy](https://github.com/yhat/ggpy)
+
+Hadley Wickham's [ggplot2](https://ggplot2.tidyverse.org/) is a
+foundational exploratory visualization package for the R language. Based
+on ["The Grammar of
+Graphics"](https://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html)
+it provides a powerful, declarative and extremely general way to
+generate bespoke plots of any kind of data. It's really quite
+incredible. Various implementations in other languages are available,
+but a faithful implementation for Python users has long been missing.
+Although still young (as of Jan-2014), the
+[yhat/ggpy](https://github.com/yhat/ggpy) project has been progressing
+quickly in that direction.
+
+### [IPython Vega](https://github.com/vega/ipyvega)
+
+[IPython Vega](https://github.com/vega/ipyvega) leverages
+[Vega](https://github.com/trifacta/vega) to create plots
+within Jupyter Notebook.
+
+### [Plotly](https://plot.ly/python)
+
+[Plotly's](https://plot.ly/) [Python API](https://plot.ly/python/)
+enables interactive figures and web shareability. Maps, 2D, 3D, and
+live-streaming graphs are rendered with WebGL and
+[D3.js](https://d3js.org/). The library supports plotting directly from
+a pandas DataFrame and cloud-based collaboration. Users of [matplotlib,
+ggplot for Python, and
+Seaborn](https://plot.ly/python/matplotlib-to-plotly-tutorial/) can
+convert figures into interactive web-based plots. Plots can be drawn in
+[IPython Notebooks](https://plot.ly/ipython-notebooks/) , edited with R
+or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly
+is free for unlimited sharing, and has
+[cloud](https://plot.ly/product/plans/),
+[offline](https://plot.ly/python/offline/), or
+[on-premise](https://plot.ly/product/enterprise/) accounts for private
+use.
+
+### [QtPandas](https://github.com/draperjames/qtpandas)
+
+Spun off from the main pandas library, the
+[qtpandas](https://github.com/draperjames/qtpandas) library enables
+DataFrame visualization and manipulation in PyQt4 and PySide
+applications.
+
+## IDE
+
+### [IPython](https://ipython.org/documentation.html)
+
+IPython is an interactive command shell and distributed computing
+environment. IPython tab completion works with Pandas methods and also
+attributes like DataFrame columns.
+
+### [Jupyter Notebook / Jupyter Lab](https://jupyter.org)
+
+Jupyter Notebook is a web application for creating Jupyter notebooks. A
+Jupyter notebook is a JSON document containing an ordered list of
+input/output cells which can contain code, text, mathematics, plots and
+rich media. Jupyter notebooks can be converted to a number of open
+standard output formats (HTML, HTML presentation slides, LaTeX, PDF,
+ReStructuredText, Markdown, Python) through 'Download As' in the web
+interface and `jupyter convert` in a shell.
+
+Pandas DataFrames implement `_repr_html_` and `_repr_latex_` methods which
+are utilized by Jupyter Notebook for displaying (abbreviated) HTML or
+LaTeX tables. LaTeX output is properly escaped. (Note: HTML tables may
+or may not be compatible with non-HTML Jupyter output formats.)
+
+See the pandas documentation sections on [Options and
+Settings](https://pandas.pydata.org/docs/user_guide/options.html)
+for the available `display.` settings.
+
+### [quantopian/qgrid](https://github.com/quantopian/qgrid)
+
+qgrid is "an interactive grid for sorting and filtering DataFrames in
+IPython Notebook" built with SlickGrid.
+
+### [Spyder](https://www.spyder-ide.org/)
+
+Spyder is a cross-platform PyQt-based IDE combining the editing,
+analysis, debugging and profiling functionality of a software
+development tool with the data exploration, interactive execution, deep
+inspection and rich visualization capabilities of a scientific
+environment like MATLAB or Rstudio.
+
+Its [Variable
+Explorer](https://docs.spyder-ide.org/variableexplorer.html) allows
+users to view, manipulate and edit pandas `Index`, `Series`, and
+`DataFrame` objects like a "spreadsheet", including copying and
+modifying values, sorting, displaying a "heatmap", converting data
+types and more. Pandas objects can also be renamed, duplicated, new
+columns added, copied/pasted to/from the clipboard (as TSV), and
+saved/loaded to/from a file. Spyder can also import data from a variety
+of plain text and binary files or the clipboard into a new pandas
+DataFrame via a sophisticated import wizard.
+
+Most pandas classes, methods and data attributes can be autocompleted in
+Spyder's [Editor](https://docs.spyder-ide.org/editor.html) and [IPython
+Console](https://docs.spyder-ide.org/ipythonconsole.html), and Spyder's
+[Help pane](https://docs.spyder-ide.org/help.html) can retrieve and
+render Numpydoc documentation on pandas objects in rich text with Sphinx
+both automatically and on-demand.
+
+## API
+
+### [pandas-datareader](https://github.com/pydata/pandas-datareader)
+
+`pandas-datareader` is a remote data access library for pandas
+(PyPI:`pandas-datareader`). It is based on functionality that was
+located in `pandas.io.data` and `pandas.io.wb` but was split off in
+v0.19. See more in the [pandas-datareader
+docs](https://pandas-datareader.readthedocs.io/en/latest/):
+
+The following data feeds are available:
+
+- Google Finance
+- Tiingo
+- Morningstar
+- IEX
+- Robinhood
+- Enigma
+- Quandl
+- FRED
+- Fama/French
+- World Bank
+- OECD
+- Eurostat
+- TSP Fund Data
+- Nasdaq Trader Symbol Definitions
+- Stooq Index Data
+- MOEX Data
+
+### [quandl/Python](https://github.com/quandl/Python)
+
+Quandl API for Python wraps the Quandl REST API to return Pandas
+DataFrames with timeseries indexes.
+
+### [pydatastream](https://github.com/vfilimonov/pydatastream)
+
+PyDatastream is a Python interface to the [Thomson Dataworks Enterprise
+(DWE/Datastream)](http://dataworks.thomson.com/Dataworks/Enterprise/1.0/)
+SOAP API to return indexed Pandas DataFrames with financial data. This
+package requires valid credentials for this API (non free).
+
+### [pandaSDMX](https://pandasdmx.readthedocs.io)
+
+pandaSDMX is a library to retrieve and acquire statistical data and
+metadata disseminated in [SDMX](https://www.sdmx.org) 2.1, an
+ISO-standard widely used by institutions such as statistics offices,
+central banks, and international organisations. pandaSDMX can expose
+datasets and related structural metadata including data flows,
+code-lists, and data structure definitions as pandas Series or
+MultiIndexed DataFrames.
+
+### [fredapi](https://github.com/mortada/fredapi)
+
+fredapi is a Python interface to the [Federal Reserve Economic Data
+(FRED)](https://fred.stlouisfed.org/) provided by the Federal Reserve
+Bank of St. Louis. It works with both the FRED database and ALFRED
+database that contains point-in-time data (i.e. historic data
+revisions). fredapi provides a wrapper in Python to the FRED HTTP API,
+and also provides several convenient methods for parsing and analyzing
+point-in-time data from ALFRED. fredapi makes use of pandas and returns
+data in a Series or DataFrame. This module requires a FRED API key that
+you can obtain for free on the FRED website.
+
+## Domain specific
+
+### [Geopandas](https://github.com/kjordahl/geopandas)
+
+Geopandas extends pandas data objects to include geographic information
+which support geometric operations. If your work entails maps and
+geographical coordinates, and you love pandas, you should take a close
+look at Geopandas.
+
+### [xarray](https://github.com/pydata/xarray)
+
+xarray brings the labeled data power of pandas to the physical sciences
+by providing N-dimensional variants of the core pandas data structures.
+It aims to provide a pandas-like and pandas-compatible toolkit for
+analytics on multi-dimensional arrays, rather than the tabular data for
+which pandas excels.
+
+## Out-of-core
+
+### [Blaze](http://blaze.pydata.org/)
+
+Blaze provides a standard API for doing computations with various
+in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB,
+PyTables, PySpark.
+
+### [Dask](https://dask.readthedocs.io/en/latest/)
+
+Dask is a flexible parallel computing library for analytics. Dask
+provides a familiar `DataFrame` interface for out-of-core, parallel and
+distributed computing.
+
+### [Dask-ML](https://dask-ml.readthedocs.io/en/latest/)
+
+Dask-ML enables parallel and distributed machine learning using Dask
+alongside existing machine learning libraries like Scikit-Learn,
+XGBoost, and TensorFlow.
+
+### [Koalas](https://koalas.readthedocs.io/en/latest/)
+
+Koalas provides a familiar pandas DataFrame interface on top of Apache
+Spark. It enables users to leverage multi-cores on one machine or a
+cluster of machines to speed up or scale their DataFrame code.
+
+### [Odo](http://odo.pydata.org)
+
+Odo provides a uniform API for moving data between different formats. It
+uses pandas own `read_csv` for CSV IO and leverages many existing
+packages such as PyTables, h5py, and pymongo to move data between non
+pandas formats. Its graph based approach is also extensible by end users
+for custom formats that may be too specific for the core of odo.
+
+### [Ray](https://ray.readthedocs.io/en/latest/pandas_on_ray.html)
+
+Pandas on Ray is an early stage DataFrame library that wraps Pandas and
+transparently distributes the data and computation. The user does not
+need to know how many cores their system has, nor do they need to
+specify how to distribute the data. In fact, users can continue using
+their previous Pandas notebooks while experiencing a considerable
+speedup from Pandas on Ray, even on a single machine. Only a
+modification of the import statement is needed, as we demonstrate below.
+Once you've changed your import statement, you're ready to use Pandas on
+Ray just like you would Pandas.
+
+```
+# import pandas as pd
+import ray.dataframe as pd
+```
+
+### [Vaex](https://docs.vaex.io/)
+
+Vaex is a Python library for out-of-core DataFrames (similar to
+pandas), designed to let you visualize and explore big tabular
+datasets interactively.
+It can calculate statistics
+such as mean, sum, count, standard deviation etc, on an N-dimensional
+grid up to a billion (10⁹) objects/rows per second. Visualization is
+done using histograms, density plots and 3d volume rendering, allowing
+interactive exploration of big data. Vaex uses memory mapping, zero
+memory copy policy and lazy computations for best performance (no memory
+wasted).
+
+- ``vaex.from_pandas``
+- ``vaex.to_pandas_df``
+
+## Data cleaning and validation
+
+### [pyjanitor](https://github.com/ericmjl/pyjanitor/)
+
+Pyjanitor provides a clean API for cleaning data, using method chaining.
+
+### [Engarde](https://engarde.readthedocs.io/en/latest/)
+
+Engarde is a lightweight library used to explicitly state your
+assumptions about your datasets and check that they're *actually* true.
+
+## Extension data types
+
+Pandas provides an interface for defining
+extension types to extend NumPy's type system. The following libraries
+implement that interface to provide types not found in NumPy or pandas,
+which work well with pandas' data containers.
+
+### [cyberpandas](https://cyberpandas.readthedocs.io/en/latest)
+
+Cyberpandas provides an extension type for storing arrays of IP
+Addresses. These arrays can be stored inside pandas' Series and
+DataFrame.
+
+## Accessors
+
+A directory of projects providing
+extension accessors. This is for users to discover new accessors and for library
+authors to coordinate on the namespace.
+
+| Library                                                     | Accessor | Classes               |
+| ----------------------------------------------------------- | -------- | --------------------- |
+| [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) | `ip`     | `Series`              |
+| [pdvega](https://altair-viz.github.io/pdvega/)              | `vgplot` | `Series`, `DataFrame` |
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
new file mode 100644
index 0000000000000..d5c505f298437
--- /dev/null
+++ b/web/pandas/config.yml
@@ -0,0 +1,130 @@
+main:
+ templates_path: _templates
+ base_template: "layout.html"
+ ignore:
+ - _templates/layout.html
+ - config.yml
+ - try.md # the binder page will be added later
+ github_repo_url: pandas-dev/pandas
+ context_preprocessors:
+ - pandas_web.Preprocessors.navbar_add_info
+ - pandas_web.Preprocessors.blog_add_posts
+ - pandas_web.Preprocessors.maintainers_add_info
+ - pandas_web.Preprocessors.home_add_releases
+ markdown_extensions:
+ - toc
+ - tables
+ - fenced_code
+static:
+ logo: # /static/img/pandas.svg
+ css:
+ - /static/css/pandas.css
+navbar:
+ - name: "About us"
+ target:
+ - name: "About pandas"
+ target: /about/index.html
+ - name: "Project roadmap"
+ target: /about/roadmap.html
+ - name: "Team"
+ target: /about/team.html
+ - name: "Sponsors"
+ target: /about/sponsors.html
+ - name: "Citing and logo"
+ target: /about/citing.html
+ - name: "Getting started"
+ target: /getting_started.html
+ - name: "Documentation"
+ target:
+ - name: "User guide"
+ target: /docs/user_guide/index.html
+ - name: "API reference"
+ target: /docs/reference/index.html
+ - name: "Release notes"
+ target: /docs/whatsnew/index.html
+ - name: "Older versions"
+ target: https://pandas.pydata.org/pandas-docs/version/
+ - name: "Community"
+ target:
+ - name: "Blog"
+ target: /community/blog.html
+ - name: "Ask a question (StackOverflow)"
+ target: https://stackoverflow.com/questions/tagged/pandas
+ - name: "Discuss"
+ target: https://pandas.discourse.group
+ - name: "Code of conduct"
+ target: /community/coc.html
+ - name: "Ecosystem"
+ target: /community/ecosystem.html
+ - name: "Contribute"
+ target: /contribute.html
+blog:
+ num_posts: 8
+ feed:
+ - https://wesmckinney.com/feeds/pandas.atom.xml
+ - https://tomaugspurger.github.io/feed
+ - https://jorisvandenbossche.github.io/feeds/all.atom.xml
+ - https://datapythonista.github.io/blog/feeds/pandas.atom.xml
+ - https://numfocus.org/tag/pandas/feed/
+maintainers:
+ active:
+ - wesm
+ - jorisvandenbossche
+ - TomAugspurger
+ - shoyer
+ - jreback
+ - chris-b1
+ - sinhrks
+ - cpcloud
+ - gfyoung
+ - toobaz
+ - WillAyd
+ - mroeschke
+ - jschendel
+ - jbrockmendel
+ - datapythonista
+ - simonjayhawkins
+ - topper-123
+ emeritus:
+ - Wouter Overmeire
+ - Skipper Seabold
+ - Jeff Tratner
+ coc:
+ - Safia Abdalla
+ - Tom Augspurger
+ - Joris Van den Bossche
+ - Camille Scott
+ - Nathaniel Smith
+ numfocus:
+ - Phillip Cloud
+ - Stephan Hoyer
+ - Wes McKinney
+ - Jeff Reback
+ - Joris Van den Bossche
+partners:
+ active:
+ - name: "NumFOCUS"
+ url: https://numfocus.org/
+ logo: /static/img/partners/numfocus.svg
+ - name: "Anaconda"
+ url: https://www.anaconda.com/
+ logo: /static/img/partners/anaconda.svg
+ employs: "Tom Augspurger, Brock Mendel"
+ - name: "Two Sigma"
+ url: https://www.twosigma.com/
+ logo: /static/img/partners/two_sigma.svg
+ employs: "Phillip Cloud, Jeff Reback"
+ - name: "RStudio"
+ url: https://www.rstudio.com/
+ logo: /static/img/partners/r_studio.svg
+ employs: "Wes McKinney"
+ - name: "Ursa Labs"
+ url: https://ursalabs.org/
+ logo: /static/img/partners/ursa_labs.svg
+ employs: "Wes McKinney, Joris Van den Bossche"
+ - name: "Tidelift"
+ url: https://tidelift.com
+ logo: /static/img/partners/tidelift.svg
+ past:
+ - name: "Paris-Saclay Center for Data Science"
+ url: https://www.datascience-paris-saclay.fr/
diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md
new file mode 100644
index 0000000000000..825a5870bf5a0
--- /dev/null
+++ b/web/pandas/contribute.md
@@ -0,0 +1,12 @@
+# Contribute to pandas
+
+_pandas_ is and always will be **free**. To make the development sustainable, we need _pandas_ users, corporate
+or individual, to support the development by providing their time and money.
+
+You can find more information about current developers in the [team page](about/team.html),
+and about current sponsors in the [sponsors page](about/sponsors.html).
+Financial contributions will mainly be used to advance in the [pandas roadmap](about/roadmap.html).
+
+- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org)
+- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page]({{ base_url }}/docs/development/index.html)
+- If you want to support _pandas_ with a **donation**, please use the [donations page](donate.html).
diff --git a/web/pandas/donate.md b/web/pandas/donate.md
new file mode 100644
index 0000000000000..69db7e4648e77
--- /dev/null
+++ b/web/pandas/donate.md
@@ -0,0 +1,14 @@
+# Donate to pandas
+
+<div id="salsalabs-donate-container">
+</div>
+<script type="text/javascript"
+ src="https://default.salsalabs.org/api/widget/template/4ba4e328-1855-47c8-9a89-63e4757d2151/?tId=salsalabs-donate-container">
+</script>
+
+_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States.
+NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the
+health and sustainability of the project. Visit numfocus.org for more information.
+
+Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible
+to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.
diff --git a/web/pandas/getting_started.md b/web/pandas/getting_started.md
new file mode 100644
index 0000000000000..99a7a9f4b2d60
--- /dev/null
+++ b/web/pandas/getting_started.md
@@ -0,0 +1,51 @@
+# Getting started
+
+## Installation instructions
+
+The next steps provide the easiest and recommended way to set up your
+environment to use pandas. Other installation options can be found in
+the [advanced installation page]({{ base_url}}/docs/install.html).
+
+1. Download [Anaconda](https://www.anaconda.com/distribution/) for your operating system and
+ the latest Python version, run the installer, and follow the steps. Detailed instructions
+ on how to install Anaconda can be found in the
+   [Anaconda documentation](https://docs.anaconda.com/anaconda/install/).
+
+2. In the Anaconda prompt (or terminal in Linux or MacOS), start JupyterLab:
+
+ <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/anaconda_prompt.png"/>
+
+3. In JupyterLab, create a new (Python 3) notebook:
+
+ <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/jupyterlab_home.png"/>
+
+4. In the first cell of the notebook, you can import pandas and check the version with:
+
+ <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/pandas_import_and_version.png"/>
+
+5. Now you are ready to use pandas, and you can write your code in the next cells.
+
+## Tutorials
+
+You can learn more about pandas in the [tutorials](#), and more about JupyterLab
+in the [JupyterLab documentation](https://jupyterlab.readthedocs.io/en/stable/user/interface.html).
+
+## Books
+
+The book we recommend to learn pandas is [Python for Data Analysis](https://amzn.to/2KI5JJw),
+by [Wes McKinney](https://wesmckinney.com/), creator of pandas.
+
+<a href="https://amzn.to/2KI5JJw">
+ <img alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/>
+</a>
+
+## Videos
+
+<iframe width="560" height="315" frameborder="0"
+src="https://www.youtube.com/embed/_T8LGqJtuGc"
+allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
+allowfullscreen></iframe>
+
+## Cheat sheet
+
+[pandas cheat sheet](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf)
diff --git a/web/pandas/index.html b/web/pandas/index.html
new file mode 100644
index 0000000000000..df6e5ab9a330b
--- /dev/null
+++ b/web/pandas/index.html
@@ -0,0 +1,114 @@
+{% extends "layout.html" %}
+{% block body %}
+ <div class="container">
+ <div class="row">
+ <div class="col-md-9">
+ <section class="jumbotron text-center">
+ <h1>pandas</h1>
+ <p>
+ <strong>pandas</strong> is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,<br/>
+ built on top of the <a href="http://www.python.org">Python</a> programming language.
+ </p>
+ <p>
+ <a class="btn btn-primary" href="{{ base_url }}/getting_started.html">Install pandas now!</a>
+ </p>
+ </section>
+
+ <div class="row">
+ <div class="col-md-4">
+ <h5>Getting started</h5>
+ <ul>
+ <!-- <li><a href="{{ base_url }}/try.html">Try pandas online</a></li> -->
+ <li><a href="{{ base_url }}/getting_started.html">Install pandas</a></li>
+ <li><a href="{{ base_url }}/docs/getting_started/index.html">Getting started</a></li>
+ </ul>
+ </div>
+ <div class="col-md-4">
+ <h5>Documentation</h5>
+ <ul>
+ <li><a href="{{ base_url }}/docs/user_guide/index.html">User guide</a></li>
+ <li><a href="{{ base_url }}/docs/reference/index.html">API reference</a></li>
+ <li><a href="{{ base_url }}/docs/development/index.html">Contributing to pandas</a></li>
+ <li><a href="{{ base_url }}/docs/whatsnew/index.html">Release notes</a></li>
+ </ul>
+ </div>
+ <div class="col-md-4">
+ <h5>Community</h5>
+ <ul>
+ <li><a href="{{ base_url }}/community/about.html">About pandas</a></li>
+ <li><a href="https://stackoverflow.com/questions/tagged/pandas">Ask a question</a></li>
+ <li><a href="{{ base_url }}/community/ecosystem.html">Ecosystem</a></li>
+ </ul>
+ </div>
+ </div>
+ <section>
+ <h5>With the support of:</h5>
+ <div class="row h-100">
+ {% for company in partners.active %}
+ <div class="col-sm-6 col-md-2 my-auto">
+ <a href="{{ company.url }}" target="_blank">
+ <img class="img-fluid" alt="{{ company.name }}" src="{{ base_url }}{{ company.logo }}"/>
+ </a>
+ </div>
+ {% endfor %}
+ </div>
+ </section>
+ </div>
+ <div class="col-md-3">
+ {% if releases %}
+ <h4>Latest version: {{ releases[0].name }}</h4>
+ <ul>
+ <li><a href="docs/whatsnew/v0.25.0.html">What's new in {{ releases[0].name }}</a></li>
+ <li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li>
+ <li><a href="{{ base_url}}/docs/">Documentation (web)</a></li>
+ <li><a href="{{ base_url }}/docs/pandas.pdf">Documentation (pdf)</a></li>
+ <li><a href="{{ releases[0].url }}">Download source code</a></li>
+ </ul>
+ {% endif %}
+ <h4>Follow us</h4>
+ <div class="text-center">
+ <p>
+ <a href="https://twitter.com/pandas_dev?ref_src=twsrc%5Etfw" class="twitter-follow-button" data-show-count="false">Follow @pandas_dev</a><script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
+ </p>
+ </div>
+ <h4>Get the book</h4>
+ <p class="book">
+ <a href="https://amzn.to/2KI5JJw">
+ <img class="img-fluid" alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/>
+ </a>
+ </p>
+ {% if releases[1:5] %}
+ <h4>Previous versions</h4>
+ <ul>
+ {% for release in releases[1:5] %}
+ <li class="small">
+ {{ release.name }} ({{ release.published.strftime("%b %d, %Y") }})<br/>
+ <a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/{{ release.tag }}.html">changelog</a> |
+ <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/">docs</a> |
+ <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/pandas.pdf">pdf</a> |
+ <a href="{{ release.url }}">code</a>
+ </li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+ {% if releases[5:] %}
+ <p class="text-center">
+ <a data-toggle="collapse" href="#show-more-releases" role="button" aria-expanded="false" aria-controls="show-more-releases">Show more</a>
+ </p>
+ <ul id="show-more-releases" class="collapse">
+ {% for release in releases[5:] %}
+ <li class="small">
+ {{ release.name }} ({{ release.published.strftime("%Y-%m-%d") }})<br/>
+ <a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/{{ release.tag }}.html">changelog</a> |
+ <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/">docs</a> |
+ <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/pandas.pdf">pdf</a> |
+ <a href="{{ release.url }}">code</a>
+ </li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+ </div>
+ </div>
+ </div>
+
+{% endblock %}
diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css
new file mode 100644
index 0000000000000..0a227cf8d96c9
--- /dev/null
+++ b/web/pandas/static/css/pandas.css
@@ -0,0 +1,54 @@
+body {
+ padding-top: 5em;
+ color: #444;
+}
+h1 {
+ font-size: 2.4rem;
+ font-weight: 700;
+ color: #130654;
+}
+h2 {
+ font-size: 1.45rem;
+ font-weight: 700;
+ color: black;
+}
+h3 {
+ font-size: 1.3rem;
+ font-weight: 600;
+ color: black;
+}
+a {
+ color: #130654;
+}
+code {
+ white-space: pre;
+}
+.fab {
+ font-size: 1.2rem;
+ color: #666;
+}
+.fab:hover {
+ color: #130654;
+}
+a.navbar-brand img {
+ max-height: 2em;
+}
+div.card {
+ margin: 0 0 .2em .2em !important;
+}
+div.card .card-title {
+ font-weight: 500;
+ color: #130654;
+}
+.book {
+ padding: 0 20%;
+}
+.bg-dark {
+ background-color: #130654 !important;
+}
+.navbar-dark .navbar-nav .nav-link {
+ color: rgba(255, 255, 255, .9);
+}
+.navbar-dark .navbar-nav .nav-link:hover {
+ color: white;
+}
diff --git a/web/pandas/static/img/install/anaconda_prompt.png b/web/pandas/static/img/install/anaconda_prompt.png
new file mode 100644
index 0000000000000..7b547e4ebb02a
Binary files /dev/null and b/web/pandas/static/img/install/anaconda_prompt.png differ
diff --git a/web/pandas/static/img/install/jupyterlab_home.png b/web/pandas/static/img/install/jupyterlab_home.png
new file mode 100644
index 0000000000000..c62d33a5e0fc6
Binary files /dev/null and b/web/pandas/static/img/install/jupyterlab_home.png differ
diff --git a/web/pandas/static/img/install/pandas_import_and_version.png b/web/pandas/static/img/install/pandas_import_and_version.png
new file mode 100644
index 0000000000000..64c1303ac495c
Binary files /dev/null and b/web/pandas/static/img/install/pandas_import_and_version.png differ
diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg
new file mode 120000
index 0000000000000..2e5d3872e4845
--- /dev/null
+++ b/web/pandas/static/img/pandas.svg
@@ -0,0 +1 @@
+../../../../doc/logo/pandas_logo.svg
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/anaconda.svg b/web/pandas/static/img/partners/anaconda.svg
new file mode 100644
index 0000000000000..fcddf72ebaa28
--- /dev/null
+++ b/web/pandas/static/img/partners/anaconda.svg
@@ -0,0 +1,99 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ viewBox="0 0 530.44 90.053329"
+ height="90.053329"
+ width="530.44"
+ xml:space="preserve"
+ id="svg2"
+ version="1.1"><metadata
+ id="metadata8"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
+ id="defs6" /><g
+ transform="matrix(1.3333333,0,0,-1.3333333,0,90.053333)"
+ id="g10"><g
+ transform="scale(0.1)"
+ id="g12"><path
+ id="path14"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 958.313,274.5 53.637,120.406 h 1.64 L 1068.32,274.5 Z m 67.867,251.754 c -1.65,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 831.887,157.93 c -3.282,-7.117 1.097,-14.231 9.304,-14.231 h 47.618 c 8.754,0 13.679,5.473 15.867,10.942 l 26.82,59.113 h 163.644 l 26.81,-59.113 c 3.83,-7.657 7.66,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path
+ id="path16"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 1547.94,526.801 h -50.35 c -6.03,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.55 l -227.67,241.91 h -13.68 c -5.48,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.92,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.54 L 1534.8,138.227 h 13.14 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path
+ id="path18"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 1725.97,274.5 53.64,120.406 h 1.64 L 1835.98,274.5 Z m 67.87,251.754 c -1.64,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 1599.55,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.75,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.64 l 26.81,-59.113 c 3.83,-7.657 7.67,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path
+ id="path20"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 2261.6,241.117 c -3.29,3.285 -9.31,3.836 -13.69,0 -22.98,-18.605 -50.9,-31.191 -83.73,-31.191 -70.06,0 -122.6,58.008 -122.6,126.418 0,68.965 51.99,127.519 122.05,127.519 30.64,0 61.3,-12.039 84.28,-32.285 4.38,-4.379 9.85,-4.379 13.69,0 l 33.38,34.477 c 4.38,4.375 4.38,10.941 -0.55,15.328 -37.21,33.383 -77.17,50.898 -132.45,50.898 -109.45,0 -197.57,-88.117 -197.57,-197.574 0,-109.465 88.12,-196.48 197.57,-196.48 48.72,0 95.78,16.964 133,53.086 3.83,3.835 4.92,10.949 0.55,14.777 l -33.93,35.027" /><path
+ id="path22"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 2520.21,209.379 c -68.95,0 -125.33,56.371 -125.33,125.328 0,68.957 56.38,126.426 125.33,126.426 68.96,0 125.88,-57.469 125.88,-126.426 0,-68.957 -56.92,-125.328 -125.88,-125.328 z m 0,322.902 c -109.46,0 -196.48,-88.117 -196.48,-197.574 0,-109.465 87.02,-196.48 196.48,-196.48 109.46,0 197.03,87.015 197.03,196.48 0,109.457 -87.57,197.574 -197.03,197.574" /><path
+ id="path24"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 3090.17,526.801 h -50.35 c -6.02,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.54 l -227.68,241.91 h -13.68 c -5.47,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.93,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.55 l 228.77,-251.207 h 13.13 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path
+ id="path26"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 3303.16,210.465 h -62.39 v 250.121 h 62.39 c 71.15,0 123.14,-53.641 123.14,-124.785 0,-71.696 -51.99,-125.336 -123.14,-125.336 z m 6.57,316.336 h -129.71 c -5.47,0 -9.85,-4.922 -9.85,-10.395 V 154.102 c 0,-5.481 4.38,-10.403 9.85,-10.403 h 129.71 c 105.63,0 192.1,85.926 192.1,192.102 0,105.082 -86.47,191 -192.1,191" /><path
+ id="path28"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 3631.32,274.5 53.64,120.406 h 1.64 L 3741.33,274.5 Z m 236.43,-116.57 -168.57,368.324 c -1.64,3.285 -3.82,6.027 -9.29,6.027 h -5.48 c -4.93,0 -7.67,-2.742 -9.3,-6.027 L 3504.9,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.76,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.63 l 26.83,-59.113 c 3.82,-7.657 7.66,-10.942 15.86,-10.942 h 47.62 c 8.21,0 12.59,7.114 9.3,14.231" /><path
+ id="path30"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 3940.9,176.27 h 7.99 c 2.7,0 4.5,-1.793 4.5,-4.403 0,-2.422 -1.8,-4.394 -4.5,-4.394 h -7.99 z m -4.85,-26.582 h 3.33 c 0.99,0 1.7,0.808 1.7,1.707 v 10.148 h 5.57 l 4.49,-10.598 c 0.27,-0.629 0.9,-1.257 1.62,-1.257 h 4.04 c 1.26,0 2.16,1.257 1.53,2.425 -1.53,3.235 -3.15,6.645 -4.76,9.969 2.69,0.984 6.82,3.5 6.82,9.879 0,6.824 -5.48,10.594 -11.04,10.594 h -13.3 c -0.98,0 -1.7,-0.809 -1.7,-1.703 v -29.457 c 0,-0.899 0.72,-1.707 1.7,-1.707" /><path
+ id="path32"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 3945.93,192.078 c 14.46,0 26.05,-11.586 26.05,-26.043 0,-14.371 -11.59,-26.047 -26.05,-26.047 -14.37,0 -26.04,11.676 -26.04,26.047 0,14.457 11.67,26.043 26.04,26.043 z m 0,-58.285 c 17.79,0 32.33,14.461 32.33,32.242 0,17.781 -14.54,32.328 -32.33,32.328 -17.78,0 -32.24,-14.547 -32.24,-32.328 0,-17.781 14.46,-32.242 32.24,-32.242" /><path
+ id="path34"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 125.527,158.422 0.051,2.484 c 0.414,19.649 1.977,39.149 4.684,57.961 l 0.254,1.77 -1.668,0.679 c -17.871,7.305 -35.4574,15.782 -52.2699,25.219 l -2.1172,1.184 -1.0742,-2.16 C 62.3164,223.238 52.9844,199.707 45.6836,175.602 l -0.7031,-2.254 2.2812,-0.629 C 72.0234,165.91 97.5195,161.184 123.051,158.66 l 2.476,-0.238" /><path
+ id="path36"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 177.781,500.941 c 0.032,0.196 0.063,0.395 0.094,0.59 -14.668,-0.258 -29.324,-1.265 -43.926,-2.965 1.891,-14.777 4.481,-29.437 7.828,-43.925 10.02,16.949 22.121,32.511 36.004,46.3" /><path
+ id="path38"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 125.527,140.855 -0.039,2.051 -2.043,0.199 c -21.406,2.02 -43.2223,5.661 -64.8278,10.821 l -5.668,1.355 3.211,-4.855 C 75.5742,121.098 99.3125,95.0195 126.73,72.9258 l 4.43,-3.5899 -0.719,5.668 c -2.906,22.6719 -4.554,44.8321 -4.914,65.8511" /><path
+ id="path40"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 230.566,657.227 c -26.32,-9.008 -51.164,-21.161 -74.101,-36.036 17.359,-3.07 34.469,-7.097 51.273,-12.027 6.696,16.375 14.297,32.426 22.828,48.063" /><path
+ id="path42"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 339.918,675.43 c -13.023,0 -25.848,-0.813 -38.488,-2.25 17.925,-12.489 35.066,-26.145 51.238,-41.051 l 13.43,-12.391 -13.168,-12.672 c -10.899,-10.488 -21.559,-21.898 -31.688,-33.918 l -0.512,-0.585 c -0.117,-0.125 -2.003,-2.219 -5.152,-6.055 8,0.84 16.117,1.293 24.34,1.293 127.07,0 230.086,-103.016 230.086,-230.086 0,-127.074 -103.016,-230.086 -230.086,-230.086 -44.094,0 -85.277,12.426 -120.277,33.934 -17.27,-1.918 -34.629,-2.922 -52.012,-2.922 -8.074,0 -16.152,0.211 -24.227,0.629 0.524,-26.172 3.016,-53.3052 7.477,-81.438 C 204.82,21.3242 269.879,0 339.918,0 c 186.516,0 337.715,151.199 337.715,337.715 0,186.512 -151.199,337.715 -337.715,337.715" /><path
+ id="path44"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 295.145,595.602 c 6.726,7.968 13.671,15.695 20.765,23.101 -15.824,13.469 -32.531,25.758 -50.004,36.856 -10.742,-18.161 -20.09,-36.977 -28.093,-56.282 15.195,-5.574 30.066,-11.953 44.589,-19.031 6.711,8.617 11.399,13.883 12.743,15.356" /><path
+ id="path46"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 65.9219,402.934 1.289,-2.09 2.0118,1.433 c 15.6289,11.235 32.0823,21.594 48.9103,30.789 l 1.582,0.864 -0.449,1.738 c -5.028,19.227 -8.868,39.055 -11.414,58.941 l -0.305,2.399 -2.387,-0.434 C 80.168,492.027 55.4609,485.344 31.7383,476.703 l -2.2227,-0.816 0.8789,-2.188 c 9.7422,-24.562 21.6914,-48.363 35.5274,-70.765" /><path
+ id="path48"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="M 62.0469,370.18 60.125,368.629 C 41.9492,353.844 24.7266,337.414 8.93359,319.797 L 7.375,318.066 9.13281,316.531 C 26.6641,301.188 45.5547,287.094 65.2734,274.645 l 2.0274,-1.293 1.2031,2.097 c 8.8828,15.781 18.8945,31.356 29.7695,46.278 l 1.0938,1.503 -1.2383,1.383 c -12.3281,13.746 -23.9883,28.395 -34.668,43.547 l -1.414,2.02" /><path
+ id="path50"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 194.48,157.273 5.868,0.348 -4.559,3.723 c -17.976,14.715 -33.625,32.09 -46.453,51.656 l -0.106,0.621 -3.75,1.649 -0.433,-3.184 c -2.262,-16.856 -3.586,-34.566 -3.945,-52.625 l -0.039,-2.215 2.207,-0.129 c 8.003,-0.429 16.078,-0.644 24.171,-0.644 9.004,0 18.032,0.269 27.039,0.8" /><path
+ id="path52"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 183.219,530.238 c 3.633,16.649 8.109,33.121 13.511,49.317 -21.125,6.078 -42.769,10.617 -64.789,13.523 -1.867,-22.047 -2.082,-44.082 -0.707,-65.941 17.278,1.988 34.629,3.011 51.985,3.101" /><path
+ id="path54"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 215.813,531.414 c 14.707,9.441 30.539,17.266 47.281,23.195 -11.875,5.59 -24,10.661 -36.348,15.184 -4.219,-12.633 -7.863,-25.441 -10.933,-38.379" /><path
+ id="path56"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 58.6914,257.121 -1.7773,1.113 C 39.4922,269.16 22.6055,281.363 6.74609,294.496 l -4.51953,3.742 0.76953,-5.812 C 7.30078,260.039 16.2734,228.496 29.6406,198.684 l 2.3672,-5.278 1.9024,5.465 c 6.6406,19.125 14.6601,38.102 23.8281,56.387 l 0.9531,1.863" /><path
+ id="path58"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="M 102.133,577.48 C 81.9766,557.492 64.3555,534.969 49.7266,510.445 c 17.4804,5.215 35.1836,9.371 53.0194,12.528 -1.23,18.082 -1.465,36.273 -0.613,54.507" /><path
+ id="path60"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 112.121,340.762 0.234,5.824 c 0.79,20.598 4.309,40.855 10.461,60.195 l 1.793,5.653 -5.129,-2.961 c -13.152,-7.59 -26.1792,-16.012 -38.7222,-25.047 l -1.8281,-1.328 1.293,-1.86 c 8.6992,-12.406 18.1562,-24.535 28.0973,-36.062 l 3.801,-4.414" /><path
+ id="path62"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 114.383,305.906 -0.805,5.707 -3.34,-4.691 C 100.836,293.727 92.082,279.945 84.2227,265.961 l -1.1133,-1.992 1.9922,-1.133 c 14.1562,-7.984 29.0114,-15.305 44.1564,-21.762 l 5.402,-2.316 -2.406,5.363 c -8.863,19.668 -14.875,40.453 -17.871,61.785" /><path
+ id="path64"
+ style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ d="m 48.6602,386.676 1.5976,1.273 -1.0781,1.735 c -10.5859,16.918 -20.1836,34.707 -28.5469,52.867 l -2.457,5.355 -1.8125,-5.605 C 6.51172,411.789 1.05859,379.887 0.160156,347.473 L 0,341.523 4.10938,345.82 c 14.01172,14.598 28.99612,28.34 44.55082,40.856" /></g></g></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/numfocus.svg b/web/pandas/static/img/partners/numfocus.svg
new file mode 100644
index 0000000000000..fcdd87b41e475
--- /dev/null
+++ b/web/pandas/static/img/partners/numfocus.svg
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Generator: Adobe Illustrator 19.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 432 135.7" style="enable-background:new 0 0 432 135.7;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill:#F1563F;}
+ .st1{fill:#008896;}
+</style>
+<g>
+ <g>
+ <g>
+ <path class="st0" d="M97.9,12.2v51.9c0,12.7-6.8,19.7-19.1,19.7c-12.2,0-19-7-19-19.7V12.2h5v51.9c0,9.8,4.8,14.9,14,14.9 c9.2,0,14.1-5.2,14.1-14.9V12.2H97.9z"/>
+ </g>
+ <g>
+ <path class="st1" d="M329.8,29.8c0-0.3,0.1-0.7,0.1-1c0-8.3-6.9-16.7-20.1-16.7c-13.1,0-20.6,7.7-20.6,21.2v29.5 c0,13.5,7.4,21.2,20.3,21.2c13.4,0,20.4-8.4,20.4-16.7c0-0.3,0-0.8-0.1-1.4l-7.8,0c-0.7,4.6-1.7,10.4-12,10.4 c-9,0-13-4.1-13-13.4V33.3c0-9.2,4-13.4,12.7-13.4c7.7,0,11.8,3.4,12.2,10.1L329.8,29.8z"/>
+ </g>
+ <g>
+ <path class="st1" d="M376.2,12.4v50.3c0,13.6-7.3,21.2-20.5,21.2c-13.2,0-20.4-7.5-20.4-21.2V12.4h7.9v50.3 c0,9,4.1,13.4,12.5,13.4c8.4,0,12.6-4.5,12.6-13.4V12.4H376.2z"/>
+ </g>
+ <g>
+ <path class="st1" d="M414.9,22.6c-2-1-6-2.9-11.3-2.9c-8.4,0-12.6,3.4-12.6,10c0,7.1,4.8,9.1,12.5,11.8 c8.3,2.9,18.6,6.5,18.6,21.9c0,13-7.5,20.5-20.6,20.5c-8.2,0-14.2-2.7-17.3-6c-1.2-1.3-0.5-0.6-1.2-1.6l5.3-5.1 c1.9,2.2,5,5,12.8,5c8.7,0,13.2-4.1,13.2-12.3c0-9.9-6.6-12.3-14.3-15.1C392,46,383,42.8,383,30.1c0-11.3,7.7-18.1,20.6-18.1 c5.5,0,12.5,1.3,15.4,4.1L414.9,22.6z"/>
+ </g>
+ <g>
+ <path class="st1" d="M283.5,47.2c0-21.2-17.2-38.5-38.5-38.5c-21.2,0-38.5,17.2-38.5,38.5c0,21.2,17.2,38.5,38.5,38.5 C266.2,85.6,283.5,68.4,283.5,47.2z M213.1,47.2c0-17.6,14.3-31.9,31.9-31.9c17.6,0,31.9,14.3,31.9,31.9 c0,17.6-14.3,31.9-31.9,31.9C227.4,79.1,213.1,64.8,213.1,47.2z"/>
+ </g>
+ <g>
+ <path class="st0" d="M233.9,32.3c1.2,0,2.1-1,2.1-2.3c0-1.3-0.9-2.3-2.1-2.3h-7.3c-1.3,0-2.3,1-2.3,2.2v34.5c0,1.2,1,2.2,2.3,2.2 h7.3c1.2,0,2.1-1,2.1-2.3c0-1.3-1-2.3-2.1-2.3h-4.9V32.3H233.9z"/>
+ </g>
+ <g>
+ <path class="st0" d="M256.1,62c-1.2,0-2.2,1-2.2,2.3c0,1.3,1,2.3,2.2,2.3h7.3c1.3,0,2.3-1,2.3-2.2V29.9c0-1.2-1-2.2-2.3-2.2h-7.3 c-1.2,0-2.2,1-2.2,2.3c0,1.3,1,2.3,2.2,2.3h4.9V62H256.1z"/>
+ </g>
+ <polygon class="st1" points="208.7,19.8 208.7,12.1 171.8,12.1 171.8,83.7 179.7,83.7 179.7,51.5 196.2,51.5 196.2,43.9 179.7,43.9 179.7,19.8 "/>
+ <polygon class="st0" points="156.6,12.2 152.3,12.2 133.2,51.9 113.9,12.2 109.7,12.2 109.7,83.7 114.6,83.7 114.6,24.3 133.1,61.9 151.6,24.4 151.6,83.7 156.6,83.7 "/>
+ <polygon class="st0" points="44.6,83.7 48.1,83.7 48.1,12.1 43.1,12.1 43.2,70.5 14.2,12.2 10.1,12.2 10.1,83.7 15.1,83.7 14.9,23.1 "/>
+ </g>
+ <g id="XMLID_3_">
+ <path class="st1" d="M34.9,125.3c-1.2,0-2.3-0.2-3.2-0.5c-0.9-0.3-1.6-0.8-2.1-1.5c-0.5-0.7-0.9-1.4-1.1-2.3 c-0.2-0.9-0.4-1.9-0.4-3v-8.5c0-2.3,0.5-4.1,1.6-5.3c1.1-1.2,2.8-1.8,5.2-1.8c2.4,0,4.1,0.6,5.2,1.8c1.1,1.2,1.6,3,1.6,5.3v8.5 c0,2.3-0.5,4.1-1.6,5.4C39.1,124.7,37.3,125.3,34.9,125.3z M33.3,122.5c0.4,0.2,1,0.3,1.7,0.3c0.7,0,1.2-0.1,1.7-0.3 c0.4-0.2,0.8-0.5,1-0.9c0.2-0.4,0.4-0.8,0.5-1.3c0.1-0.5,0.1-1,0.1-1.7v-9.9c0-0.7,0-1.2-0.1-1.7c-0.1-0.5-0.2-0.9-0.5-1.2 c-0.2-0.4-0.6-0.7-1-0.8c-0.4-0.2-1-0.3-1.7-0.3c-0.7,0-1.2,0.1-1.7,0.3c-0.4,0.2-0.8,0.5-1,0.8c-0.2,0.4-0.4,0.8-0.5,1.2 c-0.1,0.5-0.1,1-0.1,1.7v9.9c0,0.7,0,1.3,0.1,1.7c0.1,0.5,0.2,0.9,0.5,1.3C32.5,122.1,32.8,122.4,33.3,122.5z"/>
+ <path class="st1" d="M46.7,125v-22.5h6.2c2.2,0,3.7,0.5,4.7,1.6c1,1.1,1.5,2.6,1.5,4.6c0,1.8-0.5,3.3-1.6,4.3 c-1,1-2.6,1.5-4.6,1.5h-2.7V125H46.7z M50.2,112.3h1.6c1.5,0,2.6-0.2,3.1-0.7c0.6-0.5,0.9-1.4,0.9-2.8c0-0.6,0-1,0-1.4 c0-0.4-0.1-0.7-0.2-1c-0.1-0.3-0.2-0.6-0.4-0.8c-0.2-0.2-0.4-0.3-0.7-0.5c-0.3-0.1-0.7-0.2-1.1-0.3c-0.4,0-0.9-0.1-1.5-0.1h-1.6 V112.3z"/>
+ <path class="st1" d="M63,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H63z"/>
+ <path class="st1" d="M77.1,125v-22.5h2.4l7.1,15v-15h2.9V125h-2.2L80,109.7V125H77.1z"/>
+ <path class="st1" d="M109.6,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.8,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.6-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1s0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C111.4,125.2,110.6,125.3,109.6,125.3z"/>
+ <path class="st1" d="M127.1,125.3c-1.2,0-2.3-0.2-3.2-0.5c-0.9-0.3-1.6-0.8-2.1-1.5c-0.5-0.7-0.9-1.4-1.1-2.3 c-0.2-0.9-0.4-1.9-0.4-3v-8.5c0-2.3,0.5-4.1,1.6-5.3c1.1-1.2,2.8-1.8,5.2-1.8c2.4,0,4.1,0.6,5.2,1.8c1.1,1.2,1.6,3,1.6,5.3v8.5 c0,2.3-0.5,4.1-1.6,5.4C131.2,124.7,129.5,125.3,127.1,125.3z M125.4,122.5c0.4,0.2,1,0.3,1.7,0.3c0.7,0,1.2-0.1,1.7-0.3 c0.4-0.2,0.8-0.5,1-0.9c0.2-0.4,0.4-0.8,0.5-1.3c0.1-0.5,0.1-1,0.1-1.7v-9.9c0-0.7,0-1.2-0.1-1.7c-0.1-0.5-0.2-0.9-0.5-1.2 c-0.2-0.4-0.6-0.7-1-0.8c-0.4-0.2-1-0.3-1.7-0.3c-0.7,0-1.2,0.1-1.7,0.3c-0.4,0.2-0.8,0.5-1,0.8c-0.2,0.4-0.4,0.8-0.5,1.2 c-0.1,0.5-0.1,1-0.1,1.7v9.9c0,0.7,0,1.3,0.1,1.7c0.1,0.5,0.2,0.9,0.5,1.3C124.6,122.1,125,122.4,125.4,122.5z"/>
+ <path class="st1" d="M138.9,125v-22.5h5.4c2.7,0,4.6,0.6,5.7,1.7c1.1,1.1,1.7,2.8,1.7,5.2v8.3c0,2.5-0.5,4.3-1.6,5.5 c-1.1,1.2-2.9,1.8-5.5,1.8H138.9z M142.3,122.8h2c0.5,0,1,0,1.4-0.1c0.4-0.1,0.7-0.2,1-0.3c0.3-0.1,0.5-0.3,0.7-0.6 c0.2-0.3,0.3-0.6,0.4-0.8c0.1-0.2,0.2-0.6,0.2-1.1c0-0.5,0.1-0.9,0.1-1.2c0-0.3,0-0.8,0-1.5v-7.3c0-0.5,0-1,0-1.4 c0-0.4-0.1-0.7-0.1-1.1c-0.1-0.4-0.2-0.7-0.3-0.9c-0.1-0.2-0.3-0.5-0.5-0.7c-0.2-0.2-0.4-0.4-0.7-0.5c-0.3-0.1-0.6-0.2-1-0.3 c-0.4-0.1-0.8-0.1-1.3-0.1h-1.9V122.8z"/>
+ <path class="st1" d="M156.6,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H156.6z"/>
+ <path class="st1" d="M178.9,112.4v-2.3h9.4v2.3H178.9z M178.9,117.3v-2.3h9.4v2.3H178.9z"/>
+ <path class="st1" d="M202.1,125v-22.5h5.7c2.3,0,3.9,0.5,5,1.4c1.1,0.9,1.6,2.3,1.6,4.3c0,2.9-1.2,4.5-3.6,4.8 c1.5,0.3,2.5,1,3.2,1.9c0.7,0.9,1,2.2,1,3.8c0,2-0.5,3.6-1.6,4.7c-1,1.1-2.7,1.7-4.8,1.7H202.1z M205.6,111.9h2 c1.4,0,2.4-0.3,3-0.9c0.6-0.6,0.8-1.5,0.8-2.9c0-0.4,0-0.8-0.1-1.2s-0.2-0.6-0.3-0.8c-0.1-0.2-0.3-0.4-0.5-0.6 c-0.2-0.2-0.5-0.3-0.7-0.4c-0.2-0.1-0.5-0.2-0.9-0.2c-0.4,0-0.8-0.1-1.1-0.1c-0.4,0-0.8,0-1.4,0h-0.8V111.9z M205.6,122.8h2.3 c1.5,0,2.5-0.3,3.1-1c0.6-0.6,0.8-1.7,0.8-3.2c0-1.4-0.3-2.5-1-3.2c-0.7-0.7-1.7-1.1-3.2-1.1h-2.1V122.8z"/>
+ <path class="st1" d="M219.7,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H219.7z"/>
+ <path class="st1" d="M236.4,125v-20.2h-4.6v-2.3h12.4v2.3h-4.4V125H236.4z"/>
+ <path class="st1" d="M250.2,125v-20.2h-4.6v-2.3h12.4v2.3h-4.4V125H250.2z"/>
+ <path class="st1" d="M261.5,125v-22.5h9.6v2.3H265v7.4h5v2.2h-5v8.3h6.2v2.3H261.5z"/>
+ <path class="st1" d="M275.6,125v-22.5h5c2.5,0,4.4,0.5,5.5,1.4c1.2,0.9,1.8,2.5,1.8,4.6c0,2.9-1,4.7-3.1,5.3l3.6,11.3H285 l-3.3-10.6H279V125H275.6z M279,112.2h1.3c1.5,0,2.6-0.3,3.2-0.8c0.6-0.5,1-1.5,1-2.9c0-1.4-0.3-2.3-0.8-2.9 c-0.6-0.6-1.6-0.8-3.1-0.8H279V112.2z"/>
+ <path class="st1" d="M307.5,125.3c-2.1,0-3.7-0.6-4.8-1.8c-1.1-1.2-1.7-2.8-1.8-4.8l3.1-0.8c0.2,3.2,1.4,4.8,3.5,4.8 c0.9,0,1.6-0.2,2-0.7c0.5-0.4,0.7-1.1,0.7-2c0-0.5-0.1-0.9-0.2-1.3c-0.1-0.4-0.3-0.8-0.6-1.1c-0.3-0.4-0.6-0.7-0.8-0.9 c-0.3-0.2-0.6-0.6-1.1-1l-4.2-3.4c-0.8-0.7-1.5-1.4-1.8-2.2c-0.4-0.8-0.6-1.7-0.6-2.8c0-1.6,0.5-2.9,1.6-3.8 c1-0.9,2.5-1.4,4.3-1.4c2,0,3.5,0.4,4.5,1.4c1,1,1.6,2.4,1.8,4.4l-2.9,0.7c0-0.5-0.1-0.9-0.2-1.3c-0.1-0.4-0.2-0.8-0.3-1.1 c-0.2-0.4-0.4-0.7-0.6-0.9c-0.2-0.2-0.5-0.4-0.9-0.6c-0.4-0.1-0.8-0.2-1.3-0.2c-1.8,0.1-2.7,0.9-2.7,2.5c0,0.7,0.1,1.2,0.4,1.7 c0.3,0.4,0.7,0.9,1.3,1.4l4.2,3.4c1.1,0.9,2,1.8,2.6,2.9c0.6,1,1,2.2,1,3.5c0,1.6-0.5,3-1.7,3.9 C310.7,124.8,309.3,125.3,307.5,125.3z"/>
+ <path class="st1" d="M323.9,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.9,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.5-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1c0,0.4,0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C325.7,125.2,324.9,125.3,323.9,125.3z"/>
+ <path class="st1" d="M335.2,125v-22.5h3.4V125H335.2z"/>
+ <path class="st1" d="M344.2,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H344.2z"/>
+ <path class="st1" d="M358.3,125v-22.5h2.4l7.1,15v-15h2.9V125h-2.2l-7.2-15.4V125H358.3z"/>
+ <path class="st1" d="M382.3,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.9,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.5-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1c0,0.4,0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C384.1,125.2,383.2,125.3,382.3,125.3z"/>
+ <path class="st1" d="M393.4,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H393.4z"/>
+ </g>
+</g>
+</svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/r_studio.svg b/web/pandas/static/img/partners/r_studio.svg
new file mode 100644
index 0000000000000..15a1d2a30ff30
--- /dev/null
+++ b/web/pandas/static/img/partners/r_studio.svg
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Generator: Adobe Illustrator 22.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 1784.1 625.9" style="enable-background:new 0 0 1784.1 625.9;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill:#75AADB;}
+ .st1{fill:#4D4D4D;}
+ .st2{fill:#FFFFFF;}
+ .st3{fill:url(#SVGID_1_);}
+ .st4{fill:url(#SVGID_2_);}
+ .st5{fill:url(#SVGID_3_);}
+ .st6{fill:url(#SVGID_4_);}
+ .st7{fill:url(#SVGID_5_);}
+ .st8{fill:url(#SVGID_6_);}
+ .st9{fill:url(#SVGID_7_);}
+ .st10{fill:url(#SVGID_8_);}
+ .st11{fill:url(#SVGID_9_);}
+ .st12{fill:url(#SVGID_10_);}
+ .st13{opacity:0.18;fill:url(#SVGID_11_);}
+ .st14{opacity:0.3;}
+</style>
+<g id="Gray_Logo">
+</g>
+<g id="Black_Letters">
+</g>
+<g id="Blue_Gradient_Letters">
+ <g>
+
+ <ellipse transform="matrix(0.7071 -0.7071 0.7071 0.7071 -127.9265 317.0317)" class="st0" cx="318.7" cy="312.9" rx="309.8" ry="309.8"/>
+ <g>
+ <path class="st1" d="M694.4,404.8c16.1,10.3,39.1,18.1,63.9,18.1c36.7,0,58.1-19.4,58.1-47.4c0-25.5-14.8-40.8-52.3-54.8 c-45.3-16.5-73.3-40.4-73.3-79.1c0-43.3,35.8-75.4,89.8-75.4c28,0,49,6.6,61,13.6l-9.9,29.3c-8.7-5.4-27.2-13.2-52.3-13.2 c-37.9,0-52.3,22.7-52.3,41.6c0,26,16.9,38.7,55.2,53.6c47,18.1,70.5,40.8,70.5,81.6c0,42.8-31.3,80.3-96.8,80.3 c-26.8,0-56-8.2-70.9-18.1L694.4,404.8z"/>
+ <path class="st1" d="M943.3,201.3v47.8h51.9v27.6h-51.9v107.5c0,24.7,7,38.7,27.2,38.7c9.9,0,15.7-0.8,21-2.5l1.6,27.6 c-7,2.5-18.1,4.9-32.1,4.9c-16.9,0-30.5-5.8-39.1-15.2c-9.9-11.1-14-28.8-14-52.3V276.7h-30.9v-27.6h30.9V212L943.3,201.3z"/>
+ <path class="st1" d="M1202.8,393.7c0,21,0.4,39.1,1.6,54.8h-32.1l-2.1-32.5h-0.8c-9.1,16.1-30.5,37.1-65.9,37.1 c-31.3,0-68.8-17.7-68.8-87.3V249.1h36.3v110c0,37.9,11.9,63.9,44.5,63.9c24.3,0,41.2-16.9,47.8-33.4c2.1-4.9,3.3-11.5,3.3-18.5 v-122h36.3V393.7z"/>
+ <path class="st1" d="M1434.8,156v241c0,17.7,0.8,37.9,1.6,51.5h-32.1l-1.6-34.6h-1.2c-10.7,22.2-34.6,39.1-67.2,39.1 c-48.2,0-85.7-40.8-85.7-101.4c-0.4-66.3,41.2-106.7,89.4-106.7c30.9,0,51.1,14.4,60.2,30.1h0.8V156H1434.8z M1398.9,330.2 c0-4.5-0.4-10.7-1.6-15.2c-5.4-22.7-25.1-41.6-52.3-41.6c-37.5,0-59.7,33-59.7,76.6c0,40.4,20.2,73.8,58.9,73.8 c24.3,0,46.6-16.5,53.1-43.3c1.2-4.9,1.6-9.9,1.6-15.7V330.2z"/>
+ <path class="st1" d="M1535.7,193c0,12.4-8.7,22.2-23.1,22.2c-13.2,0-21.8-9.9-21.8-22.2c0-12.4,9.1-22.7,22.7-22.7 C1526.6,170.4,1535.7,180.3,1535.7,193z M1495.3,448.5V249.1h36.3v199.4H1495.3z"/>
+ <path class="st1" d="M1772.2,347.1c0,73.7-51.5,105.9-99.3,105.9c-53.6,0-95.6-39.6-95.6-102.6c0-66.3,44.1-105.5,98.9-105.5 C1733.5,245,1772.2,286.6,1772.2,347.1z M1614.4,349.2c0,43.7,24.7,76.6,60.2,76.6c34.6,0,60.6-32.5,60.6-77.5 c0-33.8-16.9-76.2-59.7-76.2C1632.9,272.1,1614.4,311.7,1614.4,349.2z"/>
+ </g>
+ <g>
+ <path class="st2" d="M424.7,411.8h33.6v26.1h-51.3L322,310.5h-45.3v101.3h44.3v26.1H209.5v-26.1h38.3V187.3l-38.3-4.7v-24.7 c14.5,3.3,27.1,5.6,42.9,5.6c23.8,0,48.1-5.6,71.9-5.6c46.2,0,89.1,21,89.1,72.3c0,39.7-23.8,64.9-60.7,75.6L424.7,411.8z M276.7,285.3l24.3,0.5c59.3,0.9,82.1-21.9,82.1-52.3c0-35.5-25.7-49.5-58.3-49.5c-15.4,0-31.3,1.4-48.1,3.3V285.3z"/>
+ </g>
+ <g>
+ <path class="st1" d="M1751.8,170.4c-12.9,0-23.4,10.5-23.4,23.4c0,12.9,10.5,23.4,23.4,23.4c12.9,0,23.4-10.5,23.4-23.4 C1775.2,180.9,1764.7,170.4,1751.8,170.4z M1771.4,193.8c0,10.8-8.8,19.5-19.5,19.5c-10.8,0-19.5-8.8-19.5-19.5 c0-10.8,8.8-19.5,19.5-19.5C1762.6,174.2,1771.4,183,1771.4,193.8z"/>
+ <path class="st1" d="M1760.1,203.3l-5.8-8.5c3.3-1.2,5-3.6,5-7c0-5.1-4.3-6.9-8.4-6.9c-1.1,0-2.2,0.1-3.2,0.3 c-1,0.1-2.1,0.2-3.1,0.2c-1.4,0-2.5-0.2-3.7-0.5l-0.6-0.1v3.3l3.4,0.4v18.8h-3.4v3.4h10.9v-3.4h-3.9v-7.9h3.2l7.3,11l0.2,0.2h5.3 v-3.4H1760.1z M1755.6,188.1c0,1.2-0.5,2.2-1.4,2.9c-1.1,0.8-2.8,1.2-5,1.2l-1.9,0v-7.7c1.4-0.1,2.6-0.2,3.7-0.2 C1753.1,184.3,1755.6,185,1755.6,188.1z"/>
+ </g>
+ </g>
+</g>
+<g id="White_Letters">
+</g>
+<g id="R_Ball">
+</g>
+</svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/tidelift.svg b/web/pandas/static/img/partners/tidelift.svg
new file mode 100644
index 0000000000000..af12d68417235
--- /dev/null
+++ b/web/pandas/static/img/partners/tidelift.svg
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 21.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" id="Artwork" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ viewBox="0 0 190.1 33" style="enable-background:new 0 0 190.1 33;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill:#4B5168;}
+ .st1{fill:#F6914D;}
+</style>
+<g>
+ <path class="st0" d="M33.4,27.7V5.3c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v22.4c0,2.3,0,2.3-2.4,2.3
+ C33.4,29.9,33.4,29.9,33.4,27.7z"/>
+ <path class="st0" d="M45,26.4V6.6c0-3.6,0-3.6,3.6-3.6h5.8c7.8,0,12.5,3.9,13,10.2c0.2,2.2,0.2,3.4,0,5.5
+ c-0.5,6.3-5.3,11.2-13,11.2h-5.8C45,29.9,45,29.9,45,26.4z M54.3,25.4c5.3,0,8-3,8.3-7.1c0.1-1.8,0.1-2.8,0-4.6
+ c-0.3-4.2-3-6.1-8.3-6.1h-4.5v17.8H54.3z"/>
+ <path class="st0" d="M73.8,26.4V6.6c0-3.6,0-3.6,3.6-3.6h13.5c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2H78.6v6.9h11
+ c2.2,0,2.2,0,2.2,2.1c0,2.1,0,2.1-2.2,2.1h-11v6.9h12.3c2.3,0,2.3,0,2.3,2.2c0,2.3,0,2.3-2.3,2.3H77.4
+ C73.8,29.9,73.8,29.9,73.8,26.4z"/>
+ <path class="st0" d="M100,26.4v-21c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v20.2h11.9c2.4,0,2.4,0,2.4,2.2c0,2.2,0,2.2-2.4,2.2
+ h-13.1C100,29.9,100,29.9,100,26.4z"/>
+ <path class="st0" d="M125.8,27.7V5.3c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v22.4c0,2.3,0,2.3-2.4,2.3
+ C125.8,29.9,125.8,29.9,125.8,27.7z"/>
+ <path class="st0" d="M137.4,27.7V6.6c0-3.6,0-3.6,3.6-3.6h13.5c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2h-12.2v7.2h11.3
+ c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2h-11.3v8.6c0,2.3,0,2.3-2.4,2.3S137.4,29.9,137.4,27.7z"/>
+ <path class="st0" d="M24.2,3.1H5.5c-2.4,0-2.4,0-2.4,2.2c0,2.2,0,2.2,2.4,2.2h7v4.7v3.2l4.8-3.7v-1.1V7.5h7c2.4,0,2.4,0,2.4-2.2
+ C26.6,3.1,26.6,3.1,24.2,3.1z"/>
+ <path class="st1" d="M12.5,20v7.6c0,2.3,0,2.3,2.4,2.3c2.4,0,2.4,0,2.4-2.3V16.3L12.5,20z"/>
+ <g>
+ <path class="st0" d="M165.9,3.1h18.7c2.4,0,2.4,0,2.4,2.2c0,2.2,0,2.2-2.4,2.2h-7v4.7v3.2l-4.8-3.7v-1.1V7.5h-7
+ c-2.4,0-2.4,0-2.4-2.2C163.5,3.1,163.5,3.1,165.9,3.1z"/>
+ <path class="st1" d="M177.6,20v7.6c0,2.3,0,2.3-2.4,2.3c-2.4,0-2.4,0-2.4-2.3V16.3L177.6,20z"/>
+ </g>
+</g>
+</svg>
diff --git a/web/pandas/static/img/partners/two_sigma.svg b/web/pandas/static/img/partners/two_sigma.svg
new file mode 100644
index 0000000000000..d38df12766ed6
--- /dev/null
+++ b/web/pandas/static/img/partners/two_sigma.svg
@@ -0,0 +1 @@
+<svg width="230" height="42" viewBox="0 0 230 42" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><title>Logo</title><defs><path id="a" d="M19.436 21.668V1.025H0v20.643h19.435z"></path></defs><g fill="none" fill-rule="evenodd"><path fill="#2D2D2D" d="M59.06 13.464h-7.137v-3.155h17.811v3.155H62.6V30.95h-3.54zm14.01-3.155h3.745l4.747 15.66h.06l4.483-15.66h3.301l4.454 15.66h.059l4.777-15.66h3.716L95.895 30.95H92.09l-4.335-15.127h-.059L83.361 30.95h-3.804zm41.214-.355c5.986 0 10.527 4.158 10.527 10.556 0 6.55-4.541 10.794-10.527 10.794-5.985 0-10.558-4.245-10.558-10.794 0-6.398 4.573-10.556 10.558-10.556m0 18.285c3.892 0 6.93-2.89 6.93-7.729 0-4.658-3.007-7.518-6.93-7.518-3.922 0-6.93 2.86-6.93 7.518 0 4.839 3.038 7.73 6.93 7.73m40.846-17.931h3.539V30.95h-3.54V19.41zm18.744-.355c2.832 0 5.222.885 7.313 2.33 0 0-2.026 2.374-2.128 2.311-1.56-1-3.21-1.574-5.096-1.574-4.247 0-7.048 3.068-7.048 7.433 0 4.746 2.624 7.785 7.048 7.785 1.534 0 3.067-.385 4.13-1.003v-4.897h-5.19v-2.623h8.462v9.347c-2.007 1.416-4.63 2.24-7.49 2.24-6.46 0-10.587-4.363-10.587-10.85 0-6.075 4.187-10.499 10.586-10.499m12.506.355h3.57l6.812 9.701 6.811-9.701h3.541V30.95h-3.421V15.558l-6.962 9.73-6.958-9.73V30.95h-3.392z"></path><g transform="translate(210.418 9.283)"><mask id="b" fill="#fff"><use xlink:href="#a"></use></mask><path d="M7.639 1.025h4.158l7.64 20.643H15.63l-1.561-4.454H5.368l-1.533 4.454H0L7.639 1.025zM6.34 14.354h6.725L9.734 4.74h-.06L6.34 14.354z" fill="#2D2D2D" mask="url(#b)"></path></g><path d="M136.826 26.498c1.861 1.007 3.618 1.68 5.887 1.68 2.715 0 4.069-1.18 4.069-2.83 0-4.66-11.616-1.594-11.616-9.466 0-3.303 2.74-5.928 7.37-5.928 2.714 0 5.443.653 7.579 1.902l-2.314 2.361c-1.68-.72-3.11-1.137-5.146-1.137-2.389 0-3.806 1.21-3.806 2.744 0 4.63 11.62 1.473 11.62 9.494 0 3.393-2.567 5.985-7.756 5.985-3.035 0-6.33-1.076-8.273-2.419l2.386-2.386z" fill="#2D2D2D"></path><path fill="#009AA6" d="M20.625 0L0 20.63l20.625 20.628 20.63-20.628z"></path><path 
d="M9.748 26.478c-.16-6.605 7.789-5.746 7.789-9.13 0-1.1-.863-2.041-2.784-2.041-1.401 0-2.743.701-3.724 1.602l-1.46-1.463c1.259-1.18 3.223-2.14 5.284-2.14 3.304 0 4.986 1.842 4.986 4.003 0 4.986-7.728 4.104-7.728 8.27h7.607v1.98h-9.95l-.02-1.081zm15.937-.5c-1.521 0-2.423-.98-2.423-2.862 0-2.404 1.602-4.566 3.525-4.566 1.5 0 2.402.981 2.402 2.883 0 2.401-1.582 4.545-3.504 4.545zm9.713-9.25h-8.444v.003c-3.437.005-6.033 2.745-6.033 6.403 0 2.905 1.881 4.666 4.544 4.666 3.464 0 6.067-2.743 6.067-6.386 0-1.182-.313-2.173-.856-2.935h2.947l1.775-1.75z" fill="#FFF"></path></g></svg>
diff --git a/web/pandas/static/img/partners/ursa_labs.svg b/web/pandas/static/img/partners/ursa_labs.svg
new file mode 100644
index 0000000000000..cacc80e337d25
--- /dev/null
+++ b/web/pandas/static/img/partners/ursa_labs.svg
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 23.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ viewBox="0 0 359 270" style="enable-background:new 0 0 359 270;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill-rule:evenodd;clip-rule:evenodd;fill:#404040;}
+ .st1{filter:url(#Adobe_OpacityMaskFilter);}
+ .st2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;}
+ .st3{mask:url(#mask-2_1_);}
+</style>
+<title>HOME 1 Copy 8</title>
+<desc>Created with Sketch.</desc>
+<g id="HOME-1-Copy-8">
+ <g id="Group" transform="translate(20.000000, 20.000000)">
+ <path id="URSA-LABS-Copy" class="st0" d="M0,158.4h9.1V214c0,0.3,0,0.7,0.1,1.1c0,0.3,0,0.9,0.1,1.6s0.2,1.5,0.6,2.3
+ c0.3,0.8,0.9,1.5,1.6,2.1c0.7,0.6,1.8,0.9,3.3,0.9c0.3,0,0.9,0,1.6-0.1c0.7-0.1,1.4-0.4,2.1-0.9c1-0.9,1.6-2,1.8-3.3
+ s0.3-3.2,0.4-5.5v-53.8h9.2v54.4c0,0.6,0,1.3-0.1,2.1c-0.1,0.8-0.2,1.7-0.3,2.6s-0.3,1.8-0.5,2.6c-0.7,2.3-1.7,4.1-3,5.4
+ c-1.3,1.3-2.7,2.3-4.2,2.9c-1.5,0.7-2.9,1.1-4.2,1.2c-1.3,0.1-2.3,0.2-3,0.2c-0.6,0-1.5-0.1-2.7-0.2c-1.2-0.1-2.5-0.5-3.8-1
+ s-2.6-1.4-3.8-2.5c-1.2-1.1-2.2-2.7-3-4.6c-0.4-1-0.7-2.1-0.9-3.3c-0.2-1.2-0.3-2.9-0.4-5V158.4z M44,158.4h17
+ c0.6,0,1.2,0,1.7,0.1c0.6,0.1,1.3,0.2,2.2,0.3c0.9,0.1,1.7,0.4,2.6,0.8c0.8,0.4,1.6,1.1,2.3,2c0.7,0.9,1.2,2.1,1.6,3.7
+ c0.4,1.8,0.6,5.1,0.6,10.1c0,1.3,0,2.7-0.1,4.1c0,1.4-0.1,2.8-0.2,4.2c-0.1,0.9-0.3,1.9-0.4,2.9s-0.4,1.9-0.7,2.7
+ c-0.4,0.9-0.9,1.6-1.6,2.1s-1.3,0.8-2,1c-0.7,0.2-1.3,0.3-1.9,0.3H64v0.5c1.3,0.1,2.4,0.3,3.3,0.6c0.9,0.3,1.8,1,2.5,2.1
+ c0.8,1.3,1.3,2.7,1.5,4.3c0.2,1.6,0.3,3.9,0.3,6.8v7.7c0,2,0,3.6,0.1,4.9c0.1,1.3,0.2,2.4,0.3,3.3c0.1,0.9,0.3,1.8,0.5,2.7
+ c0.2,0.9,0.6,1.8,1,2.9h-9.7c-0.3-1.7-0.6-3-0.8-4.1s-0.3-2.2-0.4-3.2c-0.1-1-0.2-2.1-0.2-3.2c0-1.1-0.1-2.5-0.1-4.2v-5
+ c-0.1-1.2-0.1-2.4-0.2-3.6c0-1.2-0.1-2.4-0.3-3.6c-0.1-0.9-0.3-1.7-0.5-2.5c-0.2-0.8-0.6-1.5-1.2-2c-0.5-0.3-1-0.5-1.5-0.6
+ s-1-0.2-1.6-0.2h-3.8v32.4H44V158.4z M53.4,166.9v21.7h4.4c1.2,0,2.2-0.2,2.9-0.6c0.7-0.4,1.2-1.2,1.6-2.5
+ c0.2-0.9,0.3-2.3,0.4-4.2s0.1-4.1,0.1-6.6c0-0.7,0-1.5-0.1-2.2c0-0.8-0.1-1.5-0.2-2.2c-0.1-1.4-0.4-2.3-1-2.8
+ c-0.3-0.3-0.8-0.5-1.3-0.5c-0.5,0-1.2,0-2.2,0H53.4z M110.6,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6
+ c-0.2-0.5-0.6-1-1.1-1.4s-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6c-0.6,0.4-1.1,1-1.4,1.7c-0.3,0.7-0.5,1.5-0.6,2.3
+ c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4c0.2,1.2,0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2s1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1
+ c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9
+ c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7
+ c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3c-2.9,0-5.1-0.5-6.8-1.4s-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3
+ s-0.4-3.8-0.5-5.9V203h8.6v12.8c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3
+ c1.1,0,2-0.3,2.7-0.8c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4
+ c-0.5-1.7-1.4-3.7-2.7-5.9c-1.3-2.3-2.8-4.5-4.3-6.6s-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6
+ c-0.6-1.4-1.1-3-1.5-4.7s-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6
+ c0.5,0,1.2,0,2.3,0.1c1,0.1,2.1,0.3,3.3,0.7c1.1,0.4,2.2,1.1,3.3,2c1.1,0.9,1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1
+ C110.5,166.6,110.5,167.7,110.6,169.1z M140.1,158.4l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H140.1z M133.5,183
+ l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8H134
+ c-0.1,1.9-0.1,3.8-0.2,5.7C133.7,179.2,133.6,181.1,133.5,183z M190.2,158.4V220h15.4v8.7h-24.7v-70.3H190.2z M232,158.4
+ l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H232z M225.4,183l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8
+ c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8h-0.8c-0.1,1.9-0.1,3.8-0.2,5.7
+ C225.6,179.2,225.5,181.1,225.4,183z M251.9,158.4h16.5c1.5,0,2.9,0.1,4.4,0.2s2.8,0.8,3.9,1.8c1.3,1.2,2,2.7,2.2,4.5
+ c0.2,1.8,0.3,4.3,0.4,7.4c0,0.6,0,1.2,0.1,1.8c0,0.6,0.1,1.2,0.1,1.8c0,1.1,0,2.2-0.1,3.3c0,1.1-0.1,2.2-0.2,3.3
+ c0,0.2,0,0.9-0.1,2.1c-0.1,1.2-0.3,2.3-0.8,3.3c-0.4,0.7-1,1.3-1.7,1.8c-0.7,0.5-1.4,0.8-2.2,1c-0.4,0.1-0.8,0.2-1.3,0.2
+ c-0.5,0-0.8,0-0.9,0.1v0.5c1.3,0.1,2.4,0.4,3.5,0.7c1,0.4,1.9,1.1,2.6,2.2c0.5,1,0.8,2.2,0.9,3.7c0.1,1.5,0.1,3.4,0.1,5.9
+ c0.1,0.9,0.1,1.9,0.1,2.8v7c0,1.4-0.1,2.8-0.2,4.3c0,0.2,0,0.6-0.1,1.2c0,0.6-0.2,1.3-0.4,2.1c-0.2,0.8-0.5,1.6-0.9,2.5
+ s-1,1.6-1.7,2.3c-1.4,1.1-3,1.8-4.9,1.9s-3.6,0.2-5.3,0.2h-14.2V158.4z M260.9,166.8v21.1h3.6c1.5-0.1,2.7-0.2,3.7-0.5
+ c1-0.3,1.6-1.3,1.8-3c0.2-1.4,0.3-3.8,0.3-7.1c0-2.2-0.1-4.4-0.3-6.6c-0.1-1.7-0.4-2.8-1-3.3c-0.3-0.3-0.8-0.5-1.3-0.5
+ c-0.5,0-1.2,0-2.1,0H260.9z M260.9,195.5V220h4.8c0.5,0,1,0,1.5,0c0.5,0,0.9-0.1,1.3-0.2c0.4-0.1,0.7-0.3,1-0.6
+ c0.3-0.3,0.5-0.8,0.6-1.4c0-0.3,0-0.7,0.1-1.4c0-0.7,0.1-1.5,0.1-2.4c0-0.9,0.1-1.9,0.1-2.9c0-1.1,0.1-2.1,0.1-3.1
+ c0-1.2,0-2.4-0.1-3.5c0-1.2-0.1-2.3-0.2-3.5c-0.1-0.7-0.2-1.4-0.3-2.3c-0.1-0.9-0.4-1.6-1-2.1c-0.4-0.3-0.9-0.5-1.4-0.6
+ s-1-0.1-1.5-0.1H260.9z M318.4,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6c-0.2-0.5-0.6-1-1.1-1.4
+ c-0.5-0.4-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6s-1.1,1-1.4,1.7s-0.5,1.5-0.6,2.3c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4
+ s0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2c0.6,1.1,1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6
+ c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6
+ c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3
+ c-2.9,0-5.1-0.5-6.8-1.4c-1.6-0.9-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3c-0.3-1.9-0.4-3.8-0.5-5.9V203h8.6v12.8
+ c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3c1.1,0,2-0.3,2.7-0.8
+ c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4c-0.5-1.7-1.4-3.7-2.7-5.9
+ c-1.3-2.3-2.8-4.5-4.3-6.6c-1.5-2.1-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6c-0.6-1.4-1.1-3-1.5-4.7
+ c-0.4-1.8-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6c0.5,0,1.2,0,2.3,0.1
+ c1,0.1,2.1,0.3,3.3,0.7s2.2,1.1,3.3,2s1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1C318.2,166.6,318.3,167.7,318.4,169.1z"/>
+ <g id="Group-3-Copy" transform="translate(47.000000, 0.000000)">
+ <g id="Clip-2">
+ </g>
+ <defs>
+ <filter id="Adobe_OpacityMaskFilter" filterUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9">
+ <feColorMatrix type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0"/>
+ </filter>
+ </defs>
+ <mask maskUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9" id="mask-2_1_">
+ <g class="st1">
+ <polygon id="path-1_1_" class="st2" points="0,0 225.9,0 225.9,123.9 0,123.9 "/>
+ </g>
+ </mask>
+ <g id="Page-1" class="st3">
+ <g id="Mask">
+ <path class="st0" d="M177.2,54.3c6.1,21.2,19.4,48.5,24,54.7c5.3-1.2,9.1,1.2,12.4,5.1c-1.2,0.9-2.7,1.5-3.4,2.6
+ c-2.7,4.4-6.9,3-10.7,3.2c-2.8,0.2-5.6,0.3-8.4,0.3c-0.9,0-1.8-0.3-2.7-0.5c-1-0.3-1.9-1-2.8-1c-2.5,0.1-4.7,0-7.1-1.1
+ c-1-0.5-2.6,0.9-3.6-0.8c-1.1-1.8-2.2-3.6-3.4-5.5c-1.2,0.2-2.2,0.4-3.4,0.6c-2.4-3-3.4-14.8-6.1-17.7
+ c-0.6-0.7-2.1-2.2-3.8-2.7c-0.3-0.9-5.4-7.2-5.9-8.7c-0.2-0.5-0.3-1.2-0.7-1.4c-3.1-2-4.2-4.9-4-8.5c0-0.4-0.2-0.7-0.4-1.7
+ c-1.2,2.7-2.2,4.8-3.2,7.1c-0.6,1.4-1,2.9-1.8,4.3c-0.5,0.9-1.3,1.6-2,2.3c-2.4,2.2-1.8,0.9-3.2,3.6c-1.1,2-2,4-3,6.1
+ c-0.5,1.1-0.9,2.2-1.1,3.3c-0.7,4.1-3.2,7.6-1.5,11.2c3,0.6,6.3,0.5,8.6,2c2.2,1.5,3.5,4.5,5,6.7c-3.1,0.5-5.9,1.2-8.7,1.4
+ c-3.8,0.3-7.6,0.2-11.3,0.2c-5,0-10.1-0.1-15.1-0.1c-2.6,0-3.9-1.5-5.4-3.7c-2.1-3.1-1.1-6-0.8-9.1c0.1-0.8,0-3.3-0.1-4.2
+ c-0.1-0.9-0.1-1.9,0-2.9c0.2-1.3,0.8-2.6,0.9-3.9c0.1-1.5-0.4-3-0.4-4.5c0-1.5,0.1-3.1,0.5-4.6c0.7-2.7-0.1,0,0.7-2.7
+ c0.1-0.2,0-0.7,0-0.8c-0.9-3.6,1.8-6,2.8-8.8c0-0.1,0-0.1-0.1-0.5c-1.8,1.8-4.1,0.8-6.1,1.2c-2.9,0.6-5.7,2.1-8,3
+ c-1.4-0.1-2.5-0.4-3.5-0.2c-2,0.5-3.9,1.1-6.2,0.9c-2.5-0.2-5.1,0.6-7.7,0.8c-2.2,0.2-4.8,0.9-6.5,0c-1.5-0.7-2.8-0.9-4.4-1
+ c-1.6-0.1-2.4,0.7-2.6,2.1c-1.1,6.3-2.3,12.7-3.1,19.1c-0.4,3.3-0.2,6.6-0.2,9.9c0,1.5,0.6,2.5,1.9,3.5
+ c1.5,1.1,2.6,2.7,3.6,4.3c0.8,1.3,0.6,2.6-1.5,2.7c-7.3,0.2-14.6,0.5-21.9,0.4c-2.1,0-4.2-1.5-6.2-2.5
+ c-0.3-0.2-0.4-1.1-0.4-1.7c0-4.4,0-13.5,0-18.4c-1,0.6-1.3,0.8-1.6,1c-2.5,2.3-4.9,4.1-7.3,6.4c-1.9,1.8-1.6,3.3,0.2,5.4
+ c2.4,2.7,4.4,5.7,4.4,9.5c0,2.5-2.2,3.2-3.8,3.3c-5.7,0.4-11.5,0.4-17.2,0.4c-2.8,0-3.8-1.5-4.4-4.2
+ c-1.2-5.4-2.2-10.8-4.3-16.1c-1.6-4.1-2-8.9,1.5-13c5.1-5.9,9.5-12.3,12.8-19.5c1-2.2,1.4-3.8,0.4-6.1c-4.9-1-7.1-3.7-8.2-8.7
+ c-1-4.6-0.2-8.9,1-13.2c2.3-7.8,4.1-11,8.4-18c5.6-9,13.4-15.5,22.8-20.2c11.3-5.6,23.3-5.5,35.3-4.2
+ c16.2,1.6,32.4,3.6,48.6,5.3c1.3,0.1,2.9-0.2,4.1-0.8c7.7-3.9,15.5-4.2,23.6-1.4c5.6,1.9,11.4,3.6,17.1,5.2
+ c2,0.6,4.1,0.8,6.2,1.1c5.7,0.9,11.5,1.8,17.3,2.4c2.9,0.3,5.9,0.1,8.8,0.3c0.7,0,1.5,0.3,2.1,0.7c2.6,1.8,5.1,3.7,7.5,5.6
+ c1.6,1.2,3.2,2.3,4.5,3.8c0.6,0.7,0.7,1.9,0.9,2.9c0.3,1.1,0.3,2.6,0.9,3.4c2.6,3.1,5.3,6,8.1,8.9c0.9,1,1.1,1.7,0.3,2.9
+ c-1.2,1.6-1.8,3.7-3.3,4.8c-3.1,2.2-6.3,4.3-10.7,3.2c-2.5-0.6-5.5,0.5-8.2,0.8c-2.1,0.3-4.3,0.2-6.2,0.9
+ c-4.1,1.6-8.5,1.1-12.5,2.3c-1.5,0.4-2.8,1.2-4.3,1.6C179.2,54.8,178.3,54.5,177.2,54.3"/>
+ </g>
+ </g>
+ </g>
+ </g>
+</g>
+</svg>
diff --git a/web/pandas/static/img/pydata_book.gif b/web/pandas/static/img/pydata_book.gif
new file mode 100644
index 0000000000000..db05c209704a2
Binary files /dev/null and b/web/pandas/static/img/pydata_book.gif differ
diff --git a/web/pandas/try.md b/web/pandas/try.md
new file mode 100644
index 0000000000000..20e119759df6f
--- /dev/null
+++ b/web/pandas/try.md
@@ -0,0 +1,21 @@
+# Try pandas online
+
+<section>
+ <pre data-executable>
+import pandas
+fibonacci = pandas.Series([1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144])
+fibonacci.sum()
+ </pre>
+ <script src="https://combinatronics.com/ines/juniper/v0.1.0/dist/juniper.min.js"></script>
+ <script>new Juniper({ repo: 'datapythonista/pandas-web' })</script>
+</section>
+
+## Interactive tutorials
+
+You can also try _pandas_ on [Binder](https://mybinder.org/) for one of the next topics:
+
+- Exploratory analysis of US presidents
+- Preprocessing the Titanic dataset to train a machine learning model
+- Forecasting the stock market
+
+_(links will be added soon)_
diff --git a/web/pandas_web.py b/web/pandas_web.py
new file mode 100644
index 0000000000000..d515d8a0e1cd7
--- /dev/null
+++ b/web/pandas_web.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+"""
+Simple static site generator for the pandas web.
+
+pandas_web.py takes a directory as parameter, and copies all the files into the
+target directory after converting markdown files into html and rendering both
+markdown and html files with a context. The context is obtained by parsing
+the file ``config.yml`` in the root of the source directory.
+
+The file should contain:
+```
+main:
+ template_path: <path_to_the_jinja2_templates_directory>
+ base_template: <template_file_all_other_files_will_extend>
+ ignore:
+ - <list_of_files_in_the_source_that_will_not_be_copied>
+ github_repo_url: <organization/repo-name>
+ context_preprocessors:
+ - <list_of_functions_that_will_enrich_the_context_parsed_in_this_file>
+ markdown_extensions:
+ - <list_of_markdown_extensions_that_will_be_loaded>
+```
+
+The rest of the items in the file will be added directly to the context.
+"""
+import argparse
+import datetime
+import importlib
+import operator
+import os
+import shutil
+import sys
+import time
+import typing
+
+import feedparser
+import markdown
+import jinja2
+import requests
+import yaml
+
+
+class Preprocessors:
+ """
+ Built-in context preprocessors.
+
+ Context preprocessors are functions that receive the context used to
+ render the templates, and enriches it with additional information.
+
+ The original context is obtained by parsing ``config.yml``, and
+ anything else needed just be added with context preprocessors.
+ """
+
+ @staticmethod
+ def navbar_add_info(context):
+ """
+ Items in the main navigation bar can be direct links, or dropdowns with
+ subitems. This context preprocessor adds a boolean field
+ ``has_subitems`` that tells which one of them every element is. It
+ also adds a ``slug`` field to be used as a CSS id.
+ """
+ for i, item in enumerate(context["navbar"]):
+ context["navbar"][i] = dict(
+ item,
+ has_subitems=isinstance(item["target"], list),
+ slug=(item["name"].replace(" ", "-").lower()),
+ )
+ return context
+
+ @staticmethod
+ def blog_add_posts(context):
+ """
+ Given the blog feed defined in the configuration yaml, this context
+ preprocessor fetches the posts in the feeds, and returns the relevant
+ information for them (sorted from newest to oldest).
+ """
+ posts = []
+ for feed_url in context["blog"]["feed"]:
+ feed_data = feedparser.parse(feed_url)
+ for entry in feed_data.entries:
+ published = datetime.datetime.fromtimestamp(
+ time.mktime(entry.published_parsed)
+ )
+ posts.append(
+ {
+ "title": entry.title,
+ "author": entry.author,
+ "published": published,
+ "feed": feed_data["feed"]["title"],
+ "link": entry.link,
+ "description": entry.description,
+ "summary": entry.summary,
+ }
+ )
+ posts.sort(key=operator.itemgetter("published"), reverse=True)
+ context["blog"]["posts"] = posts[: context["blog"]["num_posts"]]
+ return context
+
+ @staticmethod
+ def maintainers_add_info(context):
+ """
+ Given the active maintainers defined in the yaml file, it fetches
+ the GitHub user information for them.
+ """
+ context["maintainers"]["people"] = []
+ for user in context["maintainers"]["active"]:
+ resp = requests.get(f"https://api.github.com/users/{user}")
+ if context["ignore_io_errors"] and resp.status_code == 403:
+ return context
+ resp.raise_for_status()
+ context["maintainers"]["people"].append(resp.json())
+ return context
+
+ @staticmethod
+ def home_add_releases(context):
+ context["releases"] = []
+
+ github_repo_url = context["main"]["github_repo_url"]
+ resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases")
+ if context["ignore_io_errors"] and resp.status_code == 403:
+ return context
+ resp.raise_for_status()
+
+ for release in resp.json():
+ if release["prerelease"]:
+ continue
+ published = datetime.datetime.strptime(
+ release["published_at"], "%Y-%m-%dT%H:%M:%SZ"
+ )
+ context["releases"].append(
+ {
+ "name": release["tag_name"].lstrip("v"),
+ "tag": release["tag_name"],
+ "published": published,
+ "url": (
+ release["assets"][0]["browser_download_url"]
+ if release["assets"]
+ else ""
+ ),
+ }
+ )
+ return context
+
+
+def get_callable(obj_as_str: str) -> object:
+ """
+ Get a Python object from its string representation.
+
+ For example, for ``sys.stdout.write`` would import the module ``sys``
+ and return the ``write`` function.
+ """
+ components = obj_as_str.split(".")
+ attrs = []
+ while components:
+ try:
+ obj = importlib.import_module(".".join(components))
+ except ImportError:
+ attrs.insert(0, components.pop())
+ else:
+ break
+
+ if not obj:
+ raise ImportError(f'Could not import "{obj_as_str}"')
+
+ for attr in attrs:
+ obj = getattr(obj, attr)
+
+ return obj
+
+
+def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
+ """
+ Load the config yaml as the base context, and enrich it with the
+ information added by the context preprocessors defined in the file.
+ """
+ with open(config_fname) as f:
+ context = yaml.safe_load(f)
+
+ context["ignore_io_errors"] = ignore_io_errors
+ context.update(kwargs)
+
+ preprocessors = (
+ get_callable(context_prep)
+ for context_prep in context["main"]["context_preprocessors"]
+ )
+ for preprocessor in preprocessors:
+ context = preprocessor(context)
+ msg = f"{preprocessor.__name__} is missing the return statement"
+ assert context is not None, msg
+
+ return context
+
+
+def get_source_files(source_path: str) -> typing.Generator[str, None, None]:
+ """
+ Generate the list of files present in the source directory.
+ """
+ for root, dirs, fnames in os.walk(source_path):
+ root = os.path.relpath(root, source_path)
+ for fname in fnames:
+ yield os.path.join(root, fname)
+
+
+def extend_base_template(content: str, base_template: str) -> str:
+ """
+ Wrap document to extend the base template, before it is rendered with
+ Jinja2.
+ """
+ result = '{% extends "' + base_template + '" %}'
+ result += "{% block body %}"
+ result += content
+ result += "{% endblock %}"
+ return result
+
+
+def main(
+ source_path: str, target_path: str, base_url: str, ignore_io_errors: bool
+) -> int:
+ """
+ Copy every file in the source directory to the target directory.
+
+ For ``.md`` and ``.html`` files, render them with the context
+ before copyings them. ``.md`` files are transformed to HTML.
+ """
+ config_fname = os.path.join(source_path, "config.yml")
+
+ shutil.rmtree(target_path, ignore_errors=True)
+ os.makedirs(target_path, exist_ok=True)
+
+ sys.stderr.write("Generating context...\n")
+ context = get_context(config_fname, ignore_io_errors, base_url=base_url)
+ sys.stderr.write("Context generated\n")
+
+ templates_path = os.path.join(source_path, context["main"]["templates_path"])
+ jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path))
+
+ for fname in get_source_files(source_path):
+ if os.path.normpath(fname) in context["main"]["ignore"]:
+ continue
+
+ sys.stderr.write(f"Processing {fname}\n")
+ dirname = os.path.dirname(fname)
+ os.makedirs(os.path.join(target_path, dirname), exist_ok=True)
+
+ extension = os.path.splitext(fname)[-1]
+ if extension in (".html", ".md"):
+ with open(os.path.join(source_path, fname)) as f:
+ content = f.read()
+ if extension == ".md":
+ body = markdown.markdown(
+ content, extensions=context["main"]["markdown_extensions"]
+ )
+ content = extend_base_template(body, context["main"]["base_template"])
+ content = jinja_env.from_string(content).render(**context)
+ fname = os.path.splitext(fname)[0] + ".html"
+ with open(os.path.join(target_path, fname), "w") as f:
+ f.write(content)
+ else:
+ shutil.copy(
+ os.path.join(source_path, fname), os.path.join(target_path, dirname)
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Documentation builder.")
+ parser.add_argument(
+ "source_path", help="path to the source directory (must contain config.yml)"
+ )
+ parser.add_argument(
+ "--target-path", default="build", help="directory where to write the output"
+ )
+ parser.add_argument(
+ "--base-url", default="", help="base url where the website is served from"
+ )
+ parser.add_argument(
+ "--ignore-io-errors",
+ action="store_true",
+ help="do not fail if errors happen when fetching "
+ "data from http sources, and those fail "
+ "(mostly useful to allow github quota errors "
+ "when running the script locally)",
+ )
+ args = parser.parse_args()
+ sys.exit(
+ main(args.source_path, args.target_path, args.base_url, args.ignore_io_errors)
+ )
| - [ X ] closes #27357
- [X ] tests added / passed
- [ X ] passes `black pandas`
- [ X ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28684 | 2019-09-30T12:56:49Z | 2019-10-01T03:54:02Z | null | 2019-10-01T03:54:02Z |
BUG: make pct_change can handle the anchored freq #28664 | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 90606fb61ada8..3b7756256dcab 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -440,6 +440,7 @@ Reshaping
- :func:`qcut` and :func:`cut` now handle boolean input (:issue:`20303`)
- Fix to ensure all int dtypes can be used in :func:`merge_asof` when using a tolerance value. Previously every non-int64 type would raise an erroneous ``MergeError`` (:issue:`28870`).
- Better error message in :func:`get_dummies` when `columns` isn't a list-like value (:issue:`28383`)
+- Bug :meth:`Series.pct_change` where supplying an anchored frequency would throw a ValueError (:issue:`28664`)
Sparse
^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 47a0582edbea4..ddae2a26e10b5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10443,6 +10443,7 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwar
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
+ rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
if freq is None:
mask = isna(com.values_from_object(data))
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 7154975c6c73b..4ae00bca3e832 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -370,6 +370,16 @@ def test_pct_change(self, datetime_series):
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
+ def test_pct_change_with_duplicate_axis(self):
+ # GH 28664
+ common_idx = date_range("2019-11-14", periods=5, freq="D")
+ result = Series(range(5), common_idx).pct_change(freq="B")
+
+ # the reason that the expected should be like this is documented at PR 28681
+ expected = Series([np.NaN, np.inf, np.NaN, np.NaN, 3.0], common_idx)
+
+ tm.assert_series_equal(result, expected)
+
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
| - [x] closes #28664
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
pct_change didn't work when the freq is anchored(like `1W`, `1M`, `BM`)
so when the freq is anchored, use `data.asfreq(freq)` instead of the raw
`data`. | https://api.github.com/repos/pandas-dev/pandas/pulls/28681 | 2019-09-30T10:34:21Z | 2019-11-15T14:55:06Z | 2019-11-15T14:55:06Z | 2019-11-15T14:55:09Z |
BUG: Fix RangeIndex.get_indexer for decreasing RangeIndex | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index f904d69d6421b..9789c9fce3541 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -50,6 +50,7 @@ Indexing
^^^^^^^^
- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`).
+- Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`)
-
-
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 43445a0d5d5a2..6e2d500f4c5ab 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -388,8 +388,9 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
- # Work on reversed range for simplicity:
- start, stop, step = (self.stop - self.step, self.start + 1, -self.step)
+ # GH 28678: work on reversed range for simplicity
+ reverse = self._range[::-1]
+ start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
if not (is_integer_dtype(target_array) and target_array.ndim == 1):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 7e08a5deaff7a..627c5cc56e010 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -424,6 +424,14 @@ def test_get_indexer_limit(self):
expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize("stop", [0, -1, -2])
+ def test_get_indexer_decreasing(self, stop):
+ # GH 28678
+ index = RangeIndex(7, stop, -3)
+ result = index.get_indexer(range(9))
+ expected = np.array([-1, 2, -1, -1, 1, -1, -1, 0, -1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_join_outer(self):
# join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
| - [X] closes #28678
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28680 | 2019-09-30T06:05:30Z | 2019-10-02T06:50:51Z | 2019-10-02T06:50:51Z | 2019-10-02T16:13:03Z |
minor inconsistency in Categorical.remove_categories error message | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 751db2b88069d..f2bb20746741d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -223,7 +223,7 @@ Categorical
- Bug where :func:`merge` was unable to join on categorical and extension dtype columns (:issue:`28668`)
- :meth:`Categorical.searchsorted` and :meth:`CategoricalIndex.searchsorted` now work on unordered categoricals also (:issue:`21667`)
- Added test to assert roundtripping to parquet with :func:`DataFrame.to_parquet` or :func:`read_parquet` will preserve Categorical dtypes for string types (:issue:`27955`)
--
+- Changed the error message in :meth:`Categorical.remove_categories` to always show the invalid removals as a set (:issue:`28669`)
Datetimelike
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index bab1127e6e539..a14b91d78212d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1120,7 +1120,7 @@ def remove_categories(self, removals, inplace=False):
# GH 10156
if any(isna(removals)):
- not_included = [x for x in not_included if notna(x)]
+ not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index ab07b3c96a1db..42087b89a19b5 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -1,3 +1,5 @@
+import re
+
import numpy as np
import pytest
@@ -339,9 +341,13 @@ def test_remove_categories(self):
tm.assert_categorical_equal(cat, new)
assert res is None
- # removal is not in categories
- with pytest.raises(ValueError):
- cat.remove_categories(["c"])
+ @pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
+ def test_remove_categories_raises(self, removals):
+ cat = Categorical(["a", "b", "a"])
+ message = re.escape("removals must all be in old categories: {'c'}")
+
+ with pytest.raises(ValueError, match=message):
+ cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
| This pull request fixes minor inconsistency in Categorical.remove_categories error message
- [x] closes #28669
- [ ] tests added / passed
- passes `black pandas`
- passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- whatsnew entry
Changed the error message to show invalid removals as a set. Added tests for removal of null from the categories. Parameterized pytest. | https://api.github.com/repos/pandas-dev/pandas/pulls/28677 | 2019-09-30T02:34:07Z | 2019-10-22T15:46:04Z | 2019-10-22T15:46:04Z | 2019-10-22T15:46:10Z |
DOC: fix PR09,PR08 doc string errors in Timestamp, NaT, Timedelta class | diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 328fc26e4fef6..b17c6079d81fd 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -396,7 +396,7 @@ class NaTType(_NaT):
Parameters
----------
locale : string, default None (English locale)
- locale determining the language in which to return the month name
+ Locale determining the language in which to return the month name.
Returns
-------
@@ -411,7 +411,7 @@ class NaTType(_NaT):
Parameters
----------
locale : string, default None (English locale)
- locale determining the language in which to return the day name
+ Locale determining the language in which to return the day name.
Returns
-------
@@ -509,11 +509,11 @@ class NaTType(_NaT):
Parameters
----------
ordinal : int
- date corresponding to a proleptic Gregorian ordinal
+ Date corresponding to a proleptic Gregorian ordinal.
freq : str, DateOffset
- Offset which Timestamp will have
+ Offset to apply to the Timestamp.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
- Time zone for time which Timestamp will have.
+ Time zone for the Timestamp.
""")
# _nat_methods
@@ -534,7 +534,7 @@ class NaTType(_NaT):
Parameters
----------
tz : str or timezone object, default None
- Timezone to localize to
+ Timezone to localize to.
""")
today = _make_nat_func('today', # noqa:E128
"""
@@ -547,7 +547,7 @@ class NaTType(_NaT):
Parameters
----------
tz : str or timezone object, default None
- Timezone to localize to
+ Timezone to localize to.
""")
round = _make_nat_func('round', # noqa:E128
"""
@@ -555,27 +555,30 @@ class NaTType(_NaT):
Parameters
----------
- freq : a freq string indicating the rounding resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the rounding resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
@@ -585,7 +588,7 @@ default 'raise'
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
""")
floor = _make_nat_func('floor', # noqa:E128
"""
@@ -593,33 +596,36 @@ default 'raise'
Parameters
----------
- freq : a freq string indicating the flooring resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the flooring resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
""")
ceil = _make_nat_func('ceil', # noqa:E128
"""
@@ -627,33 +633,36 @@ default 'raise'
Parameters
----------
- freq : a freq string indicating the ceiling resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the ceiling resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
""")
tz_convert = _make_nat_func('tz_convert', # noqa:E128
@@ -694,32 +703,39 @@ default 'raise'
`ambiguous` parameter dictates how ambiguous times should be
handled.
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ The behavior is as follows:
- .. versionadded:: 0.24.0
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
+ .. versionadded:: 0.24.0
errors : 'raise', 'coerce', default None
- - 'raise' will raise a NonExistentTimeError if a timestamp is not
- valid in the specified timezone (e.g. due to a transition from
- or to DST time). Use ``nonexistent='raise'`` instead.
- - 'coerce' will return NaT if the timestamp can not be converted
+ Determine how errors should be handled.
+
+ The behavior is as follows:
+
+ * 'raise' will raise a NonExistentTimeError if a timestamp is not
+ valid in the specified timezone (e.g. due to a transition from
+ or to DST time). Use ``nonexistent='raise'`` instead.
+ * 'coerce' will return NaT if the timestamp can not be converted
into the specified timezone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index b232042c70eac..5181fff2e589f 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1222,7 +1222,7 @@ class Timedelta(_Timedelta):
'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds', 'sec', 'second',
'ms', 'milliseconds', 'millisecond', 'milli', 'millis', 'L',
'us', 'microseconds', 'microsecond', 'micro', 'micros', 'U',
- 'ns', 'nanoseconds', 'nano', 'nanos', 'nanosecond', 'N'}
+ 'ns', 'nanoseconds', 'nano', 'nanos', 'nanosecond', 'N'}.
**kwargs
Available kwargs: {days, seconds, microseconds,
milliseconds, minutes, hours, weeks}.
@@ -1323,7 +1323,8 @@ class Timedelta(_Timedelta):
Parameters
----------
- freq : a freq string indicating the rounding resolution
+ freq : str
+ Frequency string indicating the rounding resolution.
Returns
-------
@@ -1341,7 +1342,8 @@ class Timedelta(_Timedelta):
Parameters
----------
- freq : a freq string indicating the flooring resolution
+ freq : str
+ Frequency string indicating the flooring resolution.
"""
return self._round(freq, np.floor)
@@ -1351,7 +1353,8 @@ class Timedelta(_Timedelta):
Parameters
----------
- freq : a freq string indicating the ceiling resolution
+ freq : str
+ Frequency string indicating the ceiling resolution.
"""
return self._round(freq, np.ceil)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 6ca39d83afd25..261fd7d8068aa 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -251,11 +251,11 @@ class Timestamp(_Timestamp):
Parameters
----------
ordinal : int
- date corresponding to a proleptic Gregorian ordinal
+ Date corresponding to a proleptic Gregorian ordinal.
freq : str, DateOffset
- Offset which Timestamp will have
+ Offset to apply to the Timestamp.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
- Time zone for time which Timestamp will have.
+ Time zone for the Timestamp.
"""
return cls(datetime.fromordinal(ordinal),
freq=freq, tz=tz)
@@ -271,7 +271,7 @@ class Timestamp(_Timestamp):
Parameters
----------
tz : str or timezone object, default None
- Timezone to localize to
+ Timezone to localize to.
"""
if isinstance(tz, str):
tz = maybe_get_tz(tz)
@@ -289,7 +289,7 @@ class Timestamp(_Timestamp):
Parameters
----------
tz : str or timezone object, default None
- Timezone to localize to
+ Timezone to localize to.
"""
return cls.now(tz)
@@ -445,27 +445,30 @@ class Timestamp(_Timestamp):
Parameters
----------
- freq : a freq string indicating the rounding resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the rounding resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
@@ -475,7 +478,7 @@ default 'raise'
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
"""
return self._round(
freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent
@@ -487,33 +490,36 @@ default 'raise'
Parameters
----------
- freq : a freq string indicating the flooring resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the flooring resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
"""
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@@ -523,33 +529,36 @@ default 'raise'
Parameters
----------
- freq : a freq string indicating the ceiling resolution
- ambiguous : bool, 'NaT', default 'raise'
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ freq : str
+ Frequency string indicating the ceiling resolution.
+ ambiguous : bool or {'raise', 'NaT'}, default 'raise'
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
-default 'raise'
+ nonexistent : {'raise', 'shift_forward', 'shift_backward, 'NaT', \
+timedelta}, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
.. versionadded:: 0.24.0
Raises
------
- ValueError if the freq cannot be converted
+ ValueError if the freq cannot be converted.
"""
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
@@ -606,7 +615,7 @@ default 'raise'
Parameters
----------
locale : string, default None (English locale)
- locale determining the language in which to return the day name
+ Locale determining the language in which to return the day name.
Returns
-------
@@ -623,7 +632,7 @@ default 'raise'
Parameters
----------
locale : string, default None (English locale)
- locale determining the language in which to return the month name
+ Locale determining the language in which to return the month name.
Returns
-------
@@ -779,32 +788,39 @@ default 'raise'
`ambiguous` parameter dictates how ambiguous times should be
handled.
- - bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
- - 'NaT' will return NaT for an ambiguous time
- - 'raise' will raise an AmbiguousTimeError for an ambiguous time
+ The behavior is as follows:
+
+ * bool contains flags to determine if time is dst or not (note
+ that this flag is only applicable for ambiguous fall dst dates).
+ * 'NaT' will return NaT for an ambiguous time.
+ * 'raise' will raise an AmbiguousTimeError for an ambiguous time.
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- - 'shift_forward' will shift the nonexistent time forward to the
- closest existing time
- - 'shift_backward' will shift the nonexistent time backward to the
- closest existing time
- - 'NaT' will return NaT where there are nonexistent times
- - timedelta objects will shift nonexistent times by the timedelta
- - 'raise' will raise an NonExistentTimeError if there are
- nonexistent times
+ The behavior is as follows:
- .. versionadded:: 0.24.0
+ * 'shift_forward' will shift the nonexistent time forward to the
+ closest existing time.
+ * 'shift_backward' will shift the nonexistent time backward to the
+ closest existing time.
+ * 'NaT' will return NaT where there are nonexistent times.
+ * timedelta objects will shift nonexistent times by the timedelta.
+ * 'raise' will raise an NonExistentTimeError if there are
+ nonexistent times.
+ .. versionadded:: 0.24.0
errors : 'raise', 'coerce', default None
- - 'raise' will raise a NonExistentTimeError if a timestamp is not
- valid in the specified timezone (e.g. due to a transition from
- or to DST time). Use ``nonexistent='raise'`` instead.
- - 'coerce' will return NaT if the timestamp can not be converted
+ Determine how errors should be handled.
+
+ The behavior is as follows:
+
+ * 'raise' will raise a NonExistentTimeError if a timestamp is not
+ valid in the specified timezone (e.g. due to a transition from
+ or to DST time). Use ``nonexistent='raise'`` instead.
+ * 'coerce' will return NaT if the timestamp can not be converted
into the specified timezone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
| This fixes PR09,PR08 errors in Timestamp, NaT, Timedelta classes
Example:
```
pandas.Timestamp.ceil: Parameter "ambiguous" description should finish with "."
pandas.Timestamp.ceil: Parameter "nonexistent" description should finish with "."
pandas.Timestamp.day_name: Parameter "locale" description should finish with "."
pandas.Timestamp.floor: Parameter "ambiguous" description should finish with "."
pandas.Timestamp.floor: Parameter "nonexistent" description should finish with "."
pandas.Timestamp.fromordinal: Parameter "ordinal" description should finish with "."
pandas.Timestamp.fromordinal: Parameter "freq" description should finish with "."
pandas.Timestamp.month_name: Parameter "locale" description should finish with "."
pandas.Timestamp.now: Parameter "tz" description should finish with "."
pandas.Timestamp.round: Parameter "ambiguous" description should finish with "."
pandas.Timestamp.round: Parameter "nonexistent" description should finish with "."
pandas.Timestamp.today: Parameter "tz" description should finish with "."
pandas.Timestamp.tz_localize: Parameter "ambiguous" description should finish with "."
pandas.Timestamp.tz_localize: Parameter "nonexistent" description should finish with "."
pandas.Timestamp.tz_localize: Parameter "errors" description should finish with "."
```
closes #28673
- [x] closes #xxxx
- [x] tests added / passed
| https://api.github.com/repos/pandas-dev/pandas/pulls/28674 | 2019-09-29T20:48:20Z | 2019-10-01T16:59:29Z | null | 2019-10-03T23:59:22Z |
BUG: restore limit in RangeIndex.get_indexer | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 14682b706f924..f904d69d6421b 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -49,7 +49,7 @@ Interval
Indexing
^^^^^^^^
--
+- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`).
-
-
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8783351cc74d1..43445a0d5d5a2 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -380,8 +380,10 @@ def get_loc(self, key, method=None, tolerance=None):
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(self, target, method=None, limit=None, tolerance=None):
- if not (method is None and tolerance is None and is_list_like(target)):
- return super().get_indexer(target, method=method, tolerance=tolerance)
+ if com.any_not_none(method, tolerance, limit) or not is_list_like(target):
+ return super().get_indexer(
+ target, method=method, tolerance=tolerance, limit=limit
+ )
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 6b073c460ea08..6d239e96cd167 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -2217,6 +2217,22 @@ def test_reindex_frame_add_nat(self):
assert mask[-5:].all()
assert not mask[:-5].any()
+ def test_reindex_limit(self):
+ # GH 28631
+ data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]]
+ exp_data = [
+ ["A", "A", "A"],
+ ["B", "B", "B"],
+ ["C", "C", "C"],
+ ["D", "D", "D"],
+ ["D", "D", "D"],
+ [np.nan, np.nan, np.nan],
+ ]
+ df = DataFrame(data)
+ result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1)
+ expected = DataFrame(exp_data)
+ tm.assert_frame_equal(result, expected)
+
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
assert x[0].dtype == np.dtype("M8[ns]")
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 58b98297f00f3..7e08a5deaff7a 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -416,6 +416,14 @@ def test_get_indexer_backfill(self):
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
+ def test_get_indexer_limit(self):
+ # GH 28631
+ idx = RangeIndex(4)
+ target = RangeIndex(6)
+ result = idx.get_indexer(target, method="pad", limit=1)
+ expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_join_outer(self):
# join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
| - [x] closes #28631
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28671 | 2019-09-29T16:38:32Z | 2019-10-01T12:00:39Z | 2019-10-01T12:00:38Z | 2019-10-01T12:17:45Z |
Fix incorrect doc for to_datetime | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 32dc3c1f3e8f2..7b136fa29ecea 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -637,7 +637,7 @@ def to_datetime(
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
- origin : scalar, default is 'unix'
+ origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This PR fixes incorrect doc for `pandas.to_datetime`. | https://api.github.com/repos/pandas-dev/pandas/pulls/28670 | 2019-09-29T06:07:23Z | 2019-10-05T22:44:54Z | 2019-10-05T22:44:54Z | 2019-10-05T22:44:58Z |
DOC: Updating See Also section in IndexOpsMixin | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 910b05c47071d..8cdeaa666f94b 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -678,8 +678,10 @@ def _is_homogeneous_type(self):
See Also
--------
- DataFrame._is_homogeneous_type
- MultiIndex._is_homogeneous_type
+ DataFrame._is_homogeneous_type : Whether all the columns in a
+ DataFrame have the same dtype.
+ MultiIndex._is_homogeneous_type : Whether all the levels of a
+ MultiIndex have the same dtype.
"""
return True
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e4a44a89998e3..7aa9fc628f71b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -535,6 +535,13 @@ def _is_homogeneous_type(self) -> bool:
-------
bool
+ See Also
+ --------
+ Index._is_homogeneous_type : Whether the object has a single
+ dtype.
+ MultiIndex._is_homogeneous_type : Whether all the levels of a
+ MultiIndex have the same dtype.
+
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3273c4f8cd13b..b2bb50939551d 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -665,8 +665,10 @@ def _is_homogeneous_type(self):
See Also
--------
- Index._is_homogeneous_type
- DataFrame._is_homogeneous_type
+ Index._is_homogeneous_type : Whether the object has a single
+ dtype.
+ DataFrame._is_homogeneous_type : Whether all the columns in a
+ DataFrame have the same dtype.
Examples
--------
| - [x] Updated the "See Also" section for the IndexOpsMixin '''_is_homogenous_type'''
| https://api.github.com/repos/pandas-dev/pandas/pulls/28667 | 2019-09-28T17:14:14Z | 2019-10-02T04:06:43Z | 2019-10-02T04:06:43Z | 2019-10-02T04:06:48Z |
BUG: fix broken error message in ujson.encode() (GH18878) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index eb4b72d01d59a..b00d2157a9216 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -301,6 +301,7 @@ Other
- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`)
- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`)
- :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`)
+- Fix corrupted error message when calling ``pandas.libs._json.encode()`` on a 0d array (:issue:`18878`)
.. _whatsnew_1000.contributors:
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 22c42acea0150..48712dc68829d 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -1986,11 +1986,9 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->type = JT_DOUBLE;
return;
} else if (PyArray_Check(obj) && PyArray_CheckScalar(obj)) {
- tmpObj = PyObject_Repr(obj);
PyErr_Format(PyExc_TypeError,
- "%s (0d array) is not JSON serializable at the moment",
- PyBytes_AS_STRING(tmpObj));
- Py_DECREF(tmpObj);
+ "%R (0d array) is not JSON serializable at the moment",
+ obj);
goto INVALID;
}
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 69a246487ddf1..d6572ac7b7bfe 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -780,7 +780,9 @@ def test_array_float(self):
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
- with pytest.raises(TypeError):
+ # gh-18878
+ msg = re.escape("array(1) (0d array) is not JSON serializable at the moment")
+ with pytest.raises(TypeError, match=msg):
ujson.encode(np.array(1))
@pytest.mark.parametrize(
| - [x] closes #18878
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28666 | 2019-09-28T16:07:58Z | 2019-10-06T22:40:39Z | 2019-10-06T22:40:38Z | 2019-10-08T13:59:33Z |
BUG: Fix groupby.apply | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 8755abe642068..a80fdd6faba09 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -911,7 +911,7 @@ Plotting
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
--
+- Bug in :meth:`DataFrame.groupby.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`)
- Bug in :meth:`DataFrame.groupby` with multiple groups where an ``IndexError`` would be raised if any group contained all NA values (:issue:`20519`)
- Bug in :meth:`pandas.core.resample.Resampler.size` and :meth:`pandas.core.resample.Resampler.count` returning wrong dtype when used with an empty series or dataframe (:issue:`28427`)
- Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue:`28192`)
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 0019fc4b36d20..8571761f77265 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -1,3 +1,4 @@
+from copy import copy
from distutils.version import LooseVersion
from cython import Py_ssize_t
@@ -15,7 +16,7 @@ from numpy cimport (ndarray,
cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.lib import maybe_convert_objects
+from pandas._libs.lib import maybe_convert_objects, is_scalar
cdef _check_result_array(object obj, Py_ssize_t cnt):
@@ -492,14 +493,19 @@ def apply_frame_axis0(object frame, object f, object names,
# Need to infer if low level index slider will cause segfaults
require_slow_apply = i == 0 and piece is chunk
try:
- if piece.index is chunk.index:
- piece = piece.copy(deep='all')
- else:
+ if piece.index is not chunk.index:
mutated = True
except AttributeError:
# `piece` might not have an index, could be e.g. an int
pass
+ if not is_scalar(piece):
+ # Need to copy data to avoid appending references
+ if hasattr(piece, "copy"):
+ piece = piece.copy(deep="all")
+ else:
+ piece = copy(piece)
+
results.append(piece)
# If the data was modified inplace we need to
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 0e62569fffeb6..050b1e7c5d3b3 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -686,6 +686,17 @@ def test_apply_with_mixed_types():
tm.assert_frame_equal(result, expected)
+def test_func_returns_object():
+ # GH 28652
+ df = DataFrame({"a": [1, 2]}, index=pd.Int64Index([1, 2]))
+ result = df.groupby("a").apply(lambda g: g.index)
+ expected = Series(
+ [pd.Int64Index([1]), pd.Int64Index([2])], index=pd.Int64Index([1, 2], name="a")
+ )
+
+ tm.assert_series_equal(result, expected)
+
+
@pytest.mark.parametrize(
"group_column_dtlike",
[datetime.today(), datetime.today().date(), datetime.today().time()],
| - [x] closes #28652
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Makes sure that the output of `groupby.apply` is built up by value instead of by reference in `reduction.pyx` to avoid the behavior from #28652. | https://api.github.com/repos/pandas-dev/pandas/pulls/28662 | 2019-09-27T23:27:41Z | 2020-01-01T16:21:28Z | 2020-01-01T16:21:27Z | 2020-01-07T00:30:25Z |
PERF: Make matplotlib import lazy | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index b03c4f2238445..97a09fa8c50f5 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -204,7 +204,7 @@ import sys
import pandas
blacklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis',
- 'lxml', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
+ 'lxml', 'matplotlib', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
'tables', 'urllib.request', 'xlrd', 'xlsxwriter', 'xlwt'}
# GH#28227 for some of these check for top-level modules, while others are
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed05691d33d07..5ab4e6a476dd8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -108,7 +108,6 @@
from pandas.io.formats import console, format as fmt
from pandas.io.formats.printing import pprint_thing
-import pandas.plotting
# ---------------------------------------------------------------------
# Docstring templates
@@ -8362,11 +8361,25 @@ def isin(self, values):
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
- plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
- hist = pandas.plotting.hist_frame
- boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
+ @property
+ def plot(self):
+ # property instead of CachedAccessor to allow for lazy matplotlib import
+ from pandas.plotting._core import PlotAccessor
+
+ return PlotAccessor(self)
+
+ def hist(self, *args, **kwargs):
+ from pandas.plotting._core import hist_frame
+
+ return hist_frame(self, *args, **kwargs)
+
+ def boxplot(self, *args, **kwargs):
+ from pandas.plotting._core import boxplot_frame
+
+ return boxplot_frame(self, *args, **kwargs)
+
DataFrame._setup_axes(
["index", "columns"],
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..f23c0f86b7803 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -59,8 +59,6 @@
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
-from pandas.plotting import boxplot_frame_groupby
-
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
@@ -1691,7 +1689,11 @@ def groupby_series(obj, col=None):
results.index = ibase.default_index(len(results))
return results
- boxplot = boxplot_frame_groupby
+ def boxplot(self, *args, **kwargs):
+ # wrap to allow for lazy import of matplotlib
+ from pandas.plotting import boxplot_frame_groupby
+
+ return boxplot_frame_groupby(self, *args, **kwargs)
def _is_multi_agg_with_relabel(**kwargs):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e010e615e176e..78a519f632c71 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -611,8 +611,9 @@ def _make_wrapper(self, name):
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._selected_obj, name)
+
if not isinstance(f, types.MethodType):
- return self.apply(lambda self: getattr(self, name))
+ return self.apply(lambda x: getattr(x, name))
f = getattr(type(self._selected_obj), name)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c87e371354f63..ef7b671b31f21 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -79,7 +79,6 @@
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
-import pandas.plotting
__all__ = ["Series"]
@@ -4758,12 +4757,23 @@ def to_period(self, freq=None, copy=True):
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
- plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
sparse = CachedAccessor("sparse", SparseAccessor)
# ----------------------------------------------------------------------
# Add plotting methods to Series
- hist = pandas.plotting.hist_series
+
+ @property
+ def plot(self):
+ # property instead of CachedAccessor to allow for lazy matplotlib import
+ from pandas.plotting._core import PlotAccessor
+
+ return PlotAccessor(self)
+
+ def hist(self, *args, **kwargs):
+ # pass-through to allow for lazy matplotlib import
+ from pandas.plotting._core import hist_series
+
+ return hist_series(self, *args, **kwargs)
Series._setup_axes(
diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py
index ebe047c58b889..eb460e1915f37 100644
--- a/pandas/plotting/__init__.py
+++ b/pandas/plotting/__init__.py
@@ -56,6 +56,7 @@
For the discussion about the API see
https://github.com/pandas-dev/pandas/issues/26747.
"""
+
from pandas.plotting._core import (
PlotAccessor,
boxplot,
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index 5213e09f14067..a32df8a900b73 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -186,7 +186,7 @@ def _grouped_plot(
warnings.warn(
"figsize='default' is deprecated. Specify figure " "size by tuple instead",
FutureWarning,
- stacklevel=5,
+ stacklevel=6,
)
figsize = None
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index aabe16d5050f9..0ddaa49b9ef93 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -30,7 +30,8 @@
def test_initial_warning():
code = (
- "import pandas as pd; import matplotlib.pyplot as plt; "
+ "import pandas as pd; import pandas.plotting; "
+ "import matplotlib.pyplot as plt; "
"s = pd.Series(1, pd.date_range('2000', periods=12)); "
"fig, ax = plt.subplots(); "
"ax.plot(s.index, s.values)"
@@ -47,6 +48,7 @@ def test_registry_mpl_resets():
"import matplotlib.dates as mdates; "
"n_conv = len(units.registry); "
"import pandas as pd; "
+ "import pandas.plotting; "
"pd.plotting.register_matplotlib_converters(); "
"pd.plotting.deregister_matplotlib_converters(); "
"assert len(units.registry) == n_conv"
diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py
index df41b4b5b40d9..85bea6605cf38 100644
--- a/pandas/tseries/plotting.py
+++ b/pandas/tseries/plotting.py
@@ -1,3 +1,2 @@
-# flake8: noqa
-
+__all__ = ["tsplot"]
from pandas.plotting._matplotlib.timeseries import tsplot
| Cuts import time by about 23%, from 393ms to 303ms (note these measurements are pretty noisy). The percentage edges up to 26% if we ignore get_version, since that is negligible in a release.
Downside: we lose some docstrings on plotting methods. Will need to decide if it is worth a refactor to keep them in place. | https://api.github.com/repos/pandas-dev/pandas/pulls/28661 | 2019-09-27T20:46:37Z | 2019-10-01T14:03:20Z | null | 2019-11-21T20:00:40Z |
accept a dictionary in plot colors | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 70c30eb42169b..93e0df54bd0bf 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -502,6 +502,7 @@ Plotting
- :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`)
- :meth:`DataFrame.plot` now allow a ``backend`` keyword arugment to allow changing between backends in one session (:issue:`28619`).
- Bug in color validation incorrectly raising for non-color styles (:issue:`29122`).
+- :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`).
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 5341dc3a6338a..412495e4154ba 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -741,7 +741,10 @@ def _apply_style_colors(self, colors, kwds, col_num, label):
has_color = "color" in kwds or self.colormap is not None
nocolor_style = style is None or re.match("[a-z]+", style) is None
if (has_color or self.subplots) and nocolor_style:
- kwds["color"] = colors[col_num % len(colors)]
+ if isinstance(colors, dict):
+ kwds["color"] = colors[label]
+ else:
+ kwds["color"] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds="color"):
@@ -1354,12 +1357,13 @@ def _make_plot(self):
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
-
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds["color"] = colors
+ elif isinstance(colors, dict):
+ kwds["color"] = colors[label]
else:
kwds["color"] = colors[i % ncolors]
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index 927b9cf4e392a..21c8d907b4d14 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -27,7 +27,11 @@ def _get_standard_colors(
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
- colors = list(color) if is_list_like(color) else color
+ colors = (
+ list(color)
+ if is_list_like(color) and not isinstance(color, dict)
+ else color
+ )
else:
if color_type == "default":
# need to call list() on the result to copy so we don't
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index c51cd0e92eb3c..56ac205b99a32 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -416,3 +416,24 @@ def test_get_standard_colors_no_appending(self):
color_list = cm.gnuplot(np.linspace(0, 1, 16))
p = df.A.plot.bar(figsize=(16, 7), color=color_list)
assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor()
+
+ @pytest.mark.slow
+ def test_dictionary_color(self):
+ # issue-8193
+ # Test plot color dictionary format
+ data_files = ["a", "b"]
+
+ expected = [(0.5, 0.24, 0.6), (0.3, 0.7, 0.7)]
+
+ df1 = DataFrame(np.random.rand(2, 2), columns=data_files)
+ dic_color = {"b": (0.3, 0.7, 0.7), "a": (0.5, 0.24, 0.6)}
+
+ # Bar color test
+ ax = df1.plot(kind="bar", color=dic_color)
+ colors = [rect.get_facecolor()[0:-1] for rect in ax.get_children()[0:3:2]]
+ assert all(color == expected[index] for index, color in enumerate(colors))
+
+ # Line color test
+ ax = df1.plot(kind="line", color=dic_color)
+ colors = [rect.get_color() for rect in ax.get_lines()[0:2]]
+ assert all(color == expected[index] for index, color in enumerate(colors))
| - [ ] closes #8193
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Line/bar plot accepts color by dictonary.
@elisamalzoni | https://api.github.com/repos/pandas-dev/pandas/pulls/28659 | 2019-09-27T18:58:27Z | 2020-01-19T08:16:21Z | null | 2020-01-19T08:16:24Z |
DOC: Fixed PR08 docstring errors in pandas.DataFrame | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e4a44a89998e3..16f34fee5e1ff 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2066,7 +2066,7 @@ def to_feather(self, fname):
Parameters
----------
fname : str
- string file path
+ String file path.
"""
from pandas.io.feather_format import to_feather
@@ -4772,6 +4772,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
+ Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
@@ -4806,10 +4807,10 @@ def duplicated(self, subset=None, keep="first"):
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- - ``first`` : Mark duplicates as ``True`` except for the
- first occurrence.
- - ``last`` : Mark duplicates as ``True`` except for the
- last occurrence.
+ Determines which duplicates (if any) to mark.
+
+ - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
+ - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
@@ -6233,8 +6234,8 @@ def unstack(self, level=-1, fill_value=None):
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
- fill_value : replace NaN with this value if the unstack produces
- missing values
+ fill_value : int, string or dict
+ Replace NaN with this value if the unstack produces missing values
Returns
-------
@@ -6665,6 +6666,8 @@ def apply(
by result_type='broadcast'.
raw : bool, default False
+ Determines if row or column is passed as a Series or ndarry object:
+
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
@@ -7357,6 +7360,8 @@ def corr(self, method="pearson", min_periods=1):
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
+ Method of correlation:
+
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
@@ -7556,10 +7561,13 @@ def corrwith(self, other, axis=0, drop=False, method="pearson"):
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
+ The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
+ row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
+ Method of correlation:
+
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
@@ -7939,8 +7947,8 @@ def idxmin(self, axis=0, skipna=True):
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : bool, default True
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -7976,8 +7984,8 @@ def idxmax(self, axis=0, skipna=True):
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : bool, default True
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a3b9bec494854..cb21588c8ba1a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2559,10 +2559,10 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
path : str, buffer-like, or None
Destination for the serialized object.
If None, return generated bytes
- append : bool whether to append to an existing msgpack
- (default is False)
- compress : type of compressor (zlib or blosc), default to None (no
- compression)
+ append : bool, default False
+ Whether to append to an existing msgpack.
+ compress : str, default None
+ Type of compressor (zlib, blosc or None).
Returns
-------
@@ -2797,10 +2797,10 @@ def to_clipboard(self, excel=True, sep=None, **kwargs):
Parameters
----------
excel : bool, default True
- - True, use the provided separator, writing in a csv format for
- allowing easy pasting into excel.
- - False, write a string representation of the object to the
- clipboard.
+ Produce output in a csv format for easy pasting into excel.
+
+ - True, use the provided separator for csv pasting.
+ - False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
@@ -5024,15 +5024,15 @@ def sample(
Parameters
----------
func : function
- function to apply to the %(klass)s.
+ Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
- positional arguments passed into ``func``.
+ Positional arguments passed into ``func``.
kwargs : mapping, optional
- a dictionary of keyword arguments passed into ``func``.
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 8724382d9ec55..966a18e11a620 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -514,6 +514,8 @@ class PlotAccessor(PandasObject):
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
+ The kind of plot to produce:
+
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
@@ -537,7 +539,7 @@ class PlotAccessor(PandasObject):
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
- matplotlib line style per column
+ The matplotlib line style per column
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis
.. versionchanged:: 0.25.0
| This relates to: [27977](https://github.com/pandas-dev/pandas/issues/27977). I have fixed the doc PR08 formatting issues for:
```
pandas.DataFrame.insert: Parameter "column" description should start with a capital letter
pandas.DataFrame.apply: Parameter "raw" description should start with a capital letter
pandas.DataFrame.pipe: Parameter "func" description should start with a capital letter
pandas.DataFrame.pipe: Parameter "args" description should start with a capital letter
pandas.DataFrame.pipe: Parameter "kwargs" description should start with a capital letter
pandas.DataFrame.corr: Parameter "method" description should start with a capital letter
pandas.DataFrame.corrwith: Parameter "axis" description should start with a capital letter
pandas.DataFrame.corrwith: Parameter "method" description should start with a capital letter
pandas.DataFrame.drop_duplicates: Parameter "keep" description should start with a capital letter
pandas.DataFrame.duplicated: Parameter "keep" description should start with a capital letter
pandas.DataFrame.idxmax: Parameter "axis" description should start with a capital letter
pandas.DataFrame.idxmin: Parameter "axis" description should start with a capital letter
pandas.DataFrame.unstack: Parameter "fill_value" description should start with a capital letter
pandas.DataFrame.plot: Parameter "kind" description should start with a capital letter
pandas.DataFrame.plot: Parameter "style" description should start with a capital letter
pandas.DataFrame.to_feather: Parameter "fname" description should start with a capital letter
pandas.DataFrame.to_msgpack: Parameter "path" description should start with a capital letter
pandas.DataFrame.to_msgpack: Parameter "append" description should start with a capital letter
pandas.DataFrame.to_msgpack: Parameter "compress" description should start with a capital letter
pandas.DataFrame.to_clipboard: Parameter "excel" description should start with a capital letter
```
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Will continue to work through all PR08 docstring errors. | https://api.github.com/repos/pandas-dev/pandas/pulls/28655 | 2019-09-27T12:57:33Z | 2019-09-30T16:22:42Z | 2019-09-30T16:22:42Z | 2019-10-01T18:51:36Z |
CLN: Define and pin GroupBy properties without exec | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index b03c4f2238445..e13738b98833a 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -125,6 +125,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
# invgrep -R --include="*.py*" -E "from numpy import nan " pandas # GH#24822 not yet implemented since the offending imports have not all been removed
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Check for use of exec' ; echo $MSG
+ invgrep -R --include="*.py*" -E "[^a-zA-Z0-9_]exec\(" pandas
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
MSG='Check for pytest warns' ; echo $MSG
invgrep -r -E --include '*.py' 'pytest\.warns' pandas/tests/
RET=$(($RET + $?)) ; echo $MSG "DONE"
@@ -184,7 +188,7 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -R --include="*.rst" ".. ipython ::" doc/source
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Check that no file in the repo contains tailing whitespaces' ; echo $MSG
+ MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG
set -o pipefail
if [[ "$AZURE" == "true" ]]; then
# we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..0ab19448043f6 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -11,7 +11,7 @@
from functools import partial
from textwrap import dedent
import typing
-from typing import Any, Callable, FrozenSet, Iterator, Sequence, Type, Union
+from typing import Any, Callable, FrozenSet, Sequence, Type, Union
import warnings
import numpy as np
@@ -70,47 +70,63 @@
ScalarResult = typing.TypeVar("ScalarResult")
-def whitelist_method_generator(
- base_class: Type[GroupBy], klass: Type[FrameOrSeries], whitelist: FrozenSet[str]
-) -> Iterator[str]:
+def generate_property(name: str, klass: Type[FrameOrSeries]):
"""
- Yields all GroupBy member defs for DataFrame/Series names in whitelist.
+ Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
+
+ Parameters
+ ----------
+ name : str
+ klass : {DataFrame, Series}
+
+ Returns
+ -------
+ property
+ """
+
+ def prop(self):
+ return self._make_wrapper(name)
+
+ parent_method = getattr(klass, name)
+ prop.__doc__ = parent_method.__doc__ or ""
+ prop.__name__ = name
+ return property(prop)
+
+
+def pin_whitelisted_properties(klass: Type[FrameOrSeries], whitelist: FrozenSet[str]):
+ """
+ Create GroupBy member defs for DataFrame/Series names in a whitelist.
Parameters
----------
- base_class : Groupby class
- base class
klass : DataFrame or Series class
class where members are defined.
- whitelist : frozenset
+ whitelist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
- The generator yields a sequence of strings, each suitable for exec'ing,
- that define implementations of the named methods for DataFrameGroupBy
- or SeriesGroupBy.
+ class decorator
+ Notes
+ -----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
- property_wrapper_template = """@property
-def %(name)s(self) :
- \"""%(doc)s\"""
- return self.__getattr__('%(name)s')"""
-
- for name in whitelist:
- # don't override anything that was explicitly defined
- # in the base class
- if hasattr(base_class, name):
- continue
- # ugly, but we need the name string itself in the method.
- f = getattr(klass, name)
- doc = f.__doc__
- doc = doc if type(doc) == str else ""
- wrapper_template = property_wrapper_template
- params = {"name": name, "doc": doc}
- yield wrapper_template % params
+
+ def pinner(cls):
+ for name in whitelist:
+ if hasattr(cls, name):
+ # don't override anything that was explicitly defined
+ # in the base class
+ continue
+
+ prop = generate_property(name, klass)
+ setattr(cls, name, prop)
+
+ return cls
+
+ return pinner
class NDFrameGroupBy(GroupBy):
@@ -747,13 +763,9 @@ def filter(self, func, dropna=True, *args, **kwargs):
return self._apply_filter(indices, dropna)
+@pin_whitelisted_properties(Series, base.series_apply_whitelist)
class SeriesGroupBy(GroupBy):
- #
- # Make class defs of attributes on SeriesGroupBy whitelist
-
_apply_whitelist = base.series_apply_whitelist
- for _def_str in whitelist_method_generator(GroupBy, Series, _apply_whitelist):
- exec(_def_str)
@property
def _selection_name(self):
@@ -1368,15 +1380,11 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
return (filled / shifted) - 1
+@pin_whitelisted_properties(DataFrame, base.dataframe_apply_whitelist)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = base.dataframe_apply_whitelist
- #
- # Make class defs of attributes on DataFrameGroupBy whitelist.
- for _def_str in whitelist_method_generator(GroupBy, DataFrame, _apply_whitelist):
- exec(_def_str)
-
_block_agg_axis = 1
_agg_see_also_doc = dedent(
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e010e615e176e..f9c8e7748b7f7 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -562,8 +562,6 @@ def __getattr__(self, attr):
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
- if hasattr(self.obj, attr):
- return self._make_wrapper(attr)
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
| - [x] closes #16959
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
In an unrelated branch I found that this exec-generated code is a PITA to debug. So this PR finally gets rid of it. | https://api.github.com/repos/pandas-dev/pandas/pulls/28651 | 2019-09-27T03:36:56Z | 2019-10-01T13:07:53Z | 2019-10-01T13:07:53Z | 2019-10-01T13:43:40Z |
CLN: Exception catching in expressions | diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 90bb12b4cd727..46bc762e1a0b3 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -107,15 +107,12 @@ def _evaluate_numexpr(op, op_str, a, b, reversed=False):
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
- try:
- result = ne.evaluate(
- "a_value {op} b_value".format(op=op_str),
- local_dict={"a_value": a_value, "b_value": b_value},
- casting="safe",
- )
- except ValueError as detail:
- if "unknown type object" in str(detail):
- pass
+
+ result = ne.evaluate(
+ "a_value {op} b_value".format(op=op_str),
+ local_dict={"a_value": a_value, "b_value": b_value},
+ casting="safe",
+ )
if _TEST_MODE:
_store_test_result(result is not None)
@@ -140,21 +137,15 @@ def _where_numexpr(cond, a, b):
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
- try:
- result = ne.evaluate(
- "where(cond_value, a_value, b_value)",
- local_dict={
- "cond_value": cond_value,
- "a_value": a_value,
- "b_value": b_value,
- },
- casting="safe",
- )
- except ValueError as detail:
- if "unknown type object" in str(detail):
- pass
- except Exception as detail:
- raise TypeError(str(detail))
+ result = ne.evaluate(
+ "where(cond_value, a_value, b_value)",
+ local_dict={
+ "cond_value": cond_value,
+ "a_value": a_value,
+ "b_value": b_value,
+ },
+ casting="safe",
+ )
if result is None:
result = _where_standard(cond, a, b)
@@ -167,11 +158,10 @@ def _where_numexpr(cond, a, b):
def _has_bool_dtype(x):
+ if isinstance(x, ABCDataFrame):
+ return "bool" in x.dtypes
try:
- if isinstance(x, ABCDataFrame):
- return "bool" in x.dtypes
- else:
- return x.dtype == bool
+ return x.dtype == bool
except AttributeError:
return isinstance(x, (bool, np.bool_))
| https://api.github.com/repos/pandas-dev/pandas/pulls/28650 | 2019-09-26T23:57:04Z | 2019-09-27T21:18:52Z | 2019-09-27T21:18:52Z | 2019-09-27T21:31:23Z | |
CLN: Exception in io, plotting | diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 65d0c3d9fb17d..b7ecc6be43733 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -541,13 +541,9 @@ def _check_plot_works(f, filterwarnings="always", **kwargs):
assert_is_valid_plot_return_object(ret)
- try:
- kwargs["ax"] = fig.add_subplot(212)
- ret = f(**kwargs)
- except Exception:
- pass
- else:
- assert_is_valid_plot_return_object(ret)
+ kwargs["ax"] = fig.add_subplot(212)
+ ret = f(**kwargs)
+ assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
| https://api.github.com/repos/pandas-dev/pandas/pulls/28649 | 2019-09-26T23:47:58Z | 2019-10-03T19:23:24Z | null | 2019-10-03T19:23:24Z | |
CLN: Exception in nanops | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index fe88622a04bb4..eb442e8bf3486 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -97,17 +97,21 @@ def f(values, axis=None, skipna=True, **kwds):
for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
- try:
- if values.size == 0 and kwds.get("min_count") is None:
- # We are empty, returning NA for our type
- # Only applies for the default `min_count` of None
- # since that affects how empty arrays are handled.
- # TODO(GH-18976) update all the nanops methods to
- # correctly handle empty inputs and remove this check.
- # It *may* just be `var`
- return _na_for_min_count(values, axis)
-
- if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
+
+ if values.size == 0 and kwds.get("min_count") is None:
+ # We are empty, returning NA for our type
+ # Only applies for the default `min_count` of None
+ # since that affects how empty arrays are handled.
+ # TODO(GH-18976) update all the nanops methods to
+ # correctly handle empty inputs and remove this check.
+ # It *may* just be `var`
+ return _na_for_min_count(values, axis)
+
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
+ if kwds.get("mask", None) is None:
+ # `mask` is not recognised by bottleneck, would raise
+ # TypeError if called
+ kwds.pop("mask", None)
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
@@ -116,18 +120,8 @@ def f(values, axis=None, skipna=True, **kwds):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
- except Exception:
- try:
- result = alt(values, axis=axis, skipna=skipna, **kwds)
- except ValueError as e:
- # we want to transform an object array
- # ValueError message to the more typical TypeError
- # e.g. this is normally a disallowed function on
- # object arrays that contain strings
-
- if is_object_dtype(values):
- raise TypeError(e)
- raise
+ else:
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
| The only relevant exception that gets raised by bottleneck AFAICT is a TypeError when we pass a `mask` kwarg that it doesn't accept. By avoiding this case at the beginning, we avoid having to catch the exception. | https://api.github.com/repos/pandas-dev/pandas/pulls/28648 | 2019-09-26T23:46:05Z | 2019-09-27T11:45:46Z | 2019-09-27T11:45:46Z | 2019-09-27T14:24:36Z |
ENH: When using another plotting backend, minimize pre-processing | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index c11d94c381d6d..8a35e5084f55b 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -722,6 +722,11 @@ def __call__(self, *args, **kwargs):
)
kind = self._kind_aliases.get(kind, kind)
+
+ # when using another backend, get out of the way
+ if plot_backend.__name__ != "pandas.plotting._matplotlib":
+ return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
+
if kind not in self._all_kinds:
raise ValueError("{} is not a valid plot kind".format(kind))
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index 41b1a88b15acb..d4035f8eba102 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -86,3 +86,11 @@ def test_setting_backend_without_plot_raises():
def test_no_matplotlib_ok():
with pytest.raises(ImportError):
pandas.plotting._core._get_plot_backend("matplotlib")
+
+
+def test_extra_kinds_ok(monkeypatch, restore_backend):
+ # https://github.com/pandas-dev/pandas/pull/28647
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
+ pandas.set_option("plotting.backend", "pandas_dummy_backend")
+ df = pandas.DataFrame({"A": [1, 2, 3]})
+ df.plot(kind="not a real kind")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I ran into this while implementing the hvplot backend. In hvplot you can do:
```python
df.hvplot.hist(y='y', by='category')
```
but with the pandas version
```python
pd.options.plotting.backend= 'holoviews'
df.plot.hist(y='y', by='category')
```
will fail because `data = data[y]` is called before the plotting is passed off to the backend.
Basically it seems like backend writers should be free to get the passed pandas objects with as little interference as possible.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28647 | 2019-09-26T22:09:45Z | 2019-11-19T04:59:10Z | 2019-11-19T04:59:10Z | 2019-11-19T04:59:16Z |
CLN: remove unused categories/ordered handling in astype | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 152983451bc38..a3b9bec494854 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5772,7 +5772,7 @@ def _to_dict_of_blocks(self, copy=True):
for k, v, in self._data.to_dict(copy=copy).items()
}
- def astype(self, dtype, copy=True, errors="raise", **kwargs):
+ def astype(self, dtype, copy=True, errors="raise"):
"""
Cast a pandas object to a specified dtype ``dtype``.
@@ -5795,8 +5795,6 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
.. versionadded:: 0.20.0
- **kwargs : keyword arguments to pass on to the constructor
-
Returns
-------
casted : same type as caller
@@ -5882,7 +5880,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
- return self.astype(new_type, copy, errors, **kwargs)
+ return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
@@ -5894,9 +5892,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
for col_name, col in self.items():
if col_name in dtype:
results.append(
- col.astype(
- dtype=dtype[col_name], copy=copy, errors=errors, **kwargs
- )
+ col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(results.append(col.copy() if copy else col))
@@ -5911,9 +5907,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
else:
# else, only a single dtype is given
- new_data = self._data.astype(
- dtype=dtype, copy=copy, errors=errors, **kwargs
- )
+ new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 04c3b2b7714ef..b76cb5cbec626 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -574,18 +574,6 @@ def _astype(self, dtype, copy=False, errors="raise", **kwargs):
# may need to convert to categorical
if self.is_categorical_astype(dtype):
- # deprecated 17636
- for deprecated_arg in ("categories", "ordered"):
- if deprecated_arg in kwargs:
- raise ValueError(
- "Got an unexpected argument: {}".format(deprecated_arg)
- )
-
- categories = kwargs.get("categories", None)
- ordered = kwargs.get("ordered", None)
- if com.any_not_none(categories, ordered):
- dtype = CategoricalDtype(categories, ordered)
-
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
@@ -621,7 +609,7 @@ def _astype(self, dtype, copy=False, errors="raise", **kwargs):
# _astype_nansafe works fine with 1-d only
vals1d = values.ravel()
try:
- values = astype_nansafe(vals1d, dtype, copy=True, **kwargs)
+ values = astype_nansafe(vals1d, dtype, copy=True)
except (ValueError, TypeError):
# e.g. astype_nansafe can fail on object-dtype of strings
# trying to convert to float
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 9be79bf93ece7..6ee120f3bec64 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -228,11 +228,10 @@ def test_astype_dict_like(self, dtype_class):
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
- def test_astype_categories_deprecation_raises(self):
-
- # deprecated 17636
+ def test_astype_categories_raises(self):
+ # deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
- with pytest.raises(ValueError, match="Got an unexpected"):
+ with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize(
| This removes some unused code in the internals `astype`. There is a small change in behaviour though, when passing the categories/ordered keyword you now get TypeError instead of ValueError. But since that is Python's default behaviour, I would say this is rather a good fix. | https://api.github.com/repos/pandas-dev/pandas/pulls/28646 | 2019-09-26T20:14:18Z | 2019-09-27T11:48:03Z | 2019-09-27T11:48:03Z | 2019-09-27T14:58:51Z |
CLN: Exception in pickle loading | diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst
index 664325ac063c0..072d1bae2a2b9 100644
--- a/doc/source/whatsnew/v0.8.0.rst
+++ b/doc/source/whatsnew/v0.8.0.rst
@@ -156,8 +156,7 @@ Other new features
New plotting methods
~~~~~~~~~~~~~~~~~~~~
-.. ipython:: python
- :suppress:
+.. code-block:: python
import pandas as pd
fx = pd.read_pickle('data/fx_prices')
@@ -165,7 +164,7 @@ New plotting methods
``Series.plot`` now supports a ``secondary_y`` option:
-.. ipython:: python
+.. code-block:: python
plt.figure()
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 3a36713ccdbda..458c0c07c7602 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -4,7 +4,6 @@
import copy
import pickle as pkl
-import sys
from typing import TYPE_CHECKING
import warnings
@@ -25,14 +24,14 @@ def load_reduce(self):
try:
stack[-1] = func(*args)
return
- except Exception as e:
+ except TypeError as err:
# If we have a deprecated function,
# try to replace and try again.
msg = "_reconstruct: First argument must be a sub-type of ndarray"
- if msg in str(e):
+ if msg in str(err):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
@@ -40,22 +39,6 @@ def load_reduce(self):
except TypeError:
pass
- # try to re-encode the arguments
- if getattr(self, "encoding", None) is not None:
- args = tuple(
- arg.encode(self.encoding) if isinstance(arg, str) else arg
- for arg in args
- )
- try:
- stack[-1] = func(*args)
- return
- except TypeError:
- pass
-
- # unknown exception, re-raise
- if getattr(self, "is_verbose", None):
- print(sys.exc_info())
- print(func, args)
raise
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 621e8e09230b7..df1996aa0dee0 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -5,7 +5,7 @@
from numpy.lib.format import read_array
-from pandas.compat import pickle_compat as pc
+from pandas.compat import PY36, pickle_compat as pc
from pandas.io.common import _get_handle, _stringify_path
@@ -142,18 +142,24 @@ def read_pickle(path, compression="infer"):
# 1) try standard libary Pickle
# 2) try pickle_compat (older pandas version) to handle subclass changes
- # 3) try pickle_compat with latin1 encoding
+
+ excs_to_catch = (AttributeError, ImportError)
+ if PY36:
+ excs_to_catch += (ModuleNotFoundError,)
try:
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return pickle.load(f)
- except Exception:
- try:
- return pc.load(f, encoding=None)
- except Exception:
- return pc.load(f, encoding="latin1")
+ except excs_to_catch:
+ # e.g.
+ # "No module named 'pandas.core.sparse.series'"
+ # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
+ return pc.load(f, encoding=None)
+ except UnicodeDecodeError:
+ # e.g. can occur for files written in py27; see GH#28645
+ return pc.load(f, encoding="latin-1")
finally:
f.close()
for _f in fh:
diff --git a/pandas/tests/io/data/test_py27.pkl b/pandas/tests/io/data/test_py27.pkl
new file mode 100644
index 0000000000000..5308b864bc0c7
Binary files /dev/null and b/pandas/tests/io/data/test_py27.pkl differ
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index edd0b09185e71..23a16c885687f 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -377,3 +377,14 @@ def test_read(self, protocol, get_random_path):
df.to_pickle(path, protocol=protocol)
df2 = pd.read_pickle(path)
tm.assert_frame_equal(df, df2)
+
+
+def test_unicode_decode_error():
+ # pickle file written with py27, should be readable without raising
+ # UnicodeDecodeError, see GH#28645
+ path = os.path.join(os.path.dirname(__file__), "data", "test_py27.pkl")
+ df = pd.read_pickle(path)
+
+ # just test the columns are correct since the values are random
+ excols = pd.Index(["a", "b", "c"])
+ tm.assert_index_equal(df.columns, excols)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28645 | 2019-09-26T20:07:10Z | 2019-11-02T15:48:40Z | 2019-11-02T15:48:40Z | 2019-11-02T15:50:19Z |
Doc contribution pr06 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed05691d33d07..e4a44a89998e3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -322,7 +322,7 @@ class DataFrame(NDFrame):
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
- copy : boolean, default False
+ copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
@@ -1542,7 +1542,7 @@ def from_records(
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
- index : string, list of fields, array-like
+ index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
@@ -1553,7 +1553,7 @@ def from_records(
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
- coerce_float : boolean, default False
+ coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
@@ -3461,7 +3461,7 @@ def insert(self, loc, column, value, allow_duplicates=False):
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
- column : string, number, or hashable object
+ column : str, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
@@ -4775,7 +4775,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
- inplace : boolean, default False
+ inplace : bool, default False
Whether to drop duplicates in place or to return a copy
Returns
@@ -5197,7 +5197,7 @@ def swaplevel(self, i=-2, j=-1, axis=0):
Parameters
----------
- i, j : int, string (can be mixed)
+ i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
@@ -5723,12 +5723,12 @@ def update(
Parameters
----------%s
- index : string or object, optional
+ index : str or object, optional
Column to use to make new frame's index. If None, uses
existing index.
- columns : string or object
+ columns : str or object
Column to use to make new frame's columns.
- values : string, object or a list of the previous, optional
+ values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
@@ -5850,14 +5850,14 @@ def pivot(self, index=None, columns=None, values=None):
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
- margins : boolean, default False
+ margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals)
- dropna : boolean, default True
+ dropna : bool, default True
Do not include columns whose entries are all NaN
- margins_name : string, default 'All'
+ margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
- observed : boolean, default False
+ observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
@@ -6231,7 +6231,7 @@ def unstack(self, level=-1, fill_value=None):
Parameters
----------
- level : int, string, or list of these, default -1 (last level)
+ level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
@@ -6305,7 +6305,7 @@ def unstack(self, level=-1, fill_value=None):
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
- col_level : int or string, optional
+ col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
Returns
@@ -6894,11 +6894,11 @@ def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
- ignore_index : boolean, default False
+ ignore_index : bool, default False
If True, do not use the index labels.
- verify_integrity : boolean, default False
+ verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
- sort : boolean, default None
+ sort : bool, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
@@ -7940,7 +7940,7 @@ def idxmin(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
@@ -7977,7 +7977,7 @@ def idxmax(self, axis=0, skipna=True):
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
- skipna : boolean, default True
+ skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e010e615e176e..6facbe7e01c57 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1032,7 +1032,7 @@ class GroupBy(_GroupBy):
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
- name : string
+ name : str
Most users should ignore this
Returns
@@ -1253,7 +1253,7 @@ def std(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1276,7 +1276,7 @@ def var(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1311,7 +1311,7 @@ def sem(self, ddof=1):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
@@ -1623,7 +1623,7 @@ def pad(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -1649,7 +1649,7 @@ def backfill(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -2099,13 +2099,13 @@ def rank(
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
- ascending : boolean, default True
+ ascending : bool, default True
False for ranks by high (1) to low (N)
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
- pct : boolean, default False
+ pct : bool, default False
Compute percentage rank of data within each group
axis : int, default 0
The axis of the object over which to compute the rank.
@@ -2313,7 +2313,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
Parameters
----------
- periods : integer, default 1
+ periods : int, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 2ebfbed0b132a..2d37121d28308 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -48,17 +48,17 @@ class Grouper:
Parameters
----------
- key : string, defaults to None
+ key : str, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
- freq : string / frequency object, defaults to None
+ freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
- sort : boolean, default to False
+ sort : bool, default to False
whether to sort the resulting labels
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
@@ -69,7 +69,7 @@ class Grouper:
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
- loffset : string, DateOffset, timedelta object
+ loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
Returns
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 62662edb692a7..0b633602f3ed0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -904,8 +904,8 @@ def repeat(self, repeats, axis=None):
Parameters
----------
- name : string, optional
- deep : boolean, default False
+ name : str, optional
+ deep : bool, default False
dtype : numpy dtype or pandas type
Returns
@@ -1172,7 +1172,7 @@ def to_series(self, index=None, name=None):
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
- name : string, optional
+ name : str, optional
name of resulting Series. If None, defaults to name of original
index
@@ -1198,7 +1198,7 @@ def to_frame(self, index=True, name=None):
Parameters
----------
- index : boolean, default True
+ index : bool, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
@@ -1401,7 +1401,7 @@ def rename(self, name, inplace=False):
----------
name : label or list of labels
Name(s) to set.
- inplace : boolean, default False
+ inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
@@ -1494,7 +1494,7 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None):
Parameters
----------
- ascending : boolean, default True
+ ascending : bool, default True
False to sort in descending order
level, sort_remaining are compat parameters
@@ -3415,8 +3415,8 @@ def _reindex_non_unique(self, target):
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
- return_indexers : boolean, default False
- sort : boolean, default False
+ return_indexers : bool, default False
+ sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
@@ -3942,7 +3942,7 @@ def memory_usage(self, deep=False):
Parameters
----------
- cond : boolean array-like with the same length as self
+ cond : bool array-like with the same length as self
other : scalar, or array-like
Returns
@@ -4924,7 +4924,7 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
end : label, default None
If None, defaults to the end
step : int, default None
- kind : string, default None
+ kind : str, default None
Returns
-------
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index cce390d98c037..0b20df38e7d42 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -106,7 +106,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
- freq : string or pandas offset object, optional
+ freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
@@ -129,7 +129,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin):
.. deprecated:: 0.24.0
- closed : string or None, default None
+ closed : str or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
@@ -1371,8 +1371,8 @@ def indexer_between_time(
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
- include_start : boolean, default True
- include_end : boolean, default True
+ include_start : bool, default True
+ include_end : bool, default True
Returns
-------
@@ -1435,7 +1435,7 @@ def date_range(
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
- periods : integer, optional
+ periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
@@ -1598,22 +1598,22 @@ def bdate_range(
Parameters
----------
- start : string or datetime-like, default None
+ start : str or datetime-like, default None
Left bound for generating dates.
- end : string or datetime-like, default None
+ end : str or datetime-like, default None
Right bound for generating dates.
- periods : integer, default None
+ periods : int, default None
Number of periods to generate.
- freq : string or DateOffset, default 'B' (business daily)
+ freq : str or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'.
- tz : string or None
+ tz : str or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
- name : string, default None
+ name : str, default None
Name of the resulting DatetimeIndex.
- weekmask : string or None, default None
+ weekmask : str or None, default None
Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
only used when custom frequency strings are passed. The default
value None is equivalent to 'Mon Tue Wed Thu Fri'.
@@ -1627,7 +1627,7 @@ def bdate_range(
.. versionadded:: 0.21.0
- closed : string, default None
+ closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
**kwargs
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index a5d0e2cb3b58f..545bc21dd6d1b 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -423,7 +423,7 @@ def pad(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
limit of how many values to fill
Returns
@@ -514,7 +514,7 @@ def backfill(self, limit=None):
Parameters
----------
- limit : integer, optional
+ limit : int, optional
Limit of how many values to fill.
Returns
@@ -628,7 +628,7 @@ def fillna(self, method, limit=None):
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
- limit : integer, optional
+ limit : int, optional
Limit of how many consecutive missing values to fill.
Returns
@@ -823,7 +823,7 @@ def std(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
Degrees of freedom.
Returns
@@ -840,7 +840,7 @@ def var(self, ddof=1, *args, **kwargs):
Parameters
----------
- ddof : integer, default 1
+ ddof : int, default 1
degrees of freedom
Returns
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index b07647cf5b5fb..32dc3c1f3e8f2 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -577,7 +577,7 @@ def to_datetime(
Parameters
----------
- arg : integer, float, string, datetime, list, tuple, 1-d array, Series
+ arg : int, float, str, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
@@ -585,13 +585,13 @@ def to_datetime(
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
- dayfirst : boolean, default False
+ dayfirst : bool, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
- yearfirst : boolean, default False
+ yearfirst : bool, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
@@ -604,10 +604,10 @@ def to_datetime(
.. versionadded:: 0.16.1
- utc : boolean, default None
+ utc : bool, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
- box : boolean, default True
+ box : bool, default True
- If True returns a DatetimeIndex or Index-like object
- If False returns ndarray of values.
@@ -617,22 +617,22 @@ def to_datetime(
instead to get an ndarray of values or numpy.datetime64,
respectively.
- format : string, default None
+ format : str, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
See strftime documentation for more information on choices:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
- exact : boolean, True by default
+ exact : bool, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
- unit : string, default 'ns'
+ unit : str, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
- infer_datetime_format : boolean, default False
+ infer_datetime_format : bool, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
@@ -649,7 +649,7 @@ def to_datetime(
origin.
.. versionadded:: 0.20.0
- cache : boolean, default True
+ cache : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 571c544d48b29..6bd3532d538c7 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -29,12 +29,12 @@ def read_sas(
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
- format : string {'xport', 'sas7bdat'} or None
+ format : str {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
- encoding : string, default is None
+ encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index fe6b339c2f4c8..8724382d9ec55 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -28,7 +28,7 @@ def hist_series(
yrot=None,
figsize=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Draw histogram of the input series using matplotlib.
@@ -56,7 +56,7 @@ def hist_series(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- `**kwds` : keywords
+ **kwargs
To be passed to the actual plotting function
Returns
@@ -80,7 +80,7 @@ def hist_series(
yrot=yrot,
figsize=figsize,
bins=bins,
- **kwds
+ **kwargs
)
@@ -99,7 +99,7 @@ def hist_frame(
figsize=None,
layout=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Make a histogram of the DataFrame's.
@@ -151,7 +151,7 @@ def hist_frame(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
@@ -194,7 +194,7 @@ def hist_frame(
figsize=figsize,
layout=layout,
bins=bins,
- **kwds
+ **kwargs
)
@@ -209,7 +209,7 @@ def boxplot(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
"""
Make a box plot from DataFrame columns.
@@ -260,7 +260,7 @@ def boxplot(
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
@@ -385,7 +385,7 @@ def boxplot(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -401,7 +401,7 @@ def boxplot_frame(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
plot_backend = _get_plot_backend()
return plot_backend.boxplot_frame(
@@ -415,7 +415,7 @@ def boxplot_frame(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -431,7 +431,7 @@ def boxplot_frame_groupby(
layout=None,
sharex=False,
sharey=True,
- **kwds
+ **kwargs
):
"""
Make box plots from DataFrameGroupBy data.
@@ -459,7 +459,7 @@ def boxplot_frame_groupby(
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
- `**kwds` : Keyword Arguments
+ **kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
@@ -495,7 +495,7 @@ def boxplot_frame_groupby(
layout=layout,
sharex=sharex,
sharey=sharey,
- **kwds
+ **kwargs
)
@@ -586,7 +586,7 @@ class PlotAccessor(PandasObject):
labels with "(right)" in the legend
include_bool : bool, default is False
If True, boolean values can be plotted.
- `**kwds` : keywords
+ **kwargs
Options to pass to matplotlib plotting method.
Returns
@@ -810,7 +810,7 @@ def line(self, x=None, y=None, **kwargs):
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -880,7 +880,7 @@ def bar(self, x=None, y=None, **kwargs):
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -963,7 +963,7 @@ def barh(self, x=None, y=None, **kwargs):
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1049,7 +1049,7 @@ def box(self, by=None, **kwargs):
----------
by : str or sequence
Column in the DataFrame to group by.
- **kwds : optional
+ **kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
@@ -1092,7 +1092,7 @@ def hist(self, by=None, bins=10, **kwargs):
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1148,7 +1148,7 @@ def kde(self, bw_method=None, ind=None, **kwargs):
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
@@ -1250,7 +1250,7 @@ def area(self, x=None, y=None, **kwargs):
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1322,7 +1322,7 @@ def pie(self, **kwargs):
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1404,7 +1404,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs):
- A column name or position whose values will be used to color the
marker points according to a colormap.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1476,7 +1476,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
| Documentation update fixing some of the methods with a PR06 error code which involved updating
- string to str
- integer to int
- boolean to bool
No tests required
| https://api.github.com/repos/pandas-dev/pandas/pulls/28644 | 2019-09-26T19:38:55Z | 2019-09-27T15:53:47Z | 2019-09-27T15:53:47Z | 2019-09-27T17:03:49Z |
BUG: Fix TypeError raised in libreduction | diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 361c21c18c4da..a7d6d19bbc80d 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -15,7 +15,7 @@ from numpy cimport (ndarray,
cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.lib import maybe_convert_objects, values_from_object
+from pandas._libs.lib import maybe_convert_objects
cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt):
@@ -23,7 +23,7 @@ cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt):
if (util.is_array(obj) or
(isinstance(obj, list) and len(obj) == cnt) or
getattr(obj, 'shape', None) == (cnt,)):
- raise ValueError('function does not reduce')
+ raise ValueError('Function does not reduce')
return np.empty(size, dtype='O')
@@ -103,7 +103,7 @@ cdef class Reducer:
ndarray arr, result, chunk
Py_ssize_t i, incr
flatiter it
- bint has_labels
+ bint has_labels, has_ndarray_labels
object res, name, labels, index
object cached_typ=None
@@ -113,14 +113,18 @@ cdef class Reducer:
chunk.data = arr.data
labels = self.labels
has_labels = labels is not None
+ has_ndarray_labels = util.is_array(labels)
has_index = self.index is not None
incr = self.increment
try:
for i in range(self.nresults):
- if has_labels:
+ if has_ndarray_labels:
name = util.get_value_at(labels, i)
+ elif has_labels:
+ # labels is an ExtensionArray
+ name = labels[i]
else:
name = None
@@ -362,7 +366,8 @@ cdef class SeriesGrouper:
def get_result(self):
cdef:
- ndarray arr, result
+ # Define result to avoid UnboundLocalError
+ ndarray arr, result = None
ndarray[int64_t] labels, counts
Py_ssize_t i, n, group_size, lab
object res
@@ -428,6 +433,9 @@ cdef class SeriesGrouper:
islider.reset()
vslider.reset()
+ if result is None:
+ raise ValueError("No result.")
+
if result.dtype == np.object_:
result = maybe_convert_objects(result)
@@ -639,11 +647,11 @@ def compute_reduction(arr, f, axis=0, dummy=None, labels=None):
"""
if labels is not None:
- if labels._has_complex_internals:
- raise Exception('Cannot use shortcut')
+ # Caller is responsible for ensuring we don't have MultiIndex
+ assert not labels._has_complex_internals
- # pass as an ndarray
- labels = values_from_object(labels)
+ # pass as an ndarray/ExtensionArray
+ labels = labels._values
reducer = Reducer(arr, f, axis=axis, dummy=dummy, labels=labels)
return reducer.get_result()
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 61d093d19e4be..1be881e683be5 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -223,10 +223,12 @@ def apply_empty_result(self):
def apply_raw(self):
""" apply to the values as a numpy array """
-
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
- except Exception:
+ except ValueError as err:
+ if "Function does not reduce" not in str(err):
+ # catch only ValueError raised intentionally in libreduction
+ raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
@@ -273,24 +275,38 @@ def apply_standard(self):
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_type).any()
+ # Disallow complex_internals since libreduction shortcut
+ # cannot handle MultiIndex
+ and not self.agg_axis._has_complex_internals
):
- # Create a dummy Series from an empty array
- from pandas import Series
-
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
- dummy = Series(empty_arr, index=index, dtype=values.dtype)
+
+ # Preserve subclass for e.g. test_subclassed_apply
+ dummy = self.obj._constructor_sliced(
+ empty_arr, index=index, dtype=values.dtype
+ )
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
- return self.obj._constructor_sliced(result, index=labels)
- except Exception:
+ except ValueError as err:
+ if "Function does not reduce" not in str(err):
+ # catch only ValueError raised intentionally in libreduction
+ raise
+ except TypeError:
+ # e.g. test_apply_ignore_failures we just ignore
+ if not self.ignore_failures:
+ raise
+ except ZeroDivisionError:
+ # reached via numexpr; fall back to python implementation
pass
+ else:
+ return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bec5cbc5fecb8..6212a37472000 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -775,11 +775,7 @@ def test_omit_nuisance(df):
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
- msg = (
- r'\("unsupported operand type\(s\) for \+: '
- "'Timestamp' and 'float'\""
- r", 'occurred at index 0'\)"
- )
+ msg = r'\("unsupported operand type\(s\) for \+: ' "'Timestamp' and 'float'\", 0"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
| This is a PITA and I'm not 100% happy with the solution here, open to suggestions.
There is a call to `util.get_value_at(labels, i)` that raises `TypeError` if `labels` is not an `ndarray`. This fixes that by checking for non-ndarray and handling that case correctly.
There is _also_ a case in master where we get an `UnboundLocalError` by referencing `result` before it is assigned. This patches that to raise a `ValueError` instead, _but_ AFAICT fixing the ndarray bug above made it so that the UnboundLocalError case is no longer reached in the tests. i.e. this change is definitely more correct, but we don't have a test case specific to it.
I'm also not wild about the specific exception-catching on L297-307 in core.apply, but don't see a viable alternative. Suggestions welcome. | https://api.github.com/repos/pandas-dev/pandas/pulls/28643 | 2019-09-26T18:19:31Z | 2019-10-02T19:58:34Z | 2019-10-02T19:58:34Z | 2019-10-02T20:24:37Z |
CLN: more Exceptions | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 61d093d19e4be..d093d7a145382 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -342,7 +342,7 @@ def wrap_results(self):
results = self.results
# see if we can infer the results
- if len(results) > 0 and is_sequence(results[0]):
+ if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 62662edb692a7..8aff0bc19d68d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2588,8 +2588,9 @@ def intersection(self, other, sort=False):
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
- except Exception:
- # duplicates
+ except (InvalidIndexError, IncompatibleFrequency):
+ # InvalidIndexError raised by get_indexer if non-unique
+ # IncompatibleFrequency raised by PeriodIndex.get_indexer
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 44cb399336d62..b0683fb8b0dfb 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1591,10 +1591,7 @@ def execute(self, *args, **kwargs):
else:
cur = self.con.cursor()
try:
- if kwargs:
- cur.execute(*args, **kwargs)
- else:
- cur.execute(*args)
+ cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index c67106e897727..0b674b556b2ee 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2388,16 +2388,16 @@ def write_file(self):
self._write_map()
except Exception as exc:
self._close()
- try:
- if self._own_file:
+ if self._own_file:
+ try:
os.unlink(self._fname)
- except Exception:
- warnings.warn(
- "This save was not successful but {0} could not "
- "be deleted. This file is not "
- "valid.".format(self._fname),
- ResourceWarning,
- )
+ except OSError:
+ warnings.warn(
+ "This save was not successful but {0} could not "
+ "be deleted. This file is not "
+ "valid.".format(self._fname),
+ ResourceWarning,
+ )
raise exc
else:
self._close()
| 84 of these left. | https://api.github.com/repos/pandas-dev/pandas/pulls/28642 | 2019-09-26T17:56:50Z | 2019-09-27T11:48:49Z | 2019-09-27T11:48:49Z | 2019-09-27T14:23:02Z |
REF: Consolidate alignment calls in DataFrame ops | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed05691d33d07..9dc16eb132504 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5271,24 +5271,17 @@ def _arith_op(left, right):
new_data = dispatch_fill_zeros(func, this.values, other.values, res_values)
return this._construct_result(new_data)
- def _combine_match_index(self, other, func, level=None):
- left, right = self.align(other, join="outer", axis=0, level=level, copy=False)
- # at this point we have `left.index.equals(right.index)`
+ def _combine_match_index(self, other, func):
+ # at this point we have `self.index.equals(other.index)`
- if left._is_mixed_type or right._is_mixed_type:
+ if self._is_mixed_type or other._is_mixed_type:
# operate column-wise; avoid costly object-casting in `.values`
- new_data = ops.dispatch_to_series(left, right, func)
+ new_data = ops.dispatch_to_series(self, other, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
- new_data = func(left.values.T, right.values).T
- return left._construct_result(new_data)
-
- def _combine_match_columns(self, other: Series, func, level=None):
- left, right = self.align(other, join="outer", axis=1, level=level, copy=False)
- # at this point we have `left.columns.equals(right.index)`
- new_data = ops.dispatch_to_series(left, right, func, axis="columns")
- return left._construct_result(new_data)
+ new_data = func(self.values.T, other.values).T
+ return new_data
def _construct_result(self, result) -> "DataFrame":
"""
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 16d2eaa410637..05b2becfc73d8 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -379,7 +379,7 @@ def column_op(a, b):
return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))}
elif isinstance(right, ABCSeries) and axis == "columns":
- # We only get here if called via left._combine_match_columns,
+ # We only get here if called via _combine_frame_series,
# in which case we specifically want to operate row-by-row
assert right.index.equals(left.columns)
@@ -597,15 +597,18 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N
"fill_value {fill} not supported.".format(fill=fill_value)
)
- if axis is not None:
- axis = self._get_axis_number(axis)
- if axis == 0:
- return self._combine_match_index(other, func, level=level)
- else:
- return self._combine_match_columns(other, func, level=level)
+ if axis is None:
+ # default axis is columns
+ axis = 1
+
+ axis = self._get_axis_number(axis)
+ left, right = self.align(other, join="outer", axis=axis, level=level, copy=False)
+ if axis == 0:
+ new_data = left._combine_match_index(right, func)
+ else:
+ new_data = dispatch_to_series(left, right, func, axis="columns")
- # default axis is columns
- return self._combine_match_columns(other, func, level=level)
+ return left._construct_result(new_data)
def _align_method_FRAME(left, right, axis):
| Next step after this will be to consolidate _construct_result calls. That hinges on some not-obvious alignment behaviors. | https://api.github.com/repos/pandas-dev/pandas/pulls/28638 | 2019-09-26T16:19:52Z | 2019-10-02T11:52:27Z | 2019-10-02T11:52:27Z | 2019-10-02T12:44:29Z |
CLN: streamline Series _construct_result calls | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 16d2eaa410637..79272c5643281 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,7 +5,7 @@
"""
import datetime
import operator
-from typing import Tuple
+from typing import Tuple, Union
import numpy as np
@@ -13,7 +13,12 @@
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
-from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCDataFrame,
+ ABCExtensionArray,
+ ABCIndexClass,
+ ABCSeries,
+)
from pandas.core.dtypes.missing import isna
from pandas.core.construction import extract_array
@@ -436,13 +441,37 @@ def _align_method_SERIES(left, right, align_asobject=False):
return left, right
-def _construct_result(left, result, index, name, dtype=None):
+def _construct_result(
+ left: ABCSeries,
+ result: Union[np.ndarray, ABCExtensionArray],
+ index: ABCIndexClass,
+ name,
+):
"""
- If the raw op result has a non-None name (e.g. it is an Index object) and
- the name argument is None, then passing name to the constructor will
- not be enough; we still need to override the name attribute.
+ Construct an appropriately-labelled Series from the result of an op.
+
+ Parameters
+ ----------
+ left : Series
+ result : ndarray or ExtensionArray
+ index : Index
+ name : object
+
+ Returns
+ -------
+ Series
+ In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.
"""
- out = left._constructor(result, index=index, dtype=dtype)
+ if isinstance(result, tuple):
+ # produced by divmod or rdivmod
+ return (
+ _construct_result(left, result[0], index=index, name=name),
+ _construct_result(left, result[1], index=index, name=name),
+ )
+
+ # We do not pass dtype to ensure that the Series constructor
+ # does inference in the case where `result` has object-dtype.
+ out = left._constructor(result, index=index)
out = out.__finalize__(left)
# Set the result's name after __finalize__ is called because __finalize__
@@ -451,15 +480,6 @@ def _construct_result(left, result, index, name, dtype=None):
return out
-def _construct_divmod_result(left, result, index, name, dtype=None):
- """divmod returns a tuple of like indexed series instead of a single series.
- """
- return (
- _construct_result(left, result[0], index=index, name=name, dtype=dtype),
- _construct_result(left, result[1], index=index, name=name, dtype=dtype),
- )
-
-
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
@@ -468,9 +488,6 @@ def _arith_method_SERIES(cls, op, special):
str_rep = _get_opstr(op)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
- construct_result = (
- _construct_divmod_result if op in [divmod, rdivmod] else _construct_result
- )
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
@@ -482,9 +499,7 @@ def wrapper(left, right):
lvalues = extract_array(left, extract_numpy=True)
result = arithmetic_op(lvalues, right, op, str_rep, eval_kwargs)
- # We do not pass dtype to ensure that the Series constructor
- # does inference in the case where `result` has object-dtype.
- return construct_result(left, result, index=left.index, name=res_name)
+ return _construct_result(left, result, index=left.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
@@ -553,6 +568,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
if axis is not None:
self._get_axis_number(axis)
+
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
@@ -564,7 +580,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
if fill_value is not None:
self = self.fillna(fill_value)
- return self._constructor(op(self, other), self.index).__finalize__(self)
+ return op(self, other)
flex_wrapper.__name__ = name
return flex_wrapper
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c87e371354f63..276f829d287ab 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2738,10 +2738,7 @@ def _binop(self, other, func, level=None, fill_value=None):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
- if func.__name__ in ["divmod", "rdivmod"]:
- ret = ops._construct_divmod_result(self, result, new_index, name)
- else:
- ret = ops._construct_result(self, result, new_index, name)
+ ret = ops._construct_result(self, result, new_index, name)
return ret
def combine(self, other, func, fill_value=None):
| After this, the three Series methods in `ops.__init__` are just about in sync, the last holdout being alignment behavior in the comparison method. | https://api.github.com/repos/pandas-dev/pandas/pulls/28637 | 2019-09-26T15:49:51Z | 2019-10-01T16:53:18Z | 2019-10-01T16:53:17Z | 2019-10-01T16:58:20Z |
"series_examples": _ne_example_SERIES added | diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 93f197366cf32..80c0a519d8627 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -233,6 +233,45 @@ def _make_flex_doc(op_name, typ):
dtype: float64
"""
+_ne_example_SERIES = """
+Examples
+--------
+>>> s1 = pd.Series([1, 2, 3, 4, 5])
+>>> s2 = pd.Series([1, 2, 5, 9, 2])
+>>> s1.ne(s2)
+Out:
+0 False
+1 False
+2 True
+3 True
+4 True
+dtype: bool
+>>> s1 = pd.Series([1, 2, 3, 4, 5])
+>>> s2 = pd.Series([1, 2, 3])
+>>> s1.ne(s2)
+Out:
+0 False
+1 False
+2 False
+3 True
+4 True
+dtype: bool
+
+>>> s1 = pd.Series([1, 2])
+>>> s2 = pd.Series([1, 2, 3, 1, 7, 2])
+>>> s1.ne(s2)
+Out:
+0 False
+1 False
+2 True
+3 True
+4 True
+5 True
+dtype: bool
+"""
+
+
+
_op_descriptions = {
# Arithmetic Operators
"add": {
@@ -293,8 +332,8 @@ def _make_flex_doc(op_name, typ):
"ne": {
"op": "!=",
"desc": "Not equal to",
- "reverse": None,
- "series_examples": None,
+ "reverse": "eq",
+ "series_examples": _ne_example_SERIES,
},
"lt": {"op": "<", "desc": "Less than", "reverse": None, "series_examples": None},
"le": {
| "series_examples": _ne_example_SERIES added
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28636 | 2019-09-26T14:47:23Z | 2019-11-11T07:11:29Z | null | 2019-11-11T07:11:29Z |
BUG: value_counts can handle the case even with empty groups (#28479) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index b40a64420a0be..2130e0c88ef4d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -454,6 +454,7 @@ Other
- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`)
- Bug in :meth:`Series.diff` where a boolean series would incorrectly raise a ``TypeError`` (:issue:`17294`)
- :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`)
+- :meth:`SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue: 28479)
- Fix corrupted error message when calling ``pandas.libs._json.encode()`` on a 0d array (:issue:`18878`)
- Fix :class:`AbstractHolidayCalendar` to return correct results for
years after 2030 (now goes up to 2200) (:issue:`27790`)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 2c8aa1294451d..9599ce0bf39a9 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -767,6 +767,11 @@ def group_info(self):
ngroups,
)
+ @cache_readonly
+ def recons_codes(self):
+ # get unique result indices, and prepend 0 as groupby starts from the first
+ return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
+
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index 363c5a9af0180..c76ee09f977b5 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series, date_range
+from pandas import DataFrame, Grouper, MultiIndex, Series, date_range, to_datetime
import pandas.util.testing as tm
@@ -79,3 +79,31 @@ def rebuild_index(df):
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
+
+
+def test_series_groupby_value_counts_with_grouper():
+ # GH28479
+ df = DataFrame(
+ {
+ "Timestamp": [
+ 1565083561,
+ 1565083561 + 86400,
+ 1565083561 + 86500,
+ 1565083561 + 86400 * 2,
+ 1565083561 + 86400 * 3,
+ 1565083561 + 86500 * 3,
+ 1565083561 + 86400 * 4,
+ ],
+ "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],
+ }
+ ).drop([3])
+
+ df["Datetime"] = to_datetime(df["Timestamp"].apply(lambda t: str(t)), unit="s")
+ dfg = df.groupby(Grouper(freq="1D", key="Datetime"))
+
+ # have to sort on index because of unstable sort on values xref GH9212
+ result = dfg["Food"].value_counts().sort_index()
+ expected = dfg["Food"].apply(Series.value_counts).sort_index()
+ expected.index.names = result.index.names
+
+ tm.assert_series_equal(result, expected)
| * If applying rep to recons_labels go fail, use ids which has no
consecutive duplicates instead.
- [x] closes #28479
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xuancong84 found that value_counts() crashes if `groupby` object contains empty groups.
However, even though I made the construction of DataFrame don't skip empty rows, it still crashed.
Till then, I already tried in many ways though, in this time I tried to correct the callee `self.grouper.recons_labels`. After several tests, I found that If freq of `Grouper` is too long so that it has empty groups in some periods then it crashes. And also have found that this is solved by using `ids` which has no consecutive duplicates instead of `self.grouper.recons_labels`. | https://api.github.com/repos/pandas-dev/pandas/pulls/28634 | 2019-09-26T13:06:48Z | 2019-11-07T21:19:58Z | 2019-11-07T21:19:57Z | 2019-11-07T21:20:01Z |
BUG: DataFrame.to_html validates formatters has the correct length | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 7ca93d7d75854..874aea1e22735 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -252,6 +252,7 @@ I/O
- Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`)
- Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`)
- Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`)
+- Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`)
Plotting
^^^^^^^^
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3a50f63409582..15f21814b072d 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -561,7 +561,17 @@ def __init__(
self.sparsify = sparsify
self.float_format = float_format
- self.formatters = formatters if formatters is not None else {}
+ if formatters is None:
+ self.formatters = {}
+ elif len(frame.columns) == len(formatters) or isinstance(formatters, dict):
+ self.formatters = formatters
+ else:
+ raise ValueError(
+ (
+ "Formatters length({flen}) should match"
+ " DataFrame number of columns({dlen})"
+ ).format(flen=len(formatters), dlen=len(frame.columns))
+ )
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 004dffd128dd6..ef19319e208d9 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -235,6 +235,15 @@ def test_to_html_truncate(datapath):
assert result == expected
+@pytest.mark.parametrize("size", [1, 5])
+def test_html_invalid_formatters_arg_raises(size):
+ # issue-28469
+ df = DataFrame(columns=["a", "b", "c"])
+ msg = "Formatters length({}) should match DataFrame number of columns(3)"
+ with pytest.raises(ValueError, match=re.escape(msg.format(size))):
+ df.to_html(formatters=["{}".format] * size)
+
+
def test_to_html_truncate_formatter(datapath):
# issue-25955
data = [
| - [x] closes #28469
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
@gabriellm1 @hugoecarl | https://api.github.com/repos/pandas-dev/pandas/pulls/28632 | 2019-09-26T12:18:52Z | 2019-10-07T15:01:03Z | 2019-10-07T15:01:02Z | 2019-10-19T01:22:56Z |
WEB: Add diversity note to team.md | diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
index 41da3a0e82bdb..8eb2edebec817 100644
--- a/web/pandas/about/team.md
+++ b/web/pandas/about/team.md
@@ -36,6 +36,16 @@ If you want to support pandas development, you can find information in the [dona
{% endfor %}
</div>
+## Diversity and Inclusion
+
+> _pandas_ expressly welcomes and encourages contributions from anyone who faces under-representation, discrimination in the technology industry
+> or anyone willing to increase the diversity of our team.
+> We have identified visible gaps and obstacles in sustaining diversity and inclusion in the open-source communities and we are proactive in increasing
+> the diversity of our team.
+> We have a [code of conduct]({base_url}/community/coc.html) to ensure a friendly and welcoming environment.
+> Please send an email to [pandas-code-of-conduct-committee](mailto:pandas-coc@googlegroups.com), if you think we can do a
+> better job at achieving this goal.
+
## Governance
Wes McKinney is the Benevolent Dictator for Life (BDFL).
| @datapythonista kindly review
| https://api.github.com/repos/pandas-dev/pandas/pulls/28630 | 2019-09-26T10:57:22Z | 2019-10-04T02:13:39Z | 2019-10-04T02:13:39Z | 2019-10-04T08:48:40Z |
TST: loc misbehaves when Period is at start of 3-level MultiIndex | diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 3da3d1e4b1b41..cef77f5a795af 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -251,10 +251,8 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
-------
datetime, datetime/dateutil.parser._result, str
"""
- if not isinstance(arg, (str, unicode)):
- # Note: cython recognizes `unicode` in both py2/py3, optimizes
- # this check into a C call.
- return arg
+ if not isinstance(arg, str):
+ raise TypeError("parse_time_string argument must be str")
if getattr(freq, "_typ", None) == "dateoffset":
freq = freq.rule_code
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 8b3b66bd1ee6b..ac19fa70442cd 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -617,6 +617,44 @@ def test_insert(self):
result = period_range("2017Q1", periods=4, freq="Q").insert(1, na)
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize(
+ "msg, key",
+ [
+ (r"Period\('2019', 'A-DEC'\), 'foo', 'bar'", (Period(2019), "foo", "bar")),
+ (r"Period\('2019', 'A-DEC'\), 'y1', 'bar'", (Period(2019), "y1", "bar")),
+ (r"Period\('2019', 'A-DEC'\), 'foo', 'z1'", (Period(2019), "foo", "z1")),
+ (
+ r"Period\('2018', 'A-DEC'\), Period\('2016', 'A-DEC'\), 'bar'",
+ (Period(2018), Period(2016), "bar"),
+ ),
+ (r"Period\('2018', 'A-DEC'\), 'foo', 'y1'", (Period(2018), "foo", "y1")),
+ (
+ r"Period\('2017', 'A-DEC'\), 'foo', Period\('2015', 'A-DEC'\)",
+ (Period(2017), "foo", Period(2015)),
+ ),
+ (r"Period\('2017', 'A-DEC'\), 'z1', 'bar'", (Period(2017), "z1", "bar")),
+ ],
+ )
+ def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):
+ # issue 20684
+ """
+ parse_time_string return parameter if type not matched.
+ PeriodIndex.get_loc takes returned value from parse_time_string as a tuple.
+ If first argument is Period and a tuple has 3 items,
+ process go on not raise exception
+ """
+ df = DataFrame(
+ {
+ "A": [Period(2019), "x1", "x2"],
+ "B": [Period(2018), Period(2016), "y1"],
+ "C": [Period(2017), "z1", Period(2015)],
+ "V1": [1, 2, 3],
+ "V2": [10, 20, 30],
+ }
+ ).set_index(["A", "B", "C"])
+ with pytest.raises(KeyError, match=msg):
+ df.loc[key]
+
def test_maybe_convert_timedelta():
pi = PeriodIndex(["2000", "2001"], freq="D")
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 126a1bd12ad59..7df7dbbf91d0b 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -209,3 +209,13 @@ def test_try_parse_dates():
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
+
+
+def test_parse_time_string_check_instance_type_raise_exception():
+ # issue 20684
+ with pytest.raises(TypeError):
+ parse_time_string((1, 2, 3))
+
+ result = parse_time_string("2019")
+ expected = (datetime(2019, 1, 1), datetime(2019, 1, 1), "year")
+ assert result == expected
| If index is MultiIndex and level of 0 is PeriodIndex, loc function raise
exception if all input of loc does not match index values
Background: This bug only happens when MultiIndex's level is 3 and first level index is PeriodIndex. In this situation, if someone access one row using a '.loc' with a miss match key, then would not raise exception.
Someone already change what i try to do `parse_time_string` function in '_libs.tslibs.parsing'.
in the past,
```
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
if not isinstance(arg, str):
return arg
```
What i try to do:
```
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
if not isinstance(arg, str):
raise TypeError
```
now in master:
```
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
if not isinstance(arg, str):
raise TypeError("parse_time_string argument must be str")
```
Just add tests for issue.
- [x] closes (#20684)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28628 | 2019-09-26T07:08:53Z | 2019-10-30T12:23:35Z | 2019-10-30T12:23:35Z | 2019-10-30T14:04:29Z |
Backport PR #28357 on branch 0.25.x (Change conda channel order for Windows builds) | diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 33c8122fb232a..88b38aaef237c 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -1,17 +1,15 @@
name: pandas-dev
channels:
- - defaults
- conda-forge
+ - defaults
dependencies:
- blosc
- bottleneck
- - boost-cpp<1.67
- fastparquet>=0.2.1
- matplotlib=3.0.2
- numexpr
- numpy=1.15.*
- openpyxl
- - parquet-cpp
- pyarrow
- pytables
- python-dateutil
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index d634859e72d7b..9573ac15dc45f 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1,5 +1,6 @@
""" test parquet compat """
import datetime
+from distutils.version import LooseVersion
import os
from warnings import catch_warnings
@@ -238,6 +239,15 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
+ if (
+ LooseVersion(pyarrow.__version__) < "0.15"
+ and LooseVersion(pyarrow.__version__) >= "0.13"
+ ):
+ pytest.xfail(
+ "Reading fastparquet with pyarrow in 0.14 fails: "
+ "https://issues.apache.org/jira/browse/ARROW-6492"
+ )
+
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
| Backport PR #28357: Change conda channel order for Windows builds | https://api.github.com/repos/pandas-dev/pandas/pulls/28627 | 2019-09-26T06:40:54Z | 2019-09-26T11:32:27Z | 2019-09-26T11:32:27Z | 2019-09-26T11:40:36Z |
REF: Assert json roundtrip equal | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 415b1d81eb3e4..2195bf248f43a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -37,6 +37,14 @@
_mixed_frame = _frame.copy()
+def assert_json_roundtrip_equal(result, expected, orient):
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+ assert_frame_equal(result, expected)
+
+
class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
@@ -90,12 +98,7 @@ def test_frame_double_encoded_labels(self, orient):
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
@@ -103,12 +106,7 @@ def test_frame_non_unique_index(self, orient):
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
@@ -172,12 +170,7 @@ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
# TODO: debug why sort is required
expected = expected.sort_index()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@@ -191,11 +184,6 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
if not numpy and (orient == "index" or (PY35 and orient == "columns")):
expected = expected.sort_index()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
if (
numpy
and (is_platform_32bit() or is_platform_windows())
@@ -205,7 +193,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@@ -246,12 +234,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
@@ -277,12 +260,7 @@ def test_roundtrip_categorical(self, orient, convert_axes, numpy):
if not numpy and (orient == "index" or (PY35 and orient == "columns")):
expected = expected.sort_index()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
@@ -320,12 +298,7 @@ def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
expected.index = idx
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
@@ -354,12 +327,7 @@ def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if not numpy and (orient == "index" or (PY35 and orient == "columns")):
expected = expected.sort_index()
- if orient == "records" or orient == "values":
- expected = expected.reset_index(drop=True)
- if orient == "values":
- expected.columns = range(len(expected.columns))
-
- tm.assert_frame_equal(result, expected)
+ assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
| Replaces a some frequently repeated lines with a function.
- [x] closes #28555
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/28626 | 2019-09-26T04:10:26Z | 2019-09-27T22:47:24Z | 2019-09-27T22:47:24Z | 2019-09-27T22:47:48Z |
REF: separate out dispatch-centric ops functions | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 06c0e9722c045..16d2eaa410637 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,33 +5,18 @@
"""
import datetime
import operator
-from typing import Any, Callable, Tuple, Union
+from typing import Tuple
import numpy as np
from pandas._libs import Timedelta, Timestamp, lib
-from pandas.errors import NullFrequencyError
from pandas.util._decorators import Appender
-from pandas.core.dtypes.common import (
- is_datetime64_dtype,
- is_extension_array_dtype,
- is_integer_dtype,
- is_list_like,
- is_object_dtype,
- is_scalar,
- is_timedelta64_dtype,
-)
-from pandas.core.dtypes.generic import (
- ABCDataFrame,
- ABCExtensionArray,
- ABCIndexClass,
- ABCSeries,
-)
+from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
+from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas._typing import ArrayLike
-from pandas.core.construction import array, extract_array
+from pandas.core.construction import extract_array
from pandas.core.ops.array_ops import (
arithmetic_op,
comparison_op,
@@ -39,6 +24,8 @@
logical_op,
)
from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY # noqa:F401
+from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
+from pandas.core.ops.dispatch import should_series_dispatch
from pandas.core.ops.docstrings import (
_arith_doc_FRAME,
_flex_comp_doc_FRAME,
@@ -358,71 +345,6 @@ def fill_binop(left, right, fill_value):
# Dispatch logic
-def should_extension_dispatch(left: ABCSeries, right: Any) -> bool:
- """
- Identify cases where Series operation should use dispatch_to_extension_op.
-
- Parameters
- ----------
- left : Series
- right : object
-
- Returns
- -------
- bool
- """
- if (
- is_extension_array_dtype(left.dtype)
- or is_datetime64_dtype(left.dtype)
- or is_timedelta64_dtype(left.dtype)
- ):
- return True
-
- if not is_scalar(right) and is_extension_array_dtype(right):
- # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
- return True
-
- return False
-
-
-def should_series_dispatch(left, right, op):
- """
- Identify cases where a DataFrame operation should dispatch to its
- Series counterpart.
-
- Parameters
- ----------
- left : DataFrame
- right : DataFrame
- op : binary operator
-
- Returns
- -------
- override : bool
- """
- if left._is_mixed_type or right._is_mixed_type:
- return True
-
- if not len(left.columns) or not len(right.columns):
- # ensure obj.dtypes[0] exists for each obj
- return False
-
- ldtype = left.dtypes.iloc[0]
- rdtype = right.dtypes.iloc[0]
-
- if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
- is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
- ):
- # numpy integer dtypes as timedelta64 dtypes in this scenario
- return True
-
- if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
- # in particular case where right is an array of DateOffsets
- return True
-
- return False
-
-
def dispatch_to_series(left, right, func, str_rep=None, axis=None):
"""
Evaluate the frame operation func(left, right) by evaluating
@@ -489,58 +411,6 @@ def column_op(a, b):
return new_data
-def dispatch_to_extension_op(
- op,
- left: Union[ABCExtensionArray, np.ndarray],
- right: Any,
- keep_null_freq: bool = False,
-):
- """
- Assume that left or right is a Series backed by an ExtensionArray,
- apply the operator defined by op.
-
- Parameters
- ----------
- op : binary operator
- left : ExtensionArray or np.ndarray
- right : object
- keep_null_freq : bool, default False
- Whether to re-raise a NullFrequencyError unchanged, as opposed to
- catching and raising TypeError.
-
- Returns
- -------
- ExtensionArray or np.ndarray
- 2-tuple of these if op is divmod or rdivmod
- """
- # NB: left and right should already be unboxed, so neither should be
- # a Series or Index.
-
- if left.dtype.kind in "mM" and isinstance(left, np.ndarray):
- # We need to cast datetime64 and timedelta64 ndarrays to
- # DatetimeArray/TimedeltaArray. But we avoid wrapping others in
- # PandasArray as that behaves poorly with e.g. IntegerArray.
- left = array(left)
-
- # The op calls will raise TypeError if the op is not defined
- # on the ExtensionArray
-
- try:
- res_values = op(left, right)
- except NullFrequencyError:
- # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
- # on add/sub of integers (or int-like). We re-raise as a TypeError.
- if keep_null_freq:
- # TODO: remove keep_null_freq after Timestamp+int deprecation
- # GH#22535 is enforced
- raise
- raise TypeError(
- "incompatible type for a datetime/timedelta "
- "operation [{name}]".format(name=op.__name__)
- )
- return res_values
-
-
# -----------------------------------------------------------------------------
# Series
@@ -906,92 +776,3 @@ def f(self, other):
f.__name__ = op_name
return f
-
-
-# -----------------------------------------------------------------------------
-# Sparse
-
-
-def maybe_dispatch_ufunc_to_dunder_op(
- self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any
-):
- """
- Dispatch a ufunc to the equivalent dunder method.
-
- Parameters
- ----------
- self : ArrayLike
- The array whose dunder method we dispatch to
- ufunc : Callable
- A NumPy ufunc
- method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
- inputs : ArrayLike
- The input arrays.
- kwargs : Any
- The additional keyword arguments, e.g. ``out``.
-
- Returns
- -------
- result : Any
- The result of applying the ufunc
- """
- # special has the ufuncs we dispatch to the dunder op on
- special = {
- "add",
- "sub",
- "mul",
- "pow",
- "mod",
- "floordiv",
- "truediv",
- "divmod",
- "eq",
- "ne",
- "lt",
- "gt",
- "le",
- "ge",
- "remainder",
- "matmul",
- }
- aliases = {
- "subtract": "sub",
- "multiply": "mul",
- "floor_divide": "floordiv",
- "true_divide": "truediv",
- "power": "pow",
- "remainder": "mod",
- "divide": "div",
- "equal": "eq",
- "not_equal": "ne",
- "less": "lt",
- "less_equal": "le",
- "greater": "gt",
- "greater_equal": "ge",
- }
-
- # For op(., Array) -> Array.__r{op}__
- flipped = {
- "lt": "__gt__",
- "le": "__ge__",
- "gt": "__lt__",
- "ge": "__le__",
- "eq": "__eq__",
- "ne": "__ne__",
- }
-
- op_name = ufunc.__name__
- op_name = aliases.get(op_name, op_name)
-
- def not_implemented(*args, **kwargs):
- return NotImplemented
-
- if method == "__call__" and op_name in special and kwargs.get("out") is None:
- if isinstance(inputs[0], type(self)):
- name = "__{}__".format(op_name)
- return getattr(self, name, not_implemented)(inputs[1])
- else:
- name = flipped.get(op_name, "__r{}__".format(op_name))
- return getattr(self, name, not_implemented)(inputs[0])
- else:
- return NotImplemented
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index b72ef69ede199..55b4b1a899f65 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -36,6 +36,7 @@
from pandas.core.construction import extract_array
from pandas.core.ops import missing
+from pandas.core.ops.dispatch import dispatch_to_extension_op, should_extension_dispatch
from pandas.core.ops.invalid import invalid_comparison
from pandas.core.ops.roperator import rpow
@@ -179,11 +180,7 @@ def arithmetic_op(
Or a 2-tuple of these in the case of divmod or rdivmod.
"""
- from pandas.core.ops import (
- maybe_upcast_for_op,
- should_extension_dispatch,
- dispatch_to_extension_op,
- )
+ from pandas.core.ops import maybe_upcast_for_op
keep_null_freq = isinstance(
right,
@@ -236,7 +233,6 @@ def comparison_op(
-------
ndarrray or ExtensionArray
"""
- from pandas.core.ops import should_extension_dispatch, dispatch_to_extension_op
# NB: We assume extract_array has already been called on left and right
lvalues = left
@@ -335,7 +331,6 @@ def logical_op(
-------
ndarrray or ExtensionArray
"""
- from pandas.core.ops import should_extension_dispatch, dispatch_to_extension_op
fill_int = lambda x: x
diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py
new file mode 100644
index 0000000000000..9835d57ee7366
--- /dev/null
+++ b/pandas/core/ops/dispatch.py
@@ -0,0 +1,223 @@
+"""
+Functions for defining unary operations.
+"""
+from typing import Any, Callable, Union
+
+import numpy as np
+
+from pandas.errors import NullFrequencyError
+
+from pandas.core.dtypes.common import (
+ is_datetime64_dtype,
+ is_extension_array_dtype,
+ is_integer_dtype,
+ is_object_dtype,
+ is_scalar,
+ is_timedelta64_dtype,
+)
+from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
+
+from pandas._typing import ArrayLike
+from pandas.core.construction import array
+
+
+def should_extension_dispatch(left: ABCSeries, right: Any) -> bool:
+ """
+ Identify cases where Series operation should use dispatch_to_extension_op.
+
+ Parameters
+ ----------
+ left : Series
+ right : object
+
+ Returns
+ -------
+ bool
+ """
+ if (
+ is_extension_array_dtype(left.dtype)
+ or is_datetime64_dtype(left.dtype)
+ or is_timedelta64_dtype(left.dtype)
+ ):
+ return True
+
+ if not is_scalar(right) and is_extension_array_dtype(right):
+ # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
+ return True
+
+ return False
+
+
+def should_series_dispatch(left, right, op):
+ """
+ Identify cases where a DataFrame operation should dispatch to its
+ Series counterpart.
+
+ Parameters
+ ----------
+ left : DataFrame
+ right : DataFrame
+ op : binary operator
+
+ Returns
+ -------
+ override : bool
+ """
+ if left._is_mixed_type or right._is_mixed_type:
+ return True
+
+ if not len(left.columns) or not len(right.columns):
+ # ensure obj.dtypes[0] exists for each obj
+ return False
+
+ ldtype = left.dtypes.iloc[0]
+ rdtype = right.dtypes.iloc[0]
+
+ if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
+ is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
+ ):
+ # numpy integer dtypes as timedelta64 dtypes in this scenario
+ return True
+
+ if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
+ # in particular case where right is an array of DateOffsets
+ return True
+
+ return False
+
+
+def dispatch_to_extension_op(
+ op,
+ left: Union[ABCExtensionArray, np.ndarray],
+ right: Any,
+ keep_null_freq: bool = False,
+):
+ """
+ Assume that left or right is a Series backed by an ExtensionArray,
+ apply the operator defined by op.
+
+ Parameters
+ ----------
+ op : binary operator
+ left : ExtensionArray or np.ndarray
+ right : object
+ keep_null_freq : bool, default False
+ Whether to re-raise a NullFrequencyError unchanged, as opposed to
+ catching and raising TypeError.
+
+ Returns
+ -------
+ ExtensionArray or np.ndarray
+ 2-tuple of these if op is divmod or rdivmod
+ """
+ # NB: left and right should already be unboxed, so neither should be
+ # a Series or Index.
+
+ if left.dtype.kind in "mM" and isinstance(left, np.ndarray):
+ # We need to cast datetime64 and timedelta64 ndarrays to
+ # DatetimeArray/TimedeltaArray. But we avoid wrapping others in
+ # PandasArray as that behaves poorly with e.g. IntegerArray.
+ left = array(left)
+
+ # The op calls will raise TypeError if the op is not defined
+ # on the ExtensionArray
+
+ try:
+ res_values = op(left, right)
+ except NullFrequencyError:
+ # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
+ # on add/sub of integers (or int-like). We re-raise as a TypeError.
+ if keep_null_freq:
+ # TODO: remove keep_null_freq after Timestamp+int deprecation
+ # GH#22535 is enforced
+ raise
+ raise TypeError(
+ "incompatible type for a datetime/timedelta "
+ "operation [{name}]".format(name=op.__name__)
+ )
+ return res_values
+
+
+def maybe_dispatch_ufunc_to_dunder_op(
+ self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any
+):
+ """
+ Dispatch a ufunc to the equivalent dunder method.
+
+ Parameters
+ ----------
+ self : ArrayLike
+ The array whose dunder method we dispatch to
+ ufunc : Callable
+ A NumPy ufunc
+ method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
+ inputs : ArrayLike
+ The input arrays.
+ kwargs : Any
+ The additional keyword arguments, e.g. ``out``.
+
+ Returns
+ -------
+ result : Any
+ The result of applying the ufunc
+ """
+ # special has the ufuncs we dispatch to the dunder op on
+ special = {
+ "add",
+ "sub",
+ "mul",
+ "pow",
+ "mod",
+ "floordiv",
+ "truediv",
+ "divmod",
+ "eq",
+ "ne",
+ "lt",
+ "gt",
+ "le",
+ "ge",
+ "remainder",
+ "matmul",
+ }
+ aliases = {
+ "subtract": "sub",
+ "multiply": "mul",
+ "floor_divide": "floordiv",
+ "true_divide": "truediv",
+ "power": "pow",
+ "remainder": "mod",
+ "divide": "div",
+ "equal": "eq",
+ "not_equal": "ne",
+ "less": "lt",
+ "less_equal": "le",
+ "greater": "gt",
+ "greater_equal": "ge",
+ }
+
+ # For op(., Array) -> Array.__r{op}__
+ flipped = {
+ "lt": "__gt__",
+ "le": "__ge__",
+ "gt": "__lt__",
+ "ge": "__le__",
+ "eq": "__eq__",
+ "ne": "__ne__",
+ }
+
+ op_name = ufunc.__name__
+ op_name = aliases.get(op_name, op_name)
+
+ def not_implemented(*args, **kwargs):
+ return NotImplemented
+
+ if method == "__call__" and op_name in special and kwargs.get("out") is None:
+ if isinstance(inputs[0], type(self)):
+ name = "__{}__".format(op_name)
+ return getattr(self, name, not_implemented)(inputs[1])
+ else:
+ name = flipped.get(op_name, "__r{}__".format(op_name))
+ return getattr(self, name, not_implemented)(inputs[0])
+ else:
+ return NotImplemented
| Seems like a reasonable chunk of stuff to get out of `__init__`.
`dispatch_to_series` is not moved because it will end up using `array_ops` and I don't want to introduce circular dependencies. | https://api.github.com/repos/pandas-dev/pandas/pulls/28624 | 2019-09-26T01:03:07Z | 2019-09-26T15:39:54Z | 2019-09-26T15:39:54Z | 2019-09-26T15:42:46Z |
DOC: start using new bootstrap-based sphinx theme | diff --git a/doc/redirects.csv b/doc/redirects.csv
index a1504f9175480..8c8079bb3fd2b 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -6,6 +6,7 @@ whatsnew,whatsnew/index
release,whatsnew/index
# getting started
+install,getting_started/install
10min,getting_started/10min
basics,getting_started/basics
comparison_with_r,getting_started/comparison/comparison_with_r
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1da1948e45268..5e2a2db20b53c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -191,7 +191,7 @@
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = "nature_with_gtoc"
+html_theme = "pandas_sphinx_theme"
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
@@ -204,7 +204,7 @@
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ["themes"]
+# html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index aaf2040156a45..48c722bc16a86 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -1,3 +1,5 @@
+:orphan:
+
.. _ecosystem:
{{ header }}
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index eead28830f861..34bb4f930f175 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -12,6 +12,7 @@ Getting started
.. toctree::
:maxdepth: 2
+ install
overview
10min
basics
diff --git a/doc/source/install.rst b/doc/source/getting_started/install.rst
similarity index 100%
rename from doc/source/install.rst
rename to doc/source/getting_started/install.rst
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index f5669626aa2b3..b7cb8bfbdcebc 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -40,10 +40,8 @@ See the :ref:`overview` for more detail about what's in the library.
{% endif %}
{% if not single_doc %}
What's New in 1.0.0 <whatsnew/v1.0.0>
- install
getting_started/index
user_guide/index
- ecosystem
{% endif -%}
{% if include_api -%}
reference/index
@@ -54,9 +52,9 @@ See the :ref:`overview` for more detail about what's in the library.
{% endif %}
* :doc:`whatsnew/v1.0.0`
-* :doc:`install`
* :doc:`getting_started/index`
+ * :doc:`getting_started/install`
* :doc:`getting_started/overview`
* :doc:`getting_started/10min`
* :doc:`getting_started/basics`
diff --git a/environment.yml b/environment.yml
index 7629fa52e7829..f95af62e912c2 100644
--- a/environment.yml
+++ b/environment.yml
@@ -88,3 +88,5 @@ dependencies:
- xlwt # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- odfpy # pandas.read_excel
- pyreadstat # pandas.read_spss
+ - pip:
+ - git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
diff --git a/requirements-dev.txt b/requirements-dev.txt
index fd8e6378240b4..3849504f640a8 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -59,4 +59,5 @@ xlrd
xlsxwriter
xlwt
odfpy
-pyreadstat
\ No newline at end of file
+pyreadstat
+git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
\ No newline at end of file
diff --git a/web/pandas/getting_started.md b/web/pandas/getting_started.md
index 99a7a9f4b2d60..9682cf90cad6f 100644
--- a/web/pandas/getting_started.md
+++ b/web/pandas/getting_started.md
@@ -4,7 +4,7 @@
The next steps provides the easiest and recommended way to set up your
environment to use pandas. Other installation options can be found in
-the [advanced installation page]({{ base_url}}/docs/install.html).
+the [advanced installation page]({{ base_url}}/docs/getting_started/install.html).
1. Download [Anaconda](https://www.anaconda.com/distribution/) for your operating system and
the latest Python version, run the installer, and follow the steps. Detailed instructions
| closes https://github.com/pandas-dev/pandas/issues/15556/
I want to propose that we start using the new bootstrap-based theme that is being developed at https://github.com/pandas-dev/pandas-sphinx-theme/ for the dev docs.
This way it will already be used for the docs at https://dev.pandas.io/docs/ so it can get some exposure before the next release.
How I did it in this PR is to install it from git master https://github.com/pandas-dev/pandas-sphinx-theme/ instead of moving the actual source into the pandas repo. I would prefer doing it like this for now, as that makes it easier to further iterate on the theme (the other repo is set up with a faster doc build (disabled ipython directive + smaller api) and automatic preview using doctr on travis). In a later stage, we can still move it here if we want (or move the pandas-specific customizations here).
A preview (of a subset, not all API pages) can be seen at https://dev.pandas.io/pandas-sphinx-theme/
There are still several "must todo's" to get the theme in a decent enough state for a release. There are some open issues on the theme repo, but I can also open an issue here to keep track of those.
And of course, feedback on the theme is very welcome.
I made two other changes:
- moved the install.rst into the getting_started directory (so it is not a top-level navigation item). Given that the new website will have a page with a quick install linking to this more advanced install page, I think that is fine (and also, on a reworked home page of the docs it could also get a prominent place without being in the navigation bar)
- Idem for the ecosystem page, as this will also get more exposure on the new markdown website (we actually need to remove that page / reconcile it with the website, but let's leave that for another PR) | https://api.github.com/repos/pandas-dev/pandas/pulls/28623 | 2019-09-25T21:07:08Z | 2019-10-04T20:12:38Z | 2019-10-04T20:12:38Z | 2019-10-04T20:12:55Z |
ENH: Allow plotting backend to be an option | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 8a481f194d408..b40a64420a0be 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -409,6 +409,7 @@ Plotting
- Bug where :meth:`DataFrame.boxplot` would not accept a `color` parameter like `DataFrame.plot.box` (:issue:`26214`)
- Bug in the ``xticks`` argument being ignored for :meth:`DataFrame.plot.bar` (:issue:`14119`)
- :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`)
+- :meth:`DataFrame.plot` now allow a ``backend`` keyword arugment to allow changing between backends in one session (:issue:`28619`).
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index c11d94c381d6d..6fc5b03920cba 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -3,7 +3,7 @@
from pandas._config import get_option
-from pandas.util._decorators import Appender
+from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
@@ -22,6 +22,7 @@ def hist_series(
yrot=None,
figsize=None,
bins=10,
+ backend=None,
**kwargs
):
"""
@@ -50,6 +51,14 @@ def hist_series(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
+ backend : str, default None
+ Backend to use instead of the backend specified in the option
+ ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+ specify the ``plotting.backend`` for the whole session, set
+ ``pd.options.plotting.backend``.
+
+ .. versionadded:: 1.0.0
+
**kwargs
To be passed to the actual plotting function.
@@ -62,7 +71,7 @@ def hist_series(
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
- plot_backend = _get_plot_backend()
+ plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
@@ -93,6 +102,7 @@ def hist_frame(
figsize=None,
layout=None,
bins=10,
+ backend=None,
**kwargs
):
"""
@@ -145,6 +155,14 @@ def hist_frame(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
+ backend : str, default None
+ Backend to use instead of the backend specified in the option
+ ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+ specify the ``plotting.backend`` for the whole session, set
+ ``pd.options.plotting.backend``.
+
+ .. versionadded:: 1.0.0
+
**kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
@@ -172,7 +190,7 @@ def hist_frame(
... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
- plot_backend = _get_plot_backend()
+ plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
column=column,
@@ -192,181 +210,198 @@ def hist_frame(
)
-def boxplot(
- data,
- column=None,
- by=None,
- ax=None,
- fontsize=None,
- rot=0,
- grid=True,
- figsize=None,
- layout=None,
- return_type=None,
- **kwargs
-):
- """
- Make a box plot from DataFrame columns.
-
- Make a box-and-whisker plot from DataFrame columns, optionally grouped
- by some other columns. A box plot is a method for graphically depicting
- groups of numerical data through their quartiles.
- The box extends from the Q1 to Q3 quartile values of the data,
- with a line at the median (Q2). The whiskers extend from the edges
- of box to show the range of the data. The position of the whiskers
- is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box.
- Outlier points are those past the end of the whiskers.
-
- For further details see
- Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
-
- Parameters
- ----------
- column : str or list of str, optional
- Column name or list of names, or vector.
- Can be any valid input to :meth:`pandas.DataFrame.groupby`.
- by : str or array-like, optional
- Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
- One box-plot will be done per value of columns in `by`.
- ax : object of class matplotlib.axes.Axes, optional
- The matplotlib axes to be used by boxplot.
- fontsize : float or str
- Tick label font size in points or as a string (e.g., `large`).
- rot : int or float, default 0
- The rotation angle of labels (in degrees)
- with respect to the screen coordinate system.
- grid : bool, default True
- Setting this to True will show the grid.
- figsize : A tuple (width, height) in inches
- The size of the figure to create in matplotlib.
- layout : tuple (rows, columns), optional
- For example, (3, 5) will display the subplots
- using 3 columns and 5 rows, starting from the top-left.
- return_type : {'axes', 'dict', 'both'} or None, default 'axes'
- The kind of object to return. The default is ``axes``.
-
- * 'axes' returns the matplotlib axes the boxplot is drawn on.
- * 'dict' returns a dictionary whose values are the matplotlib
- Lines of the boxplot.
- * 'both' returns a namedtuple with the axes and dict.
- * when grouping with ``by``, a Series mapping columns to
- ``return_type`` is returned.
-
- If ``return_type`` is `None`, a NumPy array
- of axes with the same shape as ``layout`` is returned.
- **kwargs
- All other plotting keyword arguments to be passed to
- :func:`matplotlib.pyplot.boxplot`.
-
- Returns
- -------
- result
- See Notes.
-
- See Also
- --------
- Series.plot.hist: Make a histogram.
- matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
-
- Notes
- -----
- The return type depends on the `return_type` parameter:
-
- * 'axes' : object of class matplotlib.axes.Axes
- * 'dict' : dict of matplotlib.lines.Line2D objects
- * 'both' : a namedtuple with structure (ax, lines)
-
- For data grouped with ``by``, return a Series of the above or a numpy
- array:
-
- * :class:`~pandas.Series`
- * :class:`~numpy.array` (for ``return_type = None``)
+_boxplot_doc = """
+Make a box plot from DataFrame columns.
+
+Make a box-and-whisker plot from DataFrame columns, optionally grouped
+by some other columns. A box plot is a method for graphically depicting
+groups of numerical data through their quartiles.
+The box extends from the Q1 to Q3 quartile values of the data,
+with a line at the median (Q2). The whiskers extend from the edges
+of box to show the range of the data. The position of the whiskers
+is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box.
+Outlier points are those past the end of the whiskers.
+
+For further details see
+Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
+
+Parameters
+----------
+column : str or list of str, optional
+ Column name or list of names, or vector.
+ Can be any valid input to :meth:`pandas.DataFrame.groupby`.
+by : str or array-like, optional
+ Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
+ One box-plot will be done per value of columns in `by`.
+ax : object of class matplotlib.axes.Axes, optional
+ The matplotlib axes to be used by boxplot.
+fontsize : float or str
+ Tick label font size in points or as a string (e.g., `large`).
+rot : int or float, default 0
+ The rotation angle of labels (in degrees)
+ with respect to the screen coordinate system.
+grid : bool, default True
+ Setting this to True will show the grid.
+figsize : A tuple (width, height) in inches
+ The size of the figure to create in matplotlib.
+layout : tuple (rows, columns), optional
+ For example, (3, 5) will display the subplots
+ using 3 columns and 5 rows, starting from the top-left.
+return_type : {'axes', 'dict', 'both'} or None, default 'axes'
+ The kind of object to return. The default is ``axes``.
+
+ * 'axes' returns the matplotlib axes the boxplot is drawn on.
+ * 'dict' returns a dictionary whose values are the matplotlib
+ Lines of the boxplot.
+ * 'both' returns a namedtuple with the axes and dict.
+ * when grouping with ``by``, a Series mapping columns to
+ ``return_type`` is returned.
+
+ If ``return_type`` is `None`, a NumPy array
+ of axes with the same shape as ``layout`` is returned.
+%(backend)s\
+
+**kwargs
+ All other plotting keyword arguments to be passed to
+ :func:`matplotlib.pyplot.boxplot`.
+
+Returns
+-------
+result
+ See Notes.
+
+See Also
+--------
+Series.plot.hist: Make a histogram.
+matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
+
+Notes
+-----
+The return type depends on the `return_type` parameter:
+
+* 'axes' : object of class matplotlib.axes.Axes
+* 'dict' : dict of matplotlib.lines.Line2D objects
+* 'both' : a namedtuple with structure (ax, lines)
+
+For data grouped with ``by``, return a Series of the above or a numpy
+array:
+
+* :class:`~pandas.Series`
+* :class:`~numpy.array` (for ``return_type = None``)
+
+Use ``return_type='dict'`` when you want to tweak the appearance
+of the lines after plotting. In this case a dict containing the Lines
+making up the boxes, caps, fliers, medians, and whiskers is returned.
+
+Examples
+--------
+
+Boxplots can be created for every column in the dataframe
+by ``df.boxplot()`` or indicating the columns to be used:
+
+.. plot::
+ :context: close-figs
+
+ >>> np.random.seed(1234)
+ >>> df = pd.DataFrame(np.random.randn(10, 4),
+ ... columns=['Col1', 'Col2', 'Col3', 'Col4'])
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
+
+Boxplots of variables distributions grouped by the values of a third
+variable can be created using the option ``by``. For instance:
+
+.. plot::
+ :context: close-figs
+
+ >>> df = pd.DataFrame(np.random.randn(10, 2),
+ ... columns=['Col1', 'Col2'])
+ >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
+ ... 'B', 'B', 'B', 'B', 'B'])
+ >>> boxplot = df.boxplot(by='X')
+
+A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
+in order to group the data by combination of the variables in the x-axis:
+
+.. plot::
+ :context: close-figs
+
+ >>> df = pd.DataFrame(np.random.randn(10, 3),
+ ... columns=['Col1', 'Col2', 'Col3'])
+ >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
+ ... 'B', 'B', 'B', 'B', 'B'])
+ >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
+ ... 'B', 'A', 'B', 'A', 'B'])
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+
+The layout of boxplot can be adjusted giving a tuple to ``layout``:
+
+.. plot::
+ :context: close-figs
+
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+ ... layout=(2, 1))
- Use ``return_type='dict'`` when you want to tweak the appearance
- of the lines after plotting. In this case a dict containing the Lines
- making up the boxes, caps, fliers, medians, and whiskers is returned.
+Additional formatting can be done to the boxplot, like suppressing the grid
+(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
+or changing the fontsize (i.e. ``fontsize=15``):
- Examples
- --------
-
- Boxplots can be created for every column in the dataframe
- by ``df.boxplot()`` or indicating the columns to be used:
-
- .. plot::
- :context: close-figs
-
- >>> np.random.seed(1234)
- >>> df = pd.DataFrame(np.random.randn(10,4),
- ... columns=['Col1', 'Col2', 'Col3', 'Col4'])
- >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
-
- Boxplots of variables distributions grouped by the values of a third
- variable can be created using the option ``by``. For instance:
-
- .. plot::
- :context: close-figs
-
- >>> df = pd.DataFrame(np.random.randn(10, 2),
- ... columns=['Col1', 'Col2'])
- >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
- ... 'B', 'B', 'B', 'B', 'B'])
- >>> boxplot = df.boxplot(by='X')
-
- A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
- in order to group the data by combination of the variables in the x-axis:
-
- .. plot::
- :context: close-figs
+.. plot::
+ :context: close-figs
- >>> df = pd.DataFrame(np.random.randn(10,3),
- ... columns=['Col1', 'Col2', 'Col3'])
- >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
- ... 'B', 'B', 'B', 'B', 'B'])
- >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
- ... 'B', 'A', 'B', 'A', 'B'])
- >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+ >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
- The layout of boxplot can be adjusted giving a tuple to ``layout``:
+The parameter ``return_type`` can be used to select the type of element
+returned by `boxplot`. When ``return_type='axes'`` is selected,
+the matplotlib axes on which the boxplot is drawn are returned:
- .. plot::
- :context: close-figs
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
+ >>> type(boxplot)
+ <class 'matplotlib.axes._subplots.AxesSubplot'>
- >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
- ... layout=(2, 1))
+When grouping with ``by``, a Series mapping columns to ``return_type``
+is returned:
- Additional formatting can be done to the boxplot, like suppressing the grid
- (``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
- or changing the fontsize (i.e. ``fontsize=15``):
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+ ... return_type='axes')
+ >>> type(boxplot)
+ <class 'pandas.core.series.Series'>
- .. plot::
- :context: close-figs
+If ``return_type`` is `None`, a NumPy array of axes with the same shape
+as ``layout`` is returned:
- >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
+ >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+ ... return_type=None)
+ >>> type(boxplot)
+ <class 'numpy.ndarray'>
+"""
- The parameter ``return_type`` can be used to select the type of element
- returned by `boxplot`. When ``return_type='axes'`` is selected,
- the matplotlib axes on which the boxplot is drawn are returned:
- >>> boxplot = df.boxplot(column=['Col1','Col2'], return_type='axes')
- >>> type(boxplot)
- <class 'matplotlib.axes._subplots.AxesSubplot'>
+_backend_doc = """\
+backend : str, default None
+ Backend to use instead of the backend specified in the option
+ ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+ specify the ``plotting.backend`` for the whole session, set
+ ``pd.options.plotting.backend``.
- When grouping with ``by``, a Series mapping columns to ``return_type``
- is returned:
+ .. versionadded:: 1.0.0
+"""
- >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
- ... return_type='axes')
- >>> type(boxplot)
- <class 'pandas.core.series.Series'>
- If ``return_type`` is `None`, a NumPy array of axes with the same shape
- as ``layout`` is returned:
-
- >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
- ... return_type=None)
- >>> type(boxplot)
- <class 'numpy.ndarray'>
- """
+@Substitution(backend="")
+@Appender(_boxplot_doc)
+def boxplot(
+ data,
+ column=None,
+ by=None,
+ ax=None,
+ fontsize=None,
+ rot=0,
+ grid=True,
+ figsize=None,
+ layout=None,
+ return_type=None,
+ **kwargs
+):
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.boxplot(
data,
@@ -383,7 +418,8 @@ def boxplot(
)
-@Appender(boxplot.__doc__)
+@Substitution(backend=_backend_doc)
+@Appender(_boxplot_doc)
def boxplot_frame(
self,
column=None,
@@ -395,9 +431,10 @@ def boxplot_frame(
figsize=None,
layout=None,
return_type=None,
+ backend=None,
**kwargs
):
- plot_backend = _get_plot_backend()
+ plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame(
self,
column=column,
@@ -425,6 +462,7 @@ def boxplot_frame_groupby(
layout=None,
sharex=False,
sharey=True,
+ backend=None,
**kwargs
):
"""
@@ -454,6 +492,14 @@ def boxplot_frame_groupby(
Whether y-axes will be shared among subplots.
.. versionadded:: 0.23.1
+ backend : str, default None
+ Backend to use instead of the backend specified in the option
+ ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+ specify the ``plotting.backend`` for the whole session, set
+ ``pd.options.plotting.backend``.
+
+ .. versionadded:: 1.0.0
+
**kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function.
@@ -477,7 +523,7 @@ def boxplot_frame_groupby(
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
- plot_backend = _get_plot_backend()
+ plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame_groupby(
grouped,
subplots=subplots,
@@ -586,6 +632,14 @@ class PlotAccessor(PandasObject):
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
+ backend : str, default None
+ Backend to use instead of the backend specified in the option
+ ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+ specify the ``plotting.backend`` for the whole session, set
+ ``pd.options.plotting.backend``.
+
+ .. versionadded:: 1.0.0
+
**kwargs
Options to pass to matplotlib plotting method.
@@ -715,7 +769,7 @@ def _get_call_args(backend_name, data, args, kwargs):
return x, y, kind, kwargs
def __call__(self, *args, **kwargs):
- plot_backend = _get_plot_backend()
+ plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index 41b1a88b15acb..c84b78c79e771 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -9,7 +9,7 @@
import pandas
dummy_backend = types.ModuleType("pandas_dummy_backend")
-setattr(dummy_backend, "plot", lambda *args, **kwargs: None)
+setattr(dummy_backend, "plot", lambda *args, **kwargs: "used_dummy")
@pytest.fixture
@@ -38,6 +38,14 @@ def test_backend_is_correct(monkeypatch, restore_backend):
)
+def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend):
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
+ df = pandas.DataFrame([1, 2, 3])
+
+ assert pandas.get_option("plotting.backend") == "matplotlib"
+ assert df.plot(backend="pandas_dummy_backend") == "used_dummy"
+
+
@td.skip_if_no_mpl
def test_register_entrypoint(restore_backend):
| - [x] closes #28619
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28622 | 2019-09-25T19:42:15Z | 2019-11-05T17:07:09Z | 2019-11-05T17:07:09Z | 2019-11-05T17:07:14Z |
DEPR: Deprecate Index.set_value | diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 576f734d517aa..dd59a99b3df9e 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -166,7 +166,6 @@ Selecting
Index.get_slice_bound
Index.get_value
Index.get_values
- Index.set_value
Index.isin
Index.slice_indexer
Index.slice_locs
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 2668734031ee1..16d23d675a8bb 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -123,7 +123,9 @@ Documentation Improvements
Deprecations
~~~~~~~~~~~~
--
+- ``Index.set_value`` has been deprecated. For a given index ``idx``, array ``arr``,
+ value in ``idx`` of ``idx_val`` and a new value of ``val``, ``idx.set_value(arr, idx_val, val)``
+ is equivalent to ``arr[idx.get_loc(idx_val)] = val``, which should be used instead (:issue:`28621`).
-
.. _whatsnew_1000.prior_deprecations:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 0b5f9fb61fce8..afa4f1a5a8c76 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -205,7 +205,9 @@ class Index(IndexOpsMixin, PandasObject):
"""
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"])
+ _deprecations = DirNamesMixin._deprecations | frozenset(
+ ["tolist", "dtype_str", "set_value"]
+ )
# To hand over control to subclasses
_join_precedence = 1
@@ -4680,10 +4682,20 @@ def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray.
+ .. deprecated:: 1.0
+
Notes
-----
Only use this if you know what you're doing.
"""
+ warnings.warn(
+ (
+ "The 'set_value' method is deprecated, and "
+ "will be removed in a future version."
+ ),
+ FutureWarning,
+ stacklevel=2,
+ )
self._engine.set_value(
com.values_from_object(arr), com.values_from_object(key), value
)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index d1ed79118d2fa..82d5ddd1ac358 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1908,16 +1908,21 @@ def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
- def test_get_set_value(self):
+ def test_set_value_deprecated(self):
+ # GH 28621
+ idx = self.create_index()
+ arr = np.array([1, 2, 3])
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_value(arr, idx[1], 80)
+ assert arr[1] == 80
+
+ def test_get_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
- self.dateIndex.set_value(values, date, 10)
- assert values[67] == 10
-
@pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
@pytest.mark.parametrize(
"index,expected",
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Deprecates ``Index.set_value``. This is a very little used and confusing method IMO.
For a given index ``idx``, array ``arr``, value ``idx_val`` in ``idx`` and a new value of ``val``, ``idx.set_value(arr, idx_val, val)`` is equivalent to ``arr[idx.get_loc(idx_val) = val``, which is more standard and should be used instead.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28621 | 2019-09-25T19:32:53Z | 2019-10-03T06:56:11Z | 2019-10-03T06:56:10Z | 2019-10-03T06:56:14Z |
Backport PR #28614 on branch 0.25.x (CI: troubleshoot s3fs failures) | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index 48e9e3b6896f3..847e9f66d2c72 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -29,7 +29,7 @@ dependencies:
- python-snappy
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scikit-learn
- scipy
- sqlalchemy
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index e9c5dadbc924a..d54708d48a65e 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -18,7 +18,7 @@ dependencies:
- python-dateutil
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scipy
- sqlalchemy
- xlrd
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 4bc490e202818..2fc4d160c8c27 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -17,7 +17,7 @@ dependencies:
- pytest-xdist
- pytest-mock
- hypothesis>=3.58.0
- - s3fs
+ - s3fs<0.3
- pip
- pyreadstat
- pip:
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 9750a36d9350b..b08868f311f76 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -782,7 +782,7 @@ def test_categorical_no_compress():
def test_sort():
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
| Backport PR #28614: CI: troubleshoot s3fs failures | https://api.github.com/repos/pandas-dev/pandas/pulls/28615 | 2019-09-25T15:48:08Z | 2019-09-26T12:24:13Z | 2019-09-26T12:24:13Z | 2019-09-26T12:24:13Z |
CI: troubleshoot s3fs failures | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index b2a74fceaf0fa..e4e917d13990c 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -29,7 +29,7 @@ dependencies:
- python-snappy
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scikit-learn
- scipy
- sqlalchemy
diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml
index e9c5dadbc924a..d54708d48a65e 100644
--- a/ci/deps/travis-36-slow.yaml
+++ b/ci/deps/travis-36-slow.yaml
@@ -18,7 +18,7 @@ dependencies:
- python-dateutil
- python=3.6.*
- pytz
- - s3fs
+ - s3fs<0.3
- scipy
- sqlalchemy
- xlrd
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 903636f2fe060..440ca6c480b87 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -17,7 +17,7 @@ dependencies:
- pytest-xdist>=1.29.0
- pytest-mock
- hypothesis>=3.58.0
- - s3fs
+ - s3fs<0.3
- pip
- pyreadstat
- pip:
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index e09af3fd48ee6..fcc0aa3b1c015 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -782,7 +782,7 @@ def test_categorical_no_compress():
def test_sort():
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
| xref #28612. Recent failures used 0.3.4, the last passing build I see used 0.2.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/28614 | 2019-09-25T14:40:37Z | 2019-09-25T15:47:43Z | 2019-09-25T15:47:43Z | 2019-09-25T16:00:29Z |
BUG: value_counts can handle the case even with empty groups (#28479) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 7ca93d7d75854..fbda7011e066d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -299,6 +299,7 @@ Other
- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`)
- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`)
- :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`)
+- :meth:`SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue: 28479)
.. _whatsnew_1000.contributors:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..fbbcfc06bbdc3 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1259,7 +1259,14 @@ def value_counts(
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
- labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
+ try:
+ labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
+ except ValueError:
+ # If applying rep to recons_labels go fail, use ids which has no
+ # consecutive duplicates instead.
+ _ids_idx = np.ones(len(ids), dtype=bool)
+ _ids_idx[1:] = ids[1:] != ids[:-1]
+ labels = list(map(rep, [ids[_ids_idx]])) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index f8bd8843ab7e3..b7236ab491011 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series, date_range
+from pandas import DataFrame, MultiIndex, Series, date_range, Grouper
from pandas.util import testing as tm
@@ -79,3 +79,28 @@ def rebuild_index(df):
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
+
+
+@pytest.mark.parametrize(
+ "freq, size, frac", product(["1D", "2D", "1W", "1Y"], [100, 1000], [0.1, 0.5, 1])
+)
+def test_series_groupby_value_counts_with_grouper(freq, size, frac):
+ np.random.seed(42)
+
+ df = DataFrame.from_dict(
+ {
+ "date": date_range("2019-09-25", periods=size),
+ "name": np.random.choice(list("abcd"), size),
+ }
+ ).sample(frac=frac)
+
+ gr = df.groupby(Grouper(key="date", freq=freq))["name"]
+
+ # have to sort on index because of unstable sort on values xref GH9212
+ result = gr.value_counts().sort_index()
+ expected = gr.apply(Series.value_counts).sort_index()
+ expected.index.names = (
+ result.index.names
+ ) # .apply(Series.value_counts) can't create all names
+
+ tm.assert_series_equal(result, expected)
| * If applying rep to recons_labels go fail, use ids which has no
consecutive duplicates instead.
- [x] closes #28479
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xuancong84 found that value_counts() crashes if `groupby` object contains empty groups.
However, even though I made the construction of DataFrame don't skip empty rows, it still crashed.
Till then, I already tried in many ways though, in this time I tried to correct the callee `self.grouper.recons_labels`. After several tests, I found that If freq of `Grouper` is too long so that it has empty groups in some periods then it crashes. And also have found that this is solved by using `ids` which has no consecutive duplicates instead of `self.grouper.recons_labels`. | https://api.github.com/repos/pandas-dev/pandas/pulls/28608 | 2019-09-25T03:56:12Z | 2019-09-26T13:05:58Z | null | 2019-09-26T13:05:59Z |
BUG: value_counts can handle the case even with empty groups (#28479) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 7ca93d7d75854..fbda7011e066d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -299,6 +299,7 @@ Other
- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`)
- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`)
- :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`)
+- :meth:`SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue: 28479)
.. _whatsnew_1000.contributors:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..fbbcfc06bbdc3 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1259,7 +1259,14 @@ def value_counts(
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
- labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
+ try:
+ labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
+ except ValueError:
+ # If applying rep to recons_labels go fail, use ids which has no
+ # consecutive duplicates instead.
+ _ids_idx = np.ones(len(ids), dtype=bool)
+ _ids_idx[1:] = ids[1:] != ids[:-1]
+ labels = list(map(rep, [ids[_ids_idx]])) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index f8bd8843ab7e3..40bdbdc0b2ca6 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series, date_range
+from pandas import DataFrame, MultiIndex, Series, date_range, Grouper
from pandas.util import testing as tm
@@ -79,3 +79,28 @@ def rebuild_index(df):
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
+
+@pytest.mark.parametrize(
+ "freq, size, frac", product(["1D", "2D", "1W", "1Y"], [100, 1000], [0.1, 0.5, 1])
+)
+def test_series_groupby_value_counts_with_grouper(freq, size, frac):
+ np.random.seed(42)
+
+ df = DataFrame.from_dict(
+ {
+ "date": date_range("2019-09-25", periods=size),
+ "name": np.random.choice(list("abcd"), size),
+ }
+ ).sample(frac=frac)
+
+ gr = df.groupby(Grouper(key="date", freq=freq))["name"]
+
+ # have to sort on index because of unstable sort on values xref GH9212
+ result = gr.value_counts().sort_index()
+ expected = gr.apply(Series.value_counts).sort_index()
+ expected.index.names = (
+ result.index.names
+ ) # .apply(Series.value_counts) can't create all names
+
+ tm.assert_series_equal(result, expected)
+
| * If applying rep to recons_labels go fail, use ids which has no
consecutive duplicates instead.
- [x] closes #28479
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xuancong84 found that value_counts() crashes if `groupby` object contains empty groups.
However, even though I made the construction of DataFrame don't skip empty rows, it still crashed.
Till then, I already tried in many ways though, in this time I tried to correct the callee `self.grouper.recons_labels`. After several tests, I found that If freq of `Grouper` is too long so that it has empty groups in some periods then it crashes. And also have found that this is solved by using `ids` which has no consecutive duplicates instead of `self.grouper.recons_labels`. | https://api.github.com/repos/pandas-dev/pandas/pulls/28607 | 2019-09-25T03:37:44Z | 2019-09-25T03:55:23Z | null | 2019-09-25T05:17:39Z |
fix unnecessary sort in pd.read_json and orient="index" | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 751db2b88069d..fd1c1271a5e37 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -311,6 +311,7 @@ I/O
- Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`)
- Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`)
- Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`)
+- Bug in :meth:`DataFrame.read_json` where using ``orient="index"`` would not maintain the order (:issue:`28557`)
- Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`)
Plotting
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 73f4985e201f1..6ce288890b6c7 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -12,7 +12,7 @@
from pandas.core.dtypes.common import ensure_str, is_period_dtype
-from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
+from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime
from pandas._typing import Scalar
from pandas.core.reshape.concat import concat
@@ -1112,15 +1112,13 @@ def _parse_no_numpy(self):
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
- self.obj = (
- DataFrame.from_dict(
- loads(json, precise_float=self.precise_float),
- dtype=None,
- orient="index",
- )
- .sort_index(axis="columns")
- .sort_index(axis="index")
+ self.obj = DataFrame.from_dict(
+ loads(json, precise_float=self.precise_float),
+ dtype=None,
+ orient="index",
)
+ if compat.PY35:
+ self.obj = self.obj.sort_index(axis="columns").sort_index(axis="index")
elif orient == "table":
self.obj = parse_table_schema(json, precise_float=self.precise_float)
else:
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 2195bf248f43a..8e28740c70bad 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -166,8 +166,7 @@ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
expected = self.frame.copy()
- if not numpy and (orient == "index" or (PY35 and orient == "columns")):
- # TODO: debug why sort is required
+ if not numpy and PY35 and orient in ("index", "columns"):
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@@ -181,7 +180,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
- if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ if not numpy and PY35 and orient in ("index", "columns"):
expected = expected.sort_index()
if (
@@ -216,7 +215,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
)
expected = df.copy()
- if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ if not numpy and PY35 and orient in ("index", "columns"):
expected = expected.sort_index()
if not dtype:
| - [x] closes #28557
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28606 | 2019-09-25T01:29:14Z | 2019-10-09T14:50:47Z | 2019-10-09T14:50:46Z | 2019-10-11T04:12:27Z |
CLN: Exception x5 | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 0a3f4ed3cc91d..bd74180403ad9 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -519,7 +519,7 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit,
try:
ts = parse_datetime_string(ts, dayfirst=dayfirst,
yearfirst=yearfirst)
- except Exception:
+ except (ValueError, OverflowError):
raise ValueError("could not convert string to Timestamp")
return convert_to_tsobject(ts, tz, unit, dayfirst, yearfirst)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index d099a77a77044..ca70c8af45f2f 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -309,9 +309,9 @@ cdef parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False,
parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME,
dayfirst=dayfirst, yearfirst=yearfirst,
ignoretz=False, tzinfos=None)
- except Exception as e:
+ except (ValueError, OverflowError) as err:
# TODO: allow raise of errors within instead
- raise DateParseError(e)
+ raise DateParseError(err)
if parsed is None:
raise DateParseError("Could not parse {dstr}".format(dstr=date_string))
return parsed, parsed, reso
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 67fa79ad5da8c..1c9bd01b16739 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -281,17 +281,15 @@ def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
- try:
- # set_visible will not be effective if
- # minor axis has NullLocator and NullFormattor (default)
- if isinstance(axis.get_minor_locator(), ticker.NullLocator):
- axis.set_minor_locator(ticker.AutoLocator())
- if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
- axis.set_minor_formatter(ticker.FormatStrFormatter(""))
- for t in axis.get_minorticklabels():
- t.set_visible(False)
- except Exception: # pragma no cover
- raise
+ # set_visible will not be effective if
+ # minor axis has NullLocator and NullFormattor (default)
+ if isinstance(axis.get_minor_locator(), ticker.NullLocator):
+ axis.set_minor_locator(ticker.AutoLocator())
+ if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
+ axis.set_minor_formatter(ticker.FormatStrFormatter(""))
+ for t in axis.get_minorticklabels():
+ t.set_visible(False)
+
axis.get_label().set_visible(False)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index e09af3fd48ee6..fcc0aa3b1c015 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -782,7 +782,7 @@ def test_categorical_no_compress():
def test_sort():
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
| https://api.github.com/repos/pandas-dev/pandas/pulls/28605 | 2019-09-25T00:40:55Z | 2019-09-25T19:58:07Z | 2019-09-25T19:58:07Z | 2019-09-25T20:01:47Z | |
CLN: Assorted typings | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 6e73e1636a75b..002bbcc63d04f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -176,7 +176,6 @@ def _reconstruct_data(values, dtype, original):
-------
Index for extension types, otherwise ndarray casted to dtype
"""
- from pandas import Index
if is_extension_array_dtype(dtype):
values = dtype.construct_array_type()._from_sequence(values)
@@ -184,7 +183,7 @@ def _reconstruct_data(values, dtype, original):
values = values.astype(dtype)
# we only support object dtypes bool Index
- if isinstance(original, Index):
+ if isinstance(original, ABCIndexClass):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
@@ -833,7 +832,7 @@ def duplicated(values, keep="first"):
return f(values, keep=keep)
-def mode(values, dropna=True):
+def mode(values, dropna: bool = True):
"""
Returns the mode(s) of an array.
@@ -1888,7 +1887,7 @@ def searchsorted(arr, value, side="left", sorter=None):
}
-def diff(arr, n, axis=0):
+def diff(arr, n: int, axis: int = 0):
"""
difference of n between self,
analogous to s-s.shift(n)
@@ -1904,7 +1903,6 @@ def diff(arr, n, axis=0):
Returns
-------
shifted
-
"""
n = int(n)
@@ -1935,13 +1933,15 @@ def diff(arr, n, axis=0):
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
- res_indexer = [slice(None)] * arr.ndim
- res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
- res_indexer = tuple(res_indexer)
-
- lag_indexer = [slice(None)] * arr.ndim
- lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
- lag_indexer = tuple(lag_indexer)
+ # To keep mypy happy, _res_indexer is a list while res_indexer is
+ # a tuple, ditto for lag_indexer.
+ _res_indexer = [slice(None)] * arr.ndim
+ _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
+ res_indexer = tuple(_res_indexer)
+
+ _lag_indexer = [slice(None)] * arr.ndim
+ _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
+ lag_indexer = tuple(_lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index bcdbf0855cbb4..4bcc53606aeca 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -26,7 +26,7 @@
_default_hash_key = "0123456789123456"
-def _combine_hash_arrays(arrays, num_items):
+def _combine_hash_arrays(arrays, num_items: int):
"""
Parameters
----------
@@ -55,7 +55,11 @@ def _combine_hash_arrays(arrays, num_items):
def hash_pandas_object(
- obj, index=True, encoding="utf8", hash_key=None, categorize=True
+ obj,
+ index: bool = True,
+ encoding: str = "utf8",
+ hash_key=None,
+ categorize: bool = True,
):
"""
Return a data hash of the Index/Series/DataFrame.
@@ -125,7 +129,10 @@ def hash_pandas_object(
for _ in [None]
)
num_items += 1
- hashes = itertools.chain(hashes, index_hash_generator)
+
+ # keep `hashes` specifically a generator to keep mypy happy
+ _hashes = itertools.chain(hashes, index_hash_generator)
+ hashes = (x for x in _hashes)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
@@ -179,7 +186,7 @@ def hash_tuples(vals, encoding="utf8", hash_key=None):
return h
-def hash_tuple(val, encoding="utf8", hash_key=None):
+def hash_tuple(val, encoding: str = "utf8", hash_key=None):
"""
Hash a single tuple efficiently
@@ -201,7 +208,7 @@ def hash_tuple(val, encoding="utf8", hash_key=None):
return h
-def _hash_categorical(c, encoding, hash_key):
+def _hash_categorical(c, encoding: str, hash_key: str):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
@@ -239,7 +246,7 @@ def _hash_categorical(c, encoding, hash_key):
return result
-def hash_array(vals, encoding="utf8", hash_key=None, categorize=True):
+def hash_array(vals, encoding: str = "utf8", hash_key=None, categorize: bool = True):
"""
Given a 1d array, return an array of deterministic integers.
@@ -317,7 +324,7 @@ def hash_array(vals, encoding="utf8", hash_key=None, categorize=True):
return vals
-def _hash_scalar(val, encoding="utf8", hash_key=None):
+def _hash_scalar(val, encoding: str = "utf8", hash_key=None):
"""
Hash scalar value
| Broken off from abandoned local branches.
Also fixes one of the problems currently afflicting the CI in tests.groupby.test_categorical | https://api.github.com/repos/pandas-dev/pandas/pulls/28604 | 2019-09-25T00:00:34Z | 2019-10-01T04:00:19Z | 2019-10-01T04:00:19Z | 2019-10-01T13:33:37Z |
OPS: Remove mask_cmp_op fallback behavior | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index a3d75d69e1e82..a78bc07ac2715 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -199,7 +199,7 @@ Timezones
Numeric
^^^^^^^
- Bug in :meth:`DataFrame.quantile` with zero-column :class:`DataFrame` incorrectly raising (:issue:`23925`)
--
+- :class:`DataFrame` inequality comparisons with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`)
-
Conversion
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 4f027843fbac1..eb901630b753a 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -28,7 +28,7 @@
ABCIndexClass,
ABCSeries,
)
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.missing import isna
from pandas._typing import ArrayLike
from pandas.core.construction import array, extract_array
@@ -354,38 +354,6 @@ def fill_binop(left, right, fill_value):
return left, right
-def mask_cmp_op(x, y, op):
- """
- Apply the function `op` to only non-null points in x and y.
-
- Parameters
- ----------
- x : array-like
- y : array-like
- op : binary operation
-
- Returns
- -------
- result : ndarray[bool]
- """
- xrav = x.ravel()
- result = np.empty(x.size, dtype=bool)
- if isinstance(y, (np.ndarray, ABCSeries)):
- yrav = y.ravel()
- mask = notna(xrav) & notna(yrav)
- result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask])))
- else:
- mask = notna(xrav)
- result[mask] = op(np.array(list(xrav[mask])), y)
-
- if op == operator.ne: # pragma: no cover
- np.putmask(result, ~mask, True)
- else:
- np.putmask(result, ~mask, False)
- result = result.reshape(x.shape)
- return result
-
-
# -----------------------------------------------------------------------------
# Dispatch logic
@@ -905,14 +873,6 @@ def _flex_comp_method_FRAME(cls, op, special):
op_name = _get_op_name(op, special)
default_axis = _get_frame_op_default_axis(op_name)
- def na_op(x, y):
- try:
- with np.errstate(invalid="ignore"):
- result = op(x, y)
- except TypeError:
- result = mask_cmp_op(x, y, op)
- return result
-
doc = _flex_comp_doc_FRAME.format(
op_name=op_name, desc=_op_descriptions[op_name]["desc"]
)
@@ -926,16 +886,16 @@ def f(self, other, axis=default_axis, level=None):
# Another DataFrame
if not self._indexed_same(other):
self, other = self.align(other, "outer", level=level, copy=False)
- new_data = dispatch_to_series(self, other, na_op, str_rep)
+ new_data = dispatch_to_series(self, other, op, str_rep)
return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
- self, other, na_op, fill_value=None, axis=axis, level=level
+ self, other, op, fill_value=None, axis=axis, level=level
)
else:
# in this case we always have `np.ndim(other) == 0`
- new_data = dispatch_to_series(self, other, na_op)
+ new_data = dispatch_to_series(self, other, op)
return self._construct_result(new_data)
f.__name__ = op_name
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index fc3640503e385..3b46e834933b3 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -235,21 +235,46 @@ def _test_seq(df, idx_ser, col_ser):
rs = df.le(df)
assert not rs.loc[0, 0]
+ def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
- rs = df.gt(df2)
- assert not rs.values.any()
+
+ msg = "|".join(
+ [
+ "'>' not supported between instances of '.*' and 'complex'",
+ r"unorderable types: .*complex\(\)", # PY35
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ # inequalities are not well-defined for complex numbers
+ df.gt(df2)
+ with pytest.raises(TypeError, match=msg):
+ # regression test that we get the same behavior for Series
+ df["a"].gt(df2["a"])
+ with pytest.raises(TypeError, match=msg):
+ # Check that we match numpy behavior here
+ df.values > df2.values
+
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
- rs = df3.gt(2j)
- assert not rs.values.any()
+ with pytest.raises(TypeError, match=msg):
+ # inequalities are not well-defined for complex numbers
+ df3.gt(2j)
+ with pytest.raises(TypeError, match=msg):
+ # regression test that we get the same behavior for Series
+ df3["a"].gt(2j)
+ with pytest.raises(TypeError, match=msg):
+ # Check that we match numpy behavior here
+ df3.values > 2j
+
+ def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
| - [x] closes #28079
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #28050.
To make the Series vs DataFrame behavior consistent, the two main options are a) change the DataFrame behavior (this PR) or b) change the Series behavior. The latter is complicated by the fact that for object dtypes the Series comparisons go through comp_method_OBJECT_ARRAY instead of the numpy op, so we would still have to change the complex-case test changed here. | https://api.github.com/repos/pandas-dev/pandas/pulls/28601 | 2019-09-24T21:19:22Z | 2019-09-26T12:25:29Z | 2019-09-26T12:25:28Z | 2019-09-27T17:30:07Z |
REF/TST: Corner cases for op(DataFrame, Series) | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 4f027843fbac1..1f658296a559e 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -785,18 +785,9 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N
return self._combine_match_index(other, func, level=level)
else:
return self._combine_match_columns(other, func, level=level)
- else:
- if not len(other):
- return self * np.nan
-
- if not len(self):
- # Ambiguous case, use _series so works with DataFrame
- return self._constructor(
- data=self._series, index=self.index, columns=self.columns
- )
- # default axis is columns
- return self._combine_match_columns(other, func, level=level)
+ # default axis is columns
+ return self._combine_match_columns(other, func, level=level)
def _align_method_FRAME(left, right, axis):
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index fc3640503e385..da399750c9bcd 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -663,3 +663,34 @@ def test_operations_with_interval_categories_index(self, all_arithmetic_operator
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
+
+
+def test_frame_with_zero_len_series_corner_cases():
+ # GH#28600
+ # easy all-float case
+ df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
+ ser = pd.Series(dtype=np.float64)
+
+ result = df + ser
+ expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ result = df == ser
+ expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+ # non-float case should not raise on comparison
+ df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
+ result = df2 == ser
+ expected = pd.DataFrame(False, index=df.index, columns=df.columns)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_zero_len_frame_with_series_corner_cases():
+ # GH#28600
+ df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
+ ser = pd.Series([1, 2], index=["A", "B"])
+
+ result = df + ser
+ expected = df
+ tm.assert_frame_equal(result, expected)
| We have two special cases in `_combine_series_frame` that are never tested ATM. This adds tests for them, then notes that the special case handling code is unnecessary and this can now fall through to the general case code. In a follow-up, we'll be able to simplify _combine_series_frame further, but for now I want to keep the changed logic obvious.
Also note that the removed
```
if not len(other):
return self * np.nan
```
is actually wrong in the non-float test case in L681-685 in the test file. | https://api.github.com/repos/pandas-dev/pandas/pulls/28600 | 2019-09-24T17:58:59Z | 2019-09-26T12:27:47Z | 2019-09-26T12:27:47Z | 2019-09-26T14:47:19Z |
Fix docstring error | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 152983451bc38..eb30affc6a5af 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7479,7 +7479,10 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
- *args, **kwargs
+ *args : iterable, optional
+ Positional argument have no effect but might be accepted
+ for compatibility with numpy.
+ **kwargs : mapping, optional
Additional keywords have no effect but might be accepted
for compatibility with numpy.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28598 | 2019-09-24T16:57:25Z | 2019-10-11T22:16:32Z | null | 2019-10-11T22:16:32Z |
BUG: Timedelta not formatted correctly in to_json | diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py
index 5c1d39776b91c..e7baf60b9f65b 100644
--- a/asv_bench/benchmarks/io/json.py
+++ b/asv_bench/benchmarks/io/json.py
@@ -66,7 +66,8 @@ class ToJSON(BaseIO):
fname = "__test__.json"
params = [
["split", "columns", "index", "values", "records"],
- ["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"],
+ ["df", "df_date_idx", "df_td", "df_td_int_ts", "df_int_floats",
+ "df_int_float_str"],
]
param_names = ["orient", "frame"]
@@ -81,6 +82,13 @@ def setup(self, orient, frame):
strings = tm.makeStringIndex(N)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
+ self.df_td = DataFrame(
+ {
+ "td_1": timedeltas,
+ "td_2": timedeltas
+ },
+ index=index,
+ )
self.df_td_int_ts = DataFrame(
{
"td_1": timedeltas,
@@ -118,6 +126,10 @@ def setup(self, orient, frame):
def time_to_json(self, orient, frame):
getattr(self, frame).to_json(self.fname, orient=orient)
+ def time_to_json_iso(self, orient, frame):
+ getattr(self, frame).to_json(self.fname, orient=orient,
+ date_format="iso")
+
def peakmem_to_json(self, orient, frame):
getattr(self, frame).to_json(self.fname, orient=orient)
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 14682b706f924..99b476d6e0ea9 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -63,8 +63,9 @@ I/O
- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`).
- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
+- Bug in :meth:`DataFrame.to_json` and :meth:`Series.to_json` where :class:`Timedelta` was not correctly formatted when `date_format="iso"` (:issue:`28256`).
-
--
+
Plotting
^^^^^^^^
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 22c42acea0150..5889e635127de 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -735,12 +735,20 @@ int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) {
NpyArr_freeItemValue(obj, tc);
if (PyArray_ISDATETIME(npyarr->array)) {
- PRINTMARK();
- GET_TC(tc)->itemValue = obj;
- Py_INCREF(obj);
- ((PyObjectEncoder *)tc->encoder)->npyType = PyArray_TYPE(npyarr->array);
- ((PyObjectEncoder *)tc->encoder)->npyValue = npyarr->dataptr;
- ((PyObjectEncoder *)tc->encoder)->npyCtxtPassthru = npyarr;
+ if (PyArray_TYPE(npyarr->array) == NPY_TIMEDELTA) {
+ PRINTMARK();
+ PyObject *item = npyarr->getitem(npyarr->dataptr, npyarr->array);
+ PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item);
+ GET_TC(tc)->itemValue = td;
+ Py_DECREF(item);
+ } else {
+ PRINTMARK();
+ GET_TC(tc)->itemValue = obj;
+ Py_INCREF(obj);
+ ((PyObjectEncoder *)tc->encoder)->npyType = PyArray_TYPE(npyarr->array);
+ ((PyObjectEncoder *)tc->encoder)->npyValue = npyarr->dataptr;
+ ((PyObjectEncoder *)tc->encoder)->npyCtxtPassthru = npyarr;
+ }
} else {
PRINTMARK();
GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array);
@@ -1917,47 +1925,54 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
tc->type = enc->datetimeIso ? JT_UTF8 : JT_LONG;
return;
} else if (PyDelta_Check(obj)) {
- if (PyObject_HasAttrString(obj, "value")) {
+ if (enc->datetimeIso) {
PRINTMARK();
- value = get_long_attr(obj, "value");
+ pc->PyTypeToJSON = PyTimeToJSON;
+ tc->type = JT_UTF8;
+
} else {
- PRINTMARK();
- value = total_seconds(obj) * 1000000000LL; // nanoseconds per second
- }
+ if (PyObject_HasAttrString(obj, "value")) {
+ PRINTMARK();
+ value = get_long_attr(obj, "value");
+ } else {
+ PRINTMARK();
+ value = total_seconds(obj) * 1000000000LL; // nanoseconds per second
+ }
- base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
- switch (base) {
- case NPY_FR_ns:
- break;
- case NPY_FR_us:
- value /= 1000LL;
- break;
- case NPY_FR_ms:
- value /= 1000000LL;
- break;
- case NPY_FR_s:
- value /= 1000000000LL;
- break;
- }
+ base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit;
+ switch (base) {
+ case NPY_FR_ns:
+ break;
+ case NPY_FR_us:
+ value /= 1000LL;
+ break;
+ case NPY_FR_ms:
+ value /= 1000000LL;
+ break;
+ case NPY_FR_s:
+ value /= 1000000000LL;
+ break;
+ }
- exc = PyErr_Occurred();
+ exc = PyErr_Occurred();
- if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) {
- PRINTMARK();
- goto INVALID;
- }
+ if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) {
+ PRINTMARK();
+ goto INVALID;
+ }
- if (value == get_nat()) {
- PRINTMARK();
- tc->type = JT_NULL;
- return;
- }
+ if (value == get_nat()) {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
+ }
- GET_TC(tc)->longValue = value;
+ GET_TC(tc)->longValue = value;
- PRINTMARK();
- pc->PyTypeToJSON = PyLongToINT64;
- tc->type = JT_LONG;
+ PRINTMARK();
+ pc->PyTypeToJSON = PyLongToINT64;
+ tc->type = JT_LONG;
+ }
return;
} else if (PyArray_IsScalar(obj, Integer)) {
PRINTMARK();
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 73f4985e201f1..3e441a928b94f 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -10,7 +10,7 @@
from pandas._libs.tslibs import iNaT
from pandas.errors import AbstractMethodError
-from pandas.core.dtypes.common import ensure_str, is_period_dtype
+from pandas.core.dtypes.common import ensure_str, is_period_dtype, is_timedelta64_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
from pandas._typing import Scalar
@@ -170,6 +170,34 @@ def _write(
class SeriesWriter(Writer):
_default_orient = "index"
+ #
+ # def __init__(
+ # self,
+ # obj,
+ # orient: Optional[str],
+ # date_format: str,
+ # double_precision: int,
+ # ensure_ascii: bool,
+ # date_unit: str,
+ # index: bool,
+ # default_handler: Optional[Callable[[Any], Serializable]] = None,
+ # indent: int = 0,
+ # ):
+ # super().__init__(
+ # obj,
+ # orient,
+ # date_format,
+ # double_precision,
+ # ensure_ascii,
+ # date_unit,
+ # index,
+ # default_handler=default_handler,
+ # indent=indent,
+ # )
+ #
+ # if is_timedelta64_dtype(obj.dtype) and self.date_format == "iso":
+ # obj = obj.copy()
+ # self.obj = obj.apply(lambda x: x.isoformat())
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == "index":
@@ -206,6 +234,37 @@ def _write(
class FrameWriter(Writer):
_default_orient = "columns"
+ # def __init__(
+ # self,
+ # obj,
+ # orient: Optional[str],
+ # date_format: str,
+ # double_precision: int,
+ # ensure_ascii: bool,
+ # date_unit: str,
+ # index: bool,
+ # default_handler: Optional[Callable[[Any], Serializable]] = None,
+ # indent: int = 0,
+ # ):
+ # super().__init__(
+ # obj,
+ # orient,
+ # date_format,
+ # double_precision,
+ # ensure_ascii,
+ # date_unit,
+ # index,
+ # default_handler=default_handler,
+ # indent=indent,
+ # )
+ #
+ # obj = obj.copy()
+ # timedeltas = obj.select_dtypes(include=["timedelta"]).columns
+ #
+ # if len(timedeltas) and self.date_format == "iso":
+ # obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
+ # self.obj = obj
+
def _format_axes(self):
"""
Try to format axes if they are datelike.
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 569e299860614..5892c88484175 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -613,8 +613,7 @@ def test_timestamp_in_columns(self):
result = df.to_json(orient="table")
js = json.loads(result)
assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000Z"
- # TODO - below expectation is not correct; see GH 28256
- assert js["schema"]["fields"][2]["name"] == 10000
+ assert js["schema"]["fields"][2]["name"] == "P0DT0H0M10S"
@pytest.mark.parametrize(
"case",
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 415b1d81eb3e4..f29e62af9114d 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -813,6 +813,40 @@ def test_reconstruction_index(self):
result = read_json(df.to_json())
assert_frame_equal(result, df)
+ @pytest.mark.parametrize(
+ "date_format,expected",
+ [
+ ("iso", '{"0":"P1DT0H0M0S","1":"P2DT0H0M0S"}'),
+ ("epoch", '{"0":86400000,"1":172800000}'),
+ ],
+ )
+ def test_series_timedelta_to_json(self, date_format, expected):
+ # GH28156: to_json not correctly formatting Timedelta
+ s = Series(pd.timedelta_range(start="1D", periods=2))
+
+ result = s.to_json(date_format=date_format)
+ assert result == expected
+
+ result = s.astype(object).to_json(date_format=date_format)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "date_format,expected",
+ [
+ ("iso", '{"0":{"0":"P1DT0H0M0S","1":"P2DT0H0M0S"}}'),
+ ("epoch", '{"0":{"0":86400000,"1":172800000}}'),
+ ],
+ )
+ def test_dataframe_timedelta_to_json(self, date_format, expected):
+ # GH28156: to_json not correctly formatting Timedelta
+ df = DataFrame(pd.timedelta_range(start="1D", periods=2))
+
+ result = df.to_json(date_format=date_format)
+ assert result == expected
+
+ result = df.astype(object).to_json(date_format=date_format)
+ assert result == expected
+
def test_path(self):
with ensure_clean("test.json") as path:
for df in [
| - [x] closes #28256
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28595 | 2019-09-24T12:12:30Z | 2020-02-12T00:41:12Z | null | 2020-02-12T00:41:12Z |
DOC: Add into about empty results in sample | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 152983451bc38..6dd65a4050ca1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4862,6 +4862,8 @@ def sample(
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
+ Be careful, with a small number of rows and a small fraction the
+ result can be empty.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
| As referred to in https://github.com/pandas-dev/pandas/issues/28495.
- [x] closes #28495
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28592 | 2019-09-24T06:29:30Z | 2019-11-07T23:49:51Z | null | 2019-11-07T23:49:52Z |
CLN: unify __finalize__ treatment for Series ops | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 4f027843fbac1..4e85a3ff104c4 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -606,6 +606,9 @@ def _construct_result(left, result, index, name, dtype=None):
"""
out = left._constructor(result, index=index, dtype=dtype)
out = out.__finalize__(left)
+
+ # Set the result's name after __finalize__ is called because __finalize__
+ # would set it back to self.name
out.name = name
return out
@@ -660,14 +663,6 @@ def wrapper(self, other):
res_name = get_op_result_name(self, other)
- # TODO: shouldn't we be applying finalize whenever
- # not isinstance(other, ABCSeries)?
- finalizer = (
- lambda x: x.__finalize__(self)
- if isinstance(other, (np.ndarray, ABCIndexClass))
- else x
- )
-
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
@@ -680,13 +675,7 @@ def wrapper(self, other):
res_values = comparison_op(lvalues, rvalues, op)
- result = self._constructor(res_values, index=self.index)
- result = finalizer(result)
-
- # Set the result's name after finalizer is called because finalizer
- # would set it back to self.name
- result.name = res_name
- return result
+ return _construct_result(self, res_values, index=self.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
@@ -703,14 +692,6 @@ def wrapper(self, other):
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
- # TODO: shouldn't we be applying finalize whenever
- # not isinstance(other, ABCSeries)?
- finalizer = (
- lambda x: x.__finalize__(self)
- if not isinstance(other, (ABCSeries, ABCIndexClass))
- else x
- )
-
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
@@ -719,8 +700,7 @@ def wrapper(self, other):
rvalues = extract_array(other, extract_numpy=True)
res_values = logical_op(lvalues, rvalues, op)
- result = self._constructor(res_values, index=self.index, name=res_name)
- return finalizer(result)
+ return _construct_result(self, res_values, index=self.index, name=res_name)
wrapper.__name__ = op_name
return wrapper
| ATM we have three slightly different usages for the arithmetic, comparison and logical ops, with no clear reason for the differences. This changes that to make the arithmetic the one true version, which has the added benefit of letting us share _construct_result code across these three functions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28590 | 2019-09-24T01:40:48Z | 2019-09-26T12:30:40Z | 2019-09-26T12:30:40Z | 2019-09-26T14:42:48Z |
CLN: remove unused args from _construct_result | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0638c4c1b6a01..69ef3b68406b7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5269,7 +5269,7 @@ def _arith_op(left, right):
with np.errstate(all="ignore"):
res_values = _arith_op(this.values, other.values)
new_data = dispatch_fill_zeros(func, this.values, other.values, res_values)
- return this._construct_result(other, new_data, _arith_op)
+ return this._construct_result(new_data)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join="outer", axis=0, level=level, copy=False)
@@ -5282,44 +5282,31 @@ def _combine_match_index(self, other, func, level=None):
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(left.values.T, right.values).T
- return left._construct_result(other, new_data, func)
+ return left._construct_result(new_data)
def _combine_match_columns(self, other: Series, func, level=None):
left, right = self.align(other, join="outer", axis=1, level=level, copy=False)
# at this point we have `left.columns.equals(right.index)`
new_data = ops.dispatch_to_series(left, right, func, axis="columns")
- return left._construct_result(right, new_data, func)
+ return left._construct_result(new_data)
- def _combine_const(self, other, func):
- # scalar other or np.ndim(other) == 0
- new_data = ops.dispatch_to_series(self, other, func)
- return self._construct_result(other, new_data, func)
-
- def _construct_result(self, other, result, func):
+ def _construct_result(self, result) -> "DataFrame":
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
- other : object
result : DataFrame
- func : binary operator
Returns
-------
DataFrame
-
- Notes
- -----
- `func` is included for compat with SparseDataFrame signature, is not
- needed here.
"""
out = self._constructor(result, index=self.index, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
return out
- # TODO: finalize? we do for SparseDataFrame
def combine(self, other, func, fill_value=None, overwrite=True):
"""
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 0c1e1e90c003b..4f027843fbac1 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -892,7 +892,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
if fill_value is not None:
self = self.fillna(fill_value)
- return self._combine_const(other, op)
+ new_data = dispatch_to_series(self, other, op)
+ return self._construct_result(new_data)
f.__name__ = op_name
@@ -926,7 +927,7 @@ def f(self, other, axis=default_axis, level=None):
if not self._indexed_same(other):
self, other = self.align(other, "outer", level=level, copy=False)
new_data = dispatch_to_series(self, other, na_op, str_rep)
- return self._construct_result(other, new_data, na_op)
+ return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
@@ -934,7 +935,8 @@ def f(self, other, axis=default_axis, level=None):
)
else:
# in this case we always have `np.ndim(other) == 0`
- return self._combine_const(other, na_op)
+ new_data = dispatch_to_series(self, other, na_op)
+ return self._construct_result(new_data)
f.__name__ = op_name
@@ -957,7 +959,7 @@ def f(self, other):
"Can only compare identically-labeled DataFrame objects"
)
new_data = dispatch_to_series(self, other, func, str_rep)
- return self._construct_result(other, new_data, func)
+ return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
@@ -967,8 +969,8 @@ def f(self, other):
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
- res = self._combine_const(other, func)
- return res
+ new_data = dispatch_to_series(self, other, func)
+ return self._construct_result(new_data)
f.__name__ = op_name
| These were there for compat with the SparseDataFrame._construct_result signature, which is no longer relevant.
Also got rid of _combine_const; that was also just waiting for the SparseDataFrame version to be gone. | https://api.github.com/repos/pandas-dev/pandas/pulls/28589 | 2019-09-24T01:37:31Z | 2019-09-24T12:06:59Z | 2019-09-24T12:06:59Z | 2019-09-24T15:41:49Z |
CLN: indexing Exception in Series | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2431bfcfd0356..c87e371354f63 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1131,7 +1131,9 @@ def _get_with(self, key):
elif isinstance(key, tuple):
try:
return self._get_values_tuple(key)
- except Exception:
+ except ValueError:
+ # if we don't have a MultiIndex, we may still be able to handle
+ # a 1-tuple. see test_1tuple_without_multiindex
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
@@ -1186,7 +1188,9 @@ def _get_values(self, indexer):
return self._constructor(
self._data.get_slice(indexer), fastpath=True
).__finalize__(self)
- except Exception:
+ except ValueError:
+ # mpl compat if we look up e.g. ser[:, np.newaxis];
+ # see tests.series.timeseries.test_mpl_compat_hack
return self._values[indexer]
def _get_value(self, label, takeable: bool = False):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index e375bd459e66f..d478fbfa1686d 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1202,3 +1202,12 @@ def test_readonly_indices():
result = df["data"].iloc[indices]
expected = df["data"].loc[[1, 3, 6]]
tm.assert_series_equal(result, expected)
+
+
+def test_1tuple_without_multiindex():
+ ser = pd.Series(range(5))
+ key = (slice(3),)
+
+ result = ser[key]
+ expected = ser[key[0]]
+ tm.assert_series_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/28588 | 2019-09-24T00:49:50Z | 2019-09-24T12:07:41Z | 2019-09-24T12:07:41Z | 2019-09-24T15:49:06Z | |
TST/CLN: Exception catching | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 85eab91af3c48..c3ba5c0545b8b 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -8,6 +8,7 @@
import pandas as pd
from pandas import DataFrame, Series, concat, date_range, isna
from pandas.api.types import is_scalar
+from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
@@ -722,7 +723,7 @@ def test_iloc_mask(self):
else:
accessor = df
ans = str(bin(accessor[mask]["nums"].sum()))
- except Exception as e:
+ except (ValueError, IndexingError, NotImplementedError) as e:
ans = str(e)
key = tuple([idx, method])
diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py
index 856d97e29f2c0..ae604b1141204 100644
--- a/pandas/tests/io/pytables/test_pytables.py
+++ b/pandas/tests/io/pytables/test_pytables.py
@@ -37,7 +37,6 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal, set_timezone
-from pandas.io.formats.printing import pprint_thing
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
@@ -3415,14 +3414,9 @@ def test_string_select(self):
expected = df[df.x == "none"]
assert_frame_equal(result, expected)
- try:
- result = store.select("df", "x!=none")
- expected = df[df.x != "none"]
- assert_frame_equal(result, expected)
- except Exception as detail:
- pprint_thing("[{0}]".format(detail))
- pprint_thing(store)
- pprint_thing(expected)
+ result = store.select("df", "x!=none")
+ expected = df[df.x != "none"]
+ assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index af726caa52e88..aee58f808d9e6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -9,7 +9,6 @@
from shutil import rmtree
import string
import tempfile
-import traceback
from typing import Union, cast
import warnings
import zipfile
@@ -2291,10 +2290,7 @@ def wrapper(*args, **kwargs):
" and error {error}".format(error=e)
)
- try:
- e_str = traceback.format_exc(e)
- except Exception:
- e_str = str(e)
+ e_str = str(e)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
| https://api.github.com/repos/pandas-dev/pandas/pulls/28587 | 2019-09-23T22:45:42Z | 2019-09-24T12:24:04Z | 2019-09-24T12:24:04Z | 2019-09-24T15:50:08Z | |
Fix typo in class DataFrame | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0638c4c1b6a01..be969e939afda 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -312,7 +312,7 @@ class DataFrame(NDFrame):
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order
- Python 3.6 and later.
+ for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is basically a version of #28579 for the master branch. | https://api.github.com/repos/pandas-dev/pandas/pulls/28586 | 2019-09-23T22:04:09Z | 2019-09-24T01:26:13Z | 2019-09-24T01:26:13Z | 2019-09-24T17:45:09Z |
PERF: Implement DataFrame-with-scalar ops block-wise | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index bda5f8f4326f1..dc90bcf2239ac 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -328,6 +328,32 @@ class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray)
_generate_range
"""
+ @property
+ def ndim(self):
+ return self._data.ndim
+
+ @property
+ def shape(self):
+ return self._data.shape
+
+ def __len__(self):
+ return len(self._data)
+
+ @property
+ def T(self):
+ # Note: we drop any freq
+ return type(self)(self._data.T, dtype=self.dtype)
+
+ def reshape(self, *args, **kwargs):
+ # Note: we drop any freq
+ data = self._data.reshape(*args, **kwargs)
+ return type(self)(data, dtype=self.dtype)
+
+ def ravel(self, *args, **kwargs):
+ # Note: we drop any freq
+ data = self._data.ravel(*args, **kwargs)
+ return type(self)(data, dtype=self.dtype)
+
@property
def _box_func(self):
"""
@@ -396,9 +422,6 @@ def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
- def __len__(self):
- return len(self._data)
-
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
@@ -1032,7 +1055,7 @@ def _add_nat(self):
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
- result = np.zeros(len(self), dtype=np.int64)
+ result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
@@ -1046,7 +1069,7 @@ def _sub_nat(self):
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
- result = np.zeros(len(self), dtype=np.int64)
+ result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return result.view("timedelta64[ns]")
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 0335058a69c63..c0283915f7d92 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -76,6 +76,17 @@
"""
+def compat_2d(meth):
+ def new_meth(self, *args, **kwargs):
+ if self.ndim > 1:
+ result = meth(self.ravel(), *args, **kwargs)
+ return result.reshape(self.shape)
+ return meth(self, *args, **kwargs)
+
+ new_meth.__name__ = meth.__name__
+ return new_meth
+
+
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
@@ -361,7 +372,7 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
"ndarray, or Series or Index containing one of those."
)
raise ValueError(msg.format(type(values).__name__))
- if values.ndim != 1:
+ if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
@@ -818,6 +829,7 @@ def _sub_datetime_arraylike(self, other):
new_values[arr_mask] = iNaT
return new_values.view("timedelta64[ns]")
+ @compat_2d
def _add_offset(self, offset):
assert not isinstance(offset, Tick)
try:
@@ -825,6 +837,7 @@ def _add_offset(self, offset):
values = self.tz_localize(None)
else:
values = self
+
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 6c9462ff4fa4d..81588b0dee96d 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -222,7 +222,7 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
"ndarray, or Series or Index containing one of those."
)
raise ValueError(msg.format(type(values).__name__))
- if values.ndim != 1:
+ if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
@@ -1076,7 +1076,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
)
data = np.array(data, copy=copy)
- if data.ndim != 1:
+ if data.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
assert data.dtype == "m8[ns]", data
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 398fa9b0c1fc0..0e90a395d7f7e 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -12,7 +12,11 @@
from pandas._libs import Timedelta, Timestamp, lib
from pandas.util._decorators import Appender
-from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
+from pandas.core.dtypes.common import (
+ is_extension_array_dtype,
+ is_list_like,
+ is_timedelta64_dtype,
+)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
@@ -24,6 +28,7 @@
from pandas.core.construction import extract_array
from pandas.core.ops.array_ops import (
arithmetic_op,
+ array_op,
comparison_op,
define_na_arithmetic_op,
logical_op,
@@ -350,7 +355,7 @@ def fill_binop(left, right, fill_value):
# Dispatch logic
-def dispatch_to_series(left, right, func, str_rep=None, axis=None):
+def dispatch_to_series(left, right, func, str_rep=None, axis=None, eval_kwargs=None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
@@ -369,11 +374,57 @@ def dispatch_to_series(left, right, func, str_rep=None, axis=None):
"""
# Note: we use iloc to access columns for compat with cases
# with non-unique columns.
+ eval_kwargs = eval_kwargs or {}
+
import pandas.core.computation.expressions as expressions
right = lib.item_from_zerodim(right)
+
if lib.is_scalar(right) or np.ndim(right) == 0:
+ new_blocks = []
+ mgr = left._data
+ for blk in mgr.blocks:
+ # Reshape for EA Block
+ blk_vals = blk.values
+ if hasattr(blk_vals, "reshape"):
+ # ndarray, DTA/TDA/PA
+ blk_vals = blk_vals.reshape(blk.shape)
+ blk_vals = blk_vals.T
+
+ new_vals = array_op(blk_vals, right, func, str_rep, eval_kwargs)
+
+ # Reshape for EA Block
+ if is_extension_array_dtype(new_vals.dtype):
+ from pandas.core.internals.blocks import make_block
+
+ if hasattr(new_vals, "reshape"):
+ # ndarray, DTA/TDA/PA
+ new_vals = new_vals.reshape(blk.shape[::-1])
+ assert new_vals.shape[-1] == len(blk.mgr_locs)
+ for i in range(new_vals.shape[-1]):
+ nb = make_block(new_vals[..., i], placement=[blk.mgr_locs[i]])
+ new_blocks.append(nb)
+ else:
+ # Categorical, IntegerArray
+ assert len(blk.mgr_locs) == 1
+ assert new_vals.shape == (blk.shape[-1],)
+ nb = make_block(new_vals, placement=blk.mgr_locs, ndim=2)
+ new_blocks.append(nb)
+ elif blk.values.ndim == 1:
+ # need to bump up to 2D
+ new_vals = new_vals.reshape(-1, 1)
+ assert new_vals.T.shape == blk.shape
+ nb = blk.make_block(new_vals.T)
+ new_blocks.append(nb)
+ else:
+ assert new_vals.T.shape == blk.shape
+ nb = blk.make_block(new_vals.T)
+ new_blocks.append(nb)
+
+ bm = type(mgr)(new_blocks, mgr.axes)
+ return type(left)(bm)
+
def column_op(a, b):
return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))}
@@ -526,7 +577,7 @@ def wrapper(self, other):
lvalues = extract_array(self, extract_numpy=True)
rvalues = extract_array(other, extract_numpy=True)
- res_values = comparison_op(lvalues, rvalues, op)
+ res_values = comparison_op(lvalues, rvalues, op, None, {})
return _construct_result(self, res_values, index=self.index, name=res_name)
@@ -552,7 +603,7 @@ def wrapper(self, other):
lvalues = extract_array(self, extract_numpy=True)
rvalues = extract_array(other, extract_numpy=True)
- res_values = logical_op(lvalues, rvalues, op)
+ res_values = logical_op(lvalues, rvalues, op, None, {})
return _construct_result(self, res_values, index=self.index, name=res_name)
wrapper.__name__ = op_name
@@ -723,7 +774,9 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
if fill_value is not None:
self = self.fillna(fill_value)
- new_data = dispatch_to_series(self, other, op)
+ new_data = dispatch_to_series(
+ self, other, op, str_rep=str_rep, eval_kwargs=eval_kwargs
+ )
return self._construct_result(new_data)
f.__name__ = op_name
@@ -749,7 +802,9 @@ def f(self, other, axis=default_axis, level=None):
# Another DataFrame
if not self._indexed_same(other):
self, other = self.align(other, "outer", level=level, copy=False)
- new_data = dispatch_to_series(self, other, op, str_rep)
+ new_data = dispatch_to_series(
+ self, other, op, str_rep=str_rep, eval_kwargs={}
+ )
return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
@@ -781,7 +836,9 @@ def f(self, other):
raise ValueError(
"Can only compare identically-labeled DataFrame objects"
)
- new_data = dispatch_to_series(self, other, op, str_rep)
+ new_data = dispatch_to_series(
+ self, other, op, str_rep=str_rep, eval_kwargs={}
+ )
return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index a225eec93b27e..c4cb2178c5035 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -56,7 +56,7 @@ def comp_method_OBJECT_ARRAY(op, x, y):
result = libops.vec_compare(x, y, op)
else:
- result = libops.scalar_compare(x, y, op)
+ result = libops.scalar_compare(x.ravel(), y, op).reshape(x.shape)
return result
@@ -156,12 +156,22 @@ def na_arithmetic_op(left, right, op, str_rep: str, eval_kwargs):
return missing.dispatch_fill_zeros(op, left, right, result)
+def array_op(left, right, op, str_rep, eval_kwargs):
+ op_name = op.__name__.strip("_")
+ if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:
+ return comparison_op(left, right, op, str_rep, eval_kwargs)
+ elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:
+ return logical_op(left, right, op, str_rep, eval_kwargs)
+ else:
+ return arithmetic_op(left, right, op, str_rep, eval_kwargs)
+
+
def arithmetic_op(
left: Union[np.ndarray, ABCExtensionArray],
right: Any,
op,
str_rep: str,
- eval_kwargs: Dict[str, str],
+ eval_kwargs: Dict[str, bool],
):
"""
Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
@@ -218,7 +228,7 @@ def arithmetic_op(
def comparison_op(
- left: Union[np.ndarray, ABCExtensionArray], right: Any, op
+ left: Union[np.ndarray, ABCExtensionArray], right: Any, op, str_rep, eval_kwargs
) -> Union[np.ndarray, ABCExtensionArray]:
"""
Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`.
@@ -256,10 +266,11 @@ def comparison_op(
elif is_scalar(rvalues) and isna(rvalues):
# numpy does not like comparisons vs None
+ # TODO: Should we be using invalid_comparison here?
if op is operator.ne:
- res_values = np.ones(len(lvalues), dtype=bool)
+ res_values = np.ones(lvalues.shape, dtype=bool)
else:
- res_values = np.zeros(len(lvalues), dtype=bool)
+ res_values = np.zeros(lvalues.shape, dtype=bool)
elif is_object_dtype(lvalues.dtype):
res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
@@ -323,7 +334,7 @@ def na_logical_op(x: np.ndarray, y, op):
def logical_op(
- left: Union[np.ndarray, ABCExtensionArray], right: Any, op
+ left: Union[np.ndarray, ABCExtensionArray], right: Any, op, str_rep, eval_kwargs
) -> Union[np.ndarray, ABCExtensionArray]:
"""
Evaluate a logical operation `|`, `&`, or `^`.
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index ed693d873efb8..d4fdeffa2c2db 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -755,18 +755,18 @@ def test_pi_sub_isub_offset(self):
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
- def test_pi_add_offset_n_gt1(self, box_transpose_fail):
+ def test_pi_add_offset_n_gt1(self, box_with_array):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
- box, transpose = box_transpose_fail
+ box = box_with_array
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
- pi = tm.box_expected(pi, box, transpose=transpose)
- expected = tm.box_expected(expected, box, transpose=transpose)
+ pi = tm.box_expected(pi, box)
+ expected = tm.box_expected(expected, box)
result = pi + per.freq
tm.assert_equal(result, expected)
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index c3cda22497ecb..acb419c09210f 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -23,9 +23,8 @@ def test_from_sequence_invalid_type(self):
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
- with pytest.raises(ValueError, match="Only 1-dimensional"):
- # 2-dim
- DatetimeArray(arr.reshape(2, 2))
+ # 2-dim allowed for ops compat
+ DatetimeArray(arr.reshape(2, 2))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 42e7bee97e671..ddfd2339e6c49 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -11,9 +11,8 @@ def test_only_1dim_accepted(self):
# GH#25282
arr = np.array([0, 1, 2, 3], dtype="m8[h]").astype("m8[ns]")
- with pytest.raises(ValueError, match="Only 1-dimensional"):
- # 2-dim
- TimedeltaArray(arr.reshape(2, 2))
+ # 2-dim allowed for ops compat
+ TimedeltaArray(arr.reshape(2, 2))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 82c197ac054f0..f5f6c9ad6b3da 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -122,7 +122,8 @@ def test_ops(self):
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
- # GH7192
+ # GH7192: Note we need a large number of rows to ensure this
+ # goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
| One of four cases we'll need to implement (the others being Series-align-index, Series-align-columns, and DataFrame).
~670x speedup on the fastest ops, ~8x on the slower end.
```
In [3]: arr = np.arange(10**5).reshape(100, 1000)
In [4]: df = pd.DataFrame(arr)
In [5]: %timeit df + 1
198 ms ± 2.17 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # <-- master
294 µs ± 3.25 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) # <-- PR
In [8]: ts = pd.Timestamp.now("UTC")
In [9]: df2 = pd.DataFrame(arr.view("timedelta64[ns]"))
In [10]: %timeit ts - df2
319 ms ± 2.49 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # <-- master
40.2 ms ± 622 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) # <-- PR
```
The 2D DTA and TDAs that get created exist only briefly, and the changes to these classes are about as minimal as I can make them. | https://api.github.com/repos/pandas-dev/pandas/pulls/28583 | 2019-09-23T19:38:24Z | 2019-11-10T17:01:52Z | null | 2020-04-05T17:45:11Z |
WEB: Restructuring pages and navigation, styling and new footer | diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index 253318182f30c..fe3e4d1245d93 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -20,41 +20,44 @@
<link rel="stylesheet"
href="{{ base_url }}{{ stylesheet }}">
{% endfor %}
+ <script src="https://kit.fontawesome.com/79e5369384.js" crossorigin="anonymous"></script>
</head>
<body>
<header>
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
- <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation">
- <span class="navbar-toggler-icon"></span>
- </button>
+ <div class="container">
+ <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation">
+ <span class="navbar-toggler-icon"></span>
+ </button>
- {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %}
+ {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %}
- <div class="collapse navbar-collapse" id="nav-content">
- <ul class="navbar-nav">
- {% for item in navbar %}
- {% if not item.has_subitems %}
- <li class="nav-item">
- <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a>
- </li>
- {% else %}
- <li class="nav-item dropdown">
- <a class="nav-link dropdown-toggle"
- data-toggle="dropdown"
- id="{{ item.slug }}"
- href="#"
- role="button"
- aria-haspopup="true"
- aria-expanded="false">{{ item.name }}</a>
- <div class="dropdown-menu" aria-labelledby="{{ item.slug }}">
- {% for subitem in item.target %}
- <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ subitem.target }}">{{ subitem.name }}</a>
- {% endfor %}
- </div>
- </li>
- {% endif %}
- {% endfor %}
- </ul>
+ <div class="collapse navbar-collapse" id="nav-content">
+ <ul class="navbar-nav ml-auto">
+ {% for item in navbar %}
+ {% if not item.has_subitems %}
+ <li class="nav-item">
+ <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a>
+ </li>
+ {% else %}
+ <li class="nav-item dropdown">
+ <a class="nav-link dropdown-toggle"
+ data-toggle="dropdown"
+ id="{{ item.slug }}"
+ href="#"
+ role="button"
+ aria-haspopup="true"
+ aria-expanded="false">{{ item.name }}</a>
+ <div class="dropdown-menu" aria-labelledby="{{ item.slug }}">
+ {% for subitem in item.target %}
+ <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ subitem.target }}">{{ subitem.name }}</a>
+ {% endfor %}
+ </div>
+ </li>
+ {% endif %}
+ {% endfor %}
+ </ul>
+ </div>
</div>
</nav>
</header>
@@ -64,11 +67,30 @@
</div>
</main>
<footer class="container pt-4 pt-md-5 border-top">
- <p class="float-right">
- <a href="#">Back to top</a>
- </p>
+ <ul class="list-inline social-buttons float-right">
+ <li class="list-inline-item">
+ <a href="https://twitter.com/pandas_dev/">
+ <i class="fab fa-twitter"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://github.com/pandas-dev/pandas/">
+ <i class="fab fa-github"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://stackoverflow.com/questions/tagged/pandas">
+ <i class="fab fa-stack-overflow"></i>
+ </a>
+ </li>
+ <li class="list-inline-item">
+ <a href="https://pandas.discourse.group">
+ <i class="fab fa-discourse"></i>
+ </a>
+ </li>
+ </ul>
<p>
- © 2009 - 2019, pandas team
+ pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>
</p>
</footer>
diff --git a/web/pandas/community/citing.md b/web/pandas/about/citing.md
similarity index 98%
rename from web/pandas/community/citing.md
rename to web/pandas/about/citing.md
index 6bad948bb3736..77b79c41aa4d1 100644
--- a/web/pandas/community/citing.md
+++ b/web/pandas/about/citing.md
@@ -1,6 +1,6 @@
-# Citing pandas
+# Citing and logo
-## Citing
+## Citing pandas
If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers:
diff --git a/web/pandas/community/about.md b/web/pandas/about/index.html
similarity index 100%
rename from web/pandas/community/about.md
rename to web/pandas/about/index.html
diff --git a/web/pandas/community/roadmap.md b/web/pandas/about/roadmap.md
similarity index 100%
rename from web/pandas/community/roadmap.md
rename to web/pandas/about/roadmap.md
diff --git a/web/pandas/about/sponsors.md b/web/pandas/about/sponsors.md
new file mode 100644
index 0000000000000..dcc6e367e5d64
--- /dev/null
+++ b/web/pandas/about/sponsors.md
@@ -0,0 +1,41 @@
+# Sponsors
+
+## NumFOCUS
+
+
+
+_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States.
+NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the
+health and sustainability of the project. Visit numfocus.org for more information.
+
+Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible
+to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.
+
+## Tidelift
+
+_pandas_ is part of the [Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme).
+You can support pandas by becoming a Tidelift subscriber.
+
+## Institutional partners
+
+Institutional Partners are companies and universities that support the project by employing contributors.
+Current Institutional Partners include:
+
+<ul>
+ {% for company in partners.active if company.employs %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li>
+ {% endfor %}
+</ul>
+
+## In-kind sponsors
+
+- [OVH](https://us.ovhcloud.com/): Hosting
+- [Indeed](https://opensource.indeedeng.io/): Logo and website design
+
+## Past institutional partners
+
+<ul>
+ {% for company in partners.past %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a></li>
+ {% endfor %}
+</ul>
diff --git a/web/pandas/community/team.md b/web/pandas/about/team.md
similarity index 63%
rename from web/pandas/community/team.md
rename to web/pandas/about/team.md
index c0a15081e1fa8..41da3a0e82bdb 100644
--- a/web/pandas/community/team.md
+++ b/web/pandas/about/team.md
@@ -36,25 +36,12 @@ If you want to support pandas development, you can find information in the [dona
{% endfor %}
</div>
-## BDFL
+## Governance
Wes McKinney is the Benevolent Dictator for Life (BDFL).
-## Governance
-
The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance).
-## NumFOCUS
-
-
-
-_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States.
-NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the
-health and sustainability of the project. Visit numfocus.org for more information.
-
-Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible
-to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.
-
## Code of conduct committee
<ul>
@@ -71,19 +58,6 @@ to the extent provided by law. As with any donation, you should consult with you
{% endfor %}
</ul>
-## Institutional partners
-
-<ul>
- {% for company in partners.active if company.employs %}
- <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li>
- {% endfor %}
-</ul>
-
-In-kind sponsors
-
-- [Indeed](https://opensource.indeedeng.io/): Logo and website design
-- Can we find a donor for the hosting (website, benchmarks,...?)
-
## Emeritus maintainers
<ul>
@@ -91,11 +65,3 @@ In-kind sponsors
<li>{{ person }}</li>
{% endfor %}
</ul>
-
-## Past institutional partners
-
-<ul>
- {% for company in partners.past %}
- <li><a href="{{ company.url }}">{{ company.name }}</a></li>
- {% endfor %}
-</ul>
diff --git a/web/pandas/blog.html b/web/pandas/community/blog.html
similarity index 100%
rename from web/pandas/blog.html
rename to web/pandas/community/blog.html
diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md
index 2841349fdb556..de0e8120f7eee 100644
--- a/web/pandas/community/coc.md
+++ b/web/pandas/community/coc.md
@@ -1,4 +1,4 @@
-# Contributor Code of Conduct
+# Code of conduct
As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index af27c31b52d50..cf242e86f879f 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -1,4 +1,4 @@
-# Pandas ecosystem
+# Ecosystem
Increasingly, packages are being built on top of pandas to address
specific needs in data preparation, analysis and visualization. This is
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index c7c4b77e309f7..d5c505f298437 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -4,12 +4,11 @@ main:
ignore:
- _templates/layout.html
- config.yml
- - blog.html # blog will be added at a later stage
- try.md # the binder page will be added later
github_repo_url: pandas-dev/pandas
context_preprocessors:
- pandas_web.Preprocessors.navbar_add_info
- # - pandas_web.Preprocessors.blog_add_posts
+ - pandas_web.Preprocessors.blog_add_posts
- pandas_web.Preprocessors.maintainers_add_info
- pandas_web.Preprocessors.home_add_releases
markdown_extensions:
@@ -17,46 +16,48 @@ main:
- tables
- fenced_code
static:
- logo: # path to the logo when it's in the repo
+ logo: # /static/img/pandas.svg
css:
- /static/css/pandas.css
navbar:
- - name: "Install"
- target: /install.html
+ - name: "About us"
+ target:
+ - name: "About pandas"
+ target: /about/index.html
+ - name: "Project roadmap"
+ target: /about/roadmap.html
+ - name: "Team"
+ target: /about/team.html
+ - name: "Sponsors"
+ target: /about/sponsors.html
+ - name: "Citing and logo"
+ target: /about/citing.html
+ - name: "Getting started"
+ target: /getting_started.html
- name: "Documentation"
target:
- - name: "Getting started"
- target: /docs/getting_started/index.html
- name: "User guide"
target: /docs/user_guide/index.html
- name: "API reference"
target: /docs/reference/index.html
- - name: "Contributing to pandas"
- target: /docs/development/index.html
- name: "Release notes"
target: /docs/whatsnew/index.html
+ - name: "Older versions"
+ target: https://pandas.pydata.org/pandas-docs/version/
- name: "Community"
target:
- - name: "About pandas"
- target: /community/about.html
- - name: "Project roadmap"
- target: /community/roadmap.html
- - name: "Ecosystem"
- target: /community/ecosystem.html
+ - name: "Blog"
+ target: /community/blog.html
- name: "Ask a question (StackOverflow)"
target: https://stackoverflow.com/questions/tagged/pandas
- - name: "Discuss (mailing list)"
- target: https://groups.google.com/forum/#!forum/pydata
- - name: "Team"
- target: /community/team.html
- - name: "Code of Conduct"
+ - name: "Discuss"
+ target: https://pandas.discourse.group
+ - name: "Code of conduct"
target: /community/coc.html
- - name: "Citing pandas"
- target: /community/citing.html
- # - name: "Blog"
- # target: /blog.html
- - name: "Donate"
- target: /donate.html
+ - name: "Ecosystem"
+ target: /community/ecosystem.html
+ - name: "Contribute"
+ target: /contribute.html
blog:
num_posts: 8
feed:
diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md
new file mode 100644
index 0000000000000..825a5870bf5a0
--- /dev/null
+++ b/web/pandas/contribute.md
@@ -0,0 +1,12 @@
+# Contribute to pandas
+
+_pandas_ is and always will be **free**. To make the development sustainable, we need _pandas_ users, corporate
+or individual, to support the development by providing their time and money.
+
+You can find more information about current developers in the [team page](about/team.html),
+and about current sponsors in the [sponsors page](about/sponsors.html).
+Financial contributions will mainly be used to advance in the [pandas roadmap](about/roadmap.html).
+
+- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org)
+- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page]({{ base_url }}/docs/development/index.html)
+- If you want to support _pandas_ with a **donation**, please use the [donations page](donate.html).
diff --git a/web/pandas/donate.md b/web/pandas/donate.md
index 5f5b07fb8763c..69db7e4648e77 100644
--- a/web/pandas/donate.md
+++ b/web/pandas/donate.md
@@ -1,16 +1,5 @@
# Donate to pandas
-_pandas_ is and always will be **free**. To make de development sustainable, we need _pandas_ users, corporate
-or individual, to support the development by providing their time and money.
-
-You can find more information about current developers and supporters in the [team page](community/team.html).
-Financial contributions will mainly be used to advance in the [pandas roadmap](community/roadmap.html).
-
-- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org)
-- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page]({{ base_url }}/docs/development/index.html)
-- If you want to support _pandas_ with a **donation**, please use the form below:
-
-
<div id="salsalabs-donate-container">
</div>
<script type="text/javascript"
diff --git a/web/pandas/install.md b/web/pandas/getting_started.md
similarity index 62%
rename from web/pandas/install.md
rename to web/pandas/getting_started.md
index 84721b3d1d9a4..99a7a9f4b2d60 100644
--- a/web/pandas/install.md
+++ b/web/pandas/getting_started.md
@@ -1,4 +1,6 @@
-# Installation instructions
+# Getting started
+
+## Installation instructions
The next steps provides the easiest and recommended way to set up your
environment to use pandas. Other installation options can be found in
@@ -21,8 +23,29 @@ the [advanced installation page]({{ base_url}}/docs/install.html).
<img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/pandas_import_and_version.png"/>
-5. Now you are ready to use pandas you can write your code in the next cells.
+5. Now you are ready to use pandas, and you can write your code in the next cells.
+## Tutorials
You can learn more about pandas in the [tutorials](#), and more about JupyterLab
in the [JupyterLab documentation](https://jupyterlab.readthedocs.io/en/stable/user/interface.html).
+
+## Books
+
+The book we recommend to learn pandas is [Python for Data Analysis](https://amzn.to/2KI5JJw),
+by [Wes McKinney](https://wesmckinney.com/), creator of pandas.
+
+<a href="https://amzn.to/2KI5JJw">
+ <img alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/>
+</a>
+
+## Videos
+
+<iframe width="560" height="315" frameborder="0"
+src="https://www.youtube.com/embed/_T8LGqJtuGc"
+allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
+allowfullscreen></iframe>
+
+## Cheat sheet
+
+[pandas cheat sheet](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf)
diff --git a/web/pandas/index.html b/web/pandas/index.html
index 9f1a0e9a64174..df6e5ab9a330b 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -10,7 +10,7 @@ <h1>pandas</h1>
built on top of the <a href="http://www.python.org">Python</a> programming language.
</p>
<p>
- <a class="btn btn-primary" href="{{ base_url }}/install.html">Install pandas now!</a>
+ <a class="btn btn-primary" href="{{ base_url }}/getting_started.html">Install pandas now!</a>
</p>
</section>
@@ -19,7 +19,7 @@ <h1>pandas</h1>
<h5>Getting started</h5>
<ul>
<!-- <li><a href="{{ base_url }}/try.html">Try pandas online</a></li> -->
- <li><a href="{{ base_url }}/install.html">Install pandas</a></li>
+ <li><a href="{{ base_url }}/getting_started.html">Install pandas</a></li>
<li><a href="{{ base_url }}/docs/getting_started/index.html">Getting started</a></li>
</ul>
</div>
diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css
index 5911de96b5fa9..0a227cf8d96c9 100644
--- a/web/pandas/static/css/pandas.css
+++ b/web/pandas/static/css/pandas.css
@@ -1,16 +1,54 @@
body {
padding-top: 5em;
- padding-bottom: 3em;
+ color: #444;
+}
+h1 {
+ font-size: 2.4rem;
+ font-weight: 700;
+ color: #130654;
+}
+h2 {
+ font-size: 1.45rem;
+ font-weight: 700;
+ color: black;
+}
+h3 {
+ font-size: 1.3rem;
+ font-weight: 600;
+ color: black;
+}
+a {
+ color: #130654;
}
code {
white-space: pre;
}
+.fab {
+ font-size: 1.2rem;
+ color: #666;
+}
+.fab:hover {
+ color: #130654;
+}
a.navbar-brand img {
max-height: 2em;
}
div.card {
margin: 0 0 .2em .2em !important;
}
+div.card .card-title {
+ font-weight: 500;
+ color: #130654;
+}
.book {
padding: 0 20%;
}
+.bg-dark {
+ background-color: #130654 !important;
+}
+.navbar-dark .navbar-nav .nav-link {
+ color: rgba(255, 255, 255, .9);
+}
+.navbar-dark .navbar-nav .nav-link:hover {
+ color: white;
+}
| xref #28519
closes #28520
You can see the result rendered in: https://datapythonista.github.io/pandas-web/ (the new logo is not in the PR).
Been discussing with NumPy the navigation, and what is proposed here is with minimal differences agreed with them. See https://github.com/numpy/numpy.org/issues/43
I don't touch the home page here, I think we need to remove almost everything we've got so far (based on the current), and move more in the direction of Dask or Jupyter. Discussions about it better in #28168 than here. | https://api.github.com/repos/pandas-dev/pandas/pulls/28582 | 2019-09-23T18:33:21Z | 2019-09-26T06:42:27Z | 2019-09-26T06:42:27Z | 2019-09-26T13:22:31Z |
Fix typo | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c0b331f356c3c..4c39e18ce5002 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -320,7 +320,7 @@ class DataFrame(NDFrame):
.. versionchanged :: 0.25.0
If data is a list of dicts, column order follows insertion-order
- Python 3.6 and later.
+ for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28579 | 2019-09-23T14:47:24Z | 2019-09-23T15:21:25Z | 2019-09-23T15:21:25Z | 2019-09-23T22:05:02Z |
DOC: Add scaling to large datasets section | diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000000000..e23892d6100e8
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,4 @@
+data/
+timeseries.csv
+timeseries.parquet
+timeseries_wide.parquet
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index f5669626aa2b3..6ff42eee9dad2 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -83,6 +83,7 @@ See the :ref:`overview` for more detail about what's in the library.
* :doc:`user_guide/style`
* :doc:`user_guide/options`
* :doc:`user_guide/enhancingperf`
+ * :doc:`user_guide/scale`
* :doc:`user_guide/sparse`
* :doc:`user_guide/gotchas`
* :doc:`user_guide/cookbook`
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 05df83decbd7e..b86961a71433b 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -38,6 +38,7 @@ Further information on any specific method can be obtained in the
style
options
enhancingperf
+ scale
sparse
gotchas
cookbook
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
new file mode 100644
index 0000000000000..7b590a3a1fcc8
--- /dev/null
+++ b/doc/source/user_guide/scale.rst
@@ -0,0 +1,373 @@
+.. _scale:
+
+*************************
+Scaling to large datasets
+*************************
+
+Pandas provides data structures for in-memory analytics, which makes using pandas
+to analyze datasets that are larger than memory datasets somewhat tricky. Even datasets
+that are a sizable fraction of memory become unwieldy, as some pandas operations need
+to make intermediate copies.
+
+This document provides a few recommendations for scaling your analysis to larger datasets.
+It's a complement to :ref:`enhancingperf`, which focuses on speeding up analysis
+for datasets that fit in memory.
+
+But first, it's worth considering *not using pandas*. Pandas isn't the right
+tool for all situations. If you're working with very large datasets and a tool
+like PostgreSQL fits your needs, then you should probably be using that.
+Assuming you want or need the expressiveness and power of pandas, let's carry on.
+
+.. ipython:: python
+
+ import pandas as pd
+ import numpy as np
+
+.. ipython:: python
+ :suppress:
+
+ from pandas.util.testing import _make_timeseries
+
+ # Make a random in-memory dataset
+ ts = _make_timeseries(freq="30S", seed=0)
+ ts.to_csv("timeseries.csv")
+ ts.to_parquet("timeseries.parquet")
+
+
+Load less data
+--------------
+
+.. ipython:: python
+ :suppress:
+
+ # make a similar dataset with many columns
+ timeseries = [
+ _make_timeseries(freq="1T", seed=i).rename(columns=lambda x: f"{x}_{i}")
+ for i in range(10)
+ ]
+ ts_wide = pd.concat(timeseries, axis=1)
+ ts_wide.to_parquet("timeseries_wide.parquet")
+
+Suppose our raw dataset on disk has many columns::
+
+ id_0 name_0 x_0 y_0 id_1 name_1 x_1 ... name_8 x_8 y_8 id_9 name_9 x_9 y_9
+ timestamp ...
+ 2000-01-01 00:00:00 1015 Michael -0.399453 0.095427 994 Frank -0.176842 ... Dan -0.315310 0.713892 1025 Victor -0.135779 0.346801
+ 2000-01-01 00:01:00 969 Patricia 0.650773 -0.874275 1003 Laura 0.459153 ... Ursula 0.913244 -0.630308 1047 Wendy -0.886285 0.035852
+ 2000-01-01 00:02:00 1016 Victor -0.721465 -0.584710 1046 Michael 0.524994 ... Ray -0.656593 0.692568 1064 Yvonne 0.070426 0.432047
+ 2000-01-01 00:03:00 939 Alice -0.746004 -0.908008 996 Ingrid -0.414523 ... Jerry -0.958994 0.608210 978 Wendy 0.855949 -0.648988
+ 2000-01-01 00:04:00 1017 Dan 0.919451 -0.803504 1048 Jerry -0.569235 ... Frank -0.577022 -0.409088 994 Bob -0.270132 0.335176
+ ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
+ 2000-12-30 23:56:00 999 Tim 0.162578 0.512817 973 Kevin -0.403352 ... Tim -0.380415 0.008097 1041 Charlie 0.191477 -0.599519
+ 2000-12-30 23:57:00 970 Laura -0.433586 -0.600289 958 Oliver -0.966577 ... Zelda 0.971274 0.402032 1038 Ursula 0.574016 -0.930992
+ 2000-12-30 23:58:00 1065 Edith 0.232211 -0.454540 971 Tim 0.158484 ... Alice -0.222079 -0.919274 1022 Dan 0.031345 -0.657755
+ 2000-12-30 23:59:00 1019 Ingrid 0.322208 -0.615974 981 Hannah 0.607517 ... Sarah -0.424440 -0.117274 990 George -0.375530 0.563312
+ 2000-12-31 00:00:00 937 Ursula -0.906523 0.943178 1018 Alice -0.564513 ... Jerry 0.236837 0.807650 985 Oliver 0.777642 0.783392
+
+ [525601 rows x 40 columns]
+
+
+To load the columns we want, we have two options.
+Option 1 loads in all the data and then filters to what we need.
+
+.. ipython:: python
+
+ columns = ['id_0', 'name_0', 'x_0', 'y_0']
+
+ pd.read_parquet("timeseries_wide.parquet")[columns]
+
+Option 2 only loads the columns we request.
+
+.. ipython:: python
+
+ pd.read_parquet("timeseries_wide.parquet", columns=columns)
+
+If we were to measure the memory usage of the two calls, we'd see that specifying
+``columns`` uses about 1/10th the memory in this case.
+
+With :func:`pandas.read_csv`, you can specify ``usecols`` to limit the columns
+read into memory. Not all file formats that can be read by pandas provide an option
+to read a subset of columns.
+
+Use efficient datatypes
+-----------------------
+
+The default pandas data types are not the most memory efficient. This is
+especially true for high-cardinality text data (columns with relatively few
+unique values). By using more efficient data types you can store larger datasets
+in memory.
+
+.. ipython:: python
+
+ ts = pd.read_parquet("timeseries.parquet")
+ ts
+
+Now, let's inspect the data types and memory usage to see where we should focus our
+attention.
+
+.. ipython:: python
+
+ ts.dtypes
+
+.. ipython:: python
+
+ ts.memory_usage(deep=True) # memory usage in bytes
+
+
+The ``name`` column is taking up much more memory than any other. It has just a
+few unique values, so it's a good candidate for converting to a
+:class:`Categorical`. With a Categorical, we store each unique name once and use
+space-efficient integers to know which specific name is used in each row.
+
+
+.. ipython:: python
+
+ ts2 = ts.copy()
+ ts2['name'] = ts2['name'].astype('category')
+ ts2.memory_usage(deep=True)
+
+We can go a bit further and downcast the numeric columns to their smallest types
+using :func:`pandas.to_numeric`.
+
+.. ipython:: python
+
+ ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned')
+ ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float')
+ ts2.dtypes
+
+.. ipython:: python
+
+ ts2.memory_usage(deep=True)
+
+.. ipython:: python
+
+ reduction = (ts2.memory_usage(deep=True).sum()
+ / ts.memory_usage(deep=True).sum())
+ print(f"{reduction:0.2f}")
+
+In all, we've reduced the in-memory footprint of this dataset to 1/5 of its
+original size.
+
+See :ref:`categorical` for more on ``Categorical`` and :ref:`basics.dtypes`
+for an overview of all of pandas' dtypes.
+
+Use chunking
+------------
+
+Some workloads can be achieved with chunking: splitting a large problem like "convert this
+directory of CSVs to parquet" into a bunch of small problems ("convert this individual CSV
+file into a Parquet file. Now repeat that for each file in this directory."). As long as each chunk
+fits in memory, you can work with datasets that are much larger than memory.
+
+.. note::
+
+ Chunking works well when the operation you're performing requires zero or minimal
+ coordination between chunks. For more complicated workflows, you're better off
+ :ref:`using another library <scale.other_libraries>`.
+
+Suppose we have an even larger "logical dataset" on disk that's a directory of parquet
+files. Each file in the directory represents a different year of the entire dataset.
+
+.. ipython:: python
+ :suppress:
+
+ import pathlib
+
+ N = 12
+ starts = [f'20{i:>02d}-01-01' for i in range(N)]
+ ends = [f'20{i:>02d}-12-13' for i in range(N)]
+
+ pathlib.Path("data/timeseries").mkdir(exist_ok=True)
+
+ for i, (start, end) in enumerate(zip(starts, ends)):
+ ts = _make_timeseries(start=start, end=end, freq='1T', seed=i)
+ ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet")
+
+
+::
+
+ data
+ └── timeseries
+ ├── ts-00.parquet
+ ├── ts-01.parquet
+ ├── ts-02.parquet
+ ├── ts-03.parquet
+ ├── ts-04.parquet
+ ├── ts-05.parquet
+ ├── ts-06.parquet
+ ├── ts-07.parquet
+ ├── ts-08.parquet
+ ├── ts-09.parquet
+ ├── ts-10.parquet
+ └── ts-11.parquet
+
+Now we'll implement an out-of-core ``value_counts``. The peak memory usage of this
+workflow is the single largest chunk, plus a small series storing the unique value
+counts up to this point. As long as each individual file fits in memory, this will
+work for arbitrary-sized datasets.
+
+.. ipython:: python
+
+ %%time
+ files = pathlib.Path("data/timeseries/").glob("ts*.parquet")
+ counts = pd.Series(dtype=int)
+ for path in files:
+ # Only one dataframe is in memory at a time...
+ df = pd.read_parquet(path)
+ # ... plus a small Series `counts`, which is updated.
+ counts = counts.add(df['name'].value_counts(), fill_value=0)
+ counts.astype(int)
+
+Some readers, like :meth:`pandas.read_csv`, offer parameters to control the
+``chunksize`` when reading a single file.
+
+Manually chunking is an OK option for workflows that don't
+require too sophisticated of operations. Some operations, like ``groupby``, are
+much harder to do chunkwise. In these cases, you may be better switching to a
+different library that implements these out-of-core algorithms for you.
+
+.. _scale.other_libraries:
+
+Use other libraries
+-------------------
+
+Pandas is just one library offering a DataFrame API. Because of its popularity,
+pandas' API has become something of a standard that other libraries implement.
+The pandas documentation maintains a list of libraries implementing a DataFrame API
+in :ref:`our ecosystem page <ecosystem.out-of-core>`.
+
+For example, `Dask`_, a parallel computing library, has `dask.dataframe`_, a
+pandas-like API for working with larger than memory datasets in parallel. Dask
+can use multiple threads or processes on a single machine, or a cluster of
+machines to process data in parallel.
+
+
+We'll import ``dask.dataframe`` and notice that the API feels similar to pandas.
+We can use Dask's ``read_parquet`` function, but provide a globstring of files to read in.
+
+.. ipython:: python
+
+ import dask.dataframe as dd
+
+ ddf = dd.read_parquet("data/timeseries/ts*.parquet", engine="pyarrow")
+ ddf
+
+Inspecting the ``ddf`` object, we see a few things
+
+* There are familiar attributes like ``.columns`` and ``.dtypes``
+* There are familiar methods like ``.groupby``, ``.sum``, etc.
+* There are new attributes like ``.npartitions`` and ``.divisions``
+
+The partitions and divisions are how Dask parallizes computation. A **Dask**
+DataFrame is made up of many **Pandas** DataFrames. A single method call on a
+Dask DataFrame ends up making many pandas method calls, and Dask knows how to
+coordinate everything to get the result.
+
+.. ipython:: python
+
+ ddf.columns
+ ddf.dtypes
+ ddf.npartitions
+
+One major difference: the ``dask.dataframe`` API is *lazy*. If you look at the
+repr above, you'll notice that the values aren't actually printed out; just the
+column names and dtypes. That's because Dask hasn't actually read the data yet.
+Rather than executing immediately, doing operations build up a **task graph**.
+
+.. ipython:: python
+
+ ddf
+ ddf['name']
+ ddf['name'].value_counts()
+
+Each of these calls is instant because the result isn't being computed yet.
+We're just building up a list of computation to do when someone needs the
+result. Dask knows that the return type of a ``pandas.Series.value_counts``
+is a pandas Series with a certain dtype and a certain name. So the Dask version
+returns a Dask Series with the same dtype and the same name.
+
+To get the actual result you can call ``.compute()``.
+
+.. ipython:: python
+
+ %time ddf['name'].value_counts().compute()
+
+At that point, you get back the same thing you'd get with pandas, in this case
+a concrete pandas Series with the count of each ``name``.
+
+Calling ``.compute`` causes the full task graph to be executed. This includes
+reading the data, selecting the columns, and doing the ``value_counts``. The
+execution is done *in parallel* where possible, and Dask tries to keep the
+overall memory footprint small. You can work with datasets that are much larger
+than memory, as long as each partition (a regular pandas DataFrame) fits in memory.
+
+By default, ``dask.dataframe`` operations use a threadpool to do operations in
+parallel. We can also connect to a cluster to distribute the work on many
+machines. In this case we'll connect to a local "cluster" made up of several
+processes on this single machine.
+
+.. code-block:: python
+
+ >>> from dask.distributed import Client, LocalCluster
+
+ >>> cluster = LocalCluster()
+ >>> client = Client(cluster)
+ >>> client
+ <Client: 'tcp://127.0.0.1:53349' processes=4 threads=8, memory=17.18 GB>
+
+Once this ``client`` is created, all of Dask's computation will take place on
+the cluster (which is just processes in this case).
+
+Dask implements the most used parts of the pandas API. For example, we can do
+a familiar groupby aggregation.
+
+.. ipython:: python
+
+ %time ddf.groupby('name')[['x', 'y']].mean().compute().head()
+
+The grouping and aggregation is done out-of-core and in parallel.
+
+When Dask knows the ``divisions`` of a dataset, certain optimizations are
+possible. When reading parquet datasets written by dask, the divisions will be
+known automatically. In this case, since we created the parquet files manually,
+we need to supply the divisions manually.
+
+.. ipython:: python
+
+ N = 12
+ starts = [f'20{i:>02d}-01-01' for i in range(N)]
+ ends = [f'20{i:>02d}-12-13' for i in range(N)]
+
+ divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),)
+ ddf.divisions = divisions
+ ddf
+
+Now we can do things like fast random access with ``.loc``.
+
+.. ipython:: python
+
+ ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute()
+
+Dask knows to just look in the 3rd partition for selecting values in `2002`. It
+doesn't need to look at any other data.
+
+Many workflows involve a large amount of data and processing it in a way that
+reduces the size to something that fits in memory. In this case, we'll resample
+to daily frequency and take the mean. Once we've taken the mean, we know the
+results will fit in memory, so we can safely call ``compute`` without running
+out of memory. At that point it's just a regular pandas object.
+
+.. ipython:: python
+
+ @savefig dask_resample.png
+ ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot()
+
+These Dask examples have all be done using multiple processes on a single
+machine. Dask can be `deployed on a cluster
+<https://docs.dask.org/en/latest/setup.html>`_ to scale up to even larger
+datasets.
+
+You see more dask examples at https://examples.dask.org.
+
+.. _Dask: https://dask.org
+.. _dask.dataframe: https://docs.dask.org/en/latest/dataframe.html
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index a78bc07ac2715..a6abe39f24ac3 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -111,6 +111,13 @@ Other API changes
- :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`)
-
+.. _whatsnew_1000.api.documentation:
+
+Documentation Improvements
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Added new section on :ref:`scale` (:issue:`28315`).
+
.. _whatsnew_1000.deprecations:
Deprecations
diff --git a/environment.yml b/environment.yml
index 7629fa52e7829..7c3ec9064cba3 100644
--- a/environment.yml
+++ b/environment.yml
@@ -35,6 +35,12 @@ dependencies:
- nbconvert>=5.4.1
- nbsphinx
- pandoc
+ # Dask and its dependencies
+ - dask-core
+ - toolz>=0.7.3
+ - fsspec>=0.5.1
+ - partd>=0.3.10
+ - cloudpickle>=0.2.1
# web (jinja2 is also needed, but it's also an optional pandas dependency)
- markdown
@@ -76,7 +82,7 @@ dependencies:
- html5lib # pandas.read_html
- lxml # pandas.read_html
- openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- - pyarrow>=0.9.0 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
+ - pyarrow>=0.13.1 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
- pyqt>=5.9.2 # pandas.read_clipboard
- pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
- python-snappy # required by pyarrow
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index aee58f808d9e6..1c0a8dbc19ccd 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1651,6 +1651,87 @@ def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
+_names = [
+ "Alice",
+ "Bob",
+ "Charlie",
+ "Dan",
+ "Edith",
+ "Frank",
+ "George",
+ "Hannah",
+ "Ingrid",
+ "Jerry",
+ "Kevin",
+ "Laura",
+ "Michael",
+ "Norbert",
+ "Oliver",
+ "Patricia",
+ "Quinn",
+ "Ray",
+ "Sarah",
+ "Tim",
+ "Ursula",
+ "Victor",
+ "Wendy",
+ "Xavier",
+ "Yvonne",
+ "Zelda",
+]
+
+
+def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
+ """
+ Make a DataFrame with a DatetimeIndex
+
+ Parameters
+ ----------
+ start : str or Timestamp, default "2000-01-01"
+ The start of the index. Passed to date_range with `freq`.
+ end : str or Timestamp, default "2000-12-31"
+ The end of the index. Passed to date_range with `freq`.
+ freq : str or Freq
+ The frequency to use for the DatetimeIndex
+ seed : int, optional
+ The random state seed.
+
+ * name : object dtype with string names
+ * id : int dtype with
+ * x, y : float dtype
+
+ Examples
+ --------
+ >>> _make_timeseries()
+ id name x y
+ timestamp
+ 2000-01-01 982 Frank 0.031261 0.986727
+ 2000-01-02 1025 Edith -0.086358 -0.032920
+ 2000-01-03 982 Edith 0.473177 0.298654
+ 2000-01-04 1009 Sarah 0.534344 -0.750377
+ 2000-01-05 963 Zelda -0.271573 0.054424
+ ... ... ... ... ...
+ 2000-12-27 980 Ingrid -0.132333 -0.422195
+ 2000-12-28 972 Frank -0.376007 -0.298687
+ 2000-12-29 1009 Ursula -0.865047 -0.503133
+ 2000-12-30 1000 Hannah -0.063757 -0.507336
+ 2000-12-31 972 Tim -0.869120 0.531685
+ """
+ index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
+ n = len(index)
+ state = np.random.RandomState(seed)
+ columns = {
+ "name": state.choice(_names, size=n),
+ "id": state.poisson(1000, size=n),
+ "x": state.rand(n) * 2 - 1,
+ "y": state.rand(n) * 2 - 1,
+ }
+ df = pd.DataFrame(columns, index=index, columns=sorted(columns))
+ if df.index[-1] == end:
+ df = df.iloc[:-1]
+ return df
+
+
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
diff --git a/requirements-dev.txt b/requirements-dev.txt
index fd8e6378240b4..698e4f3aea094 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -17,6 +17,11 @@ numpydoc>=0.9.0
nbconvert>=5.4.1
nbsphinx
pandoc
+dask-core
+toolz>=0.7.3
+fsspec>=0.5.1
+partd>=0.3.10
+cloudpickle>=0.2.1
markdown
feedparser
pyyaml
@@ -48,7 +53,7 @@ fastparquet>=0.2.1
html5lib
lxml
openpyxl
-pyarrow>=0.9.0
+pyarrow>=0.13.1
pyqt5>=5.9.2
tables>=3.4.2
python-snappy
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 29fe8bf84c12b..44fe50b99560a 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -20,7 +20,7 @@
import yaml
EXCLUDE = {"python=3"}
-RENAME = {"pytables": "tables", "pyqt": "pyqt5"}
+RENAME = {"pytables": "tables", "pyqt": "pyqt5", "dask-core": "dask"}
def conda_package_to_pip(package):
| Closes https://github.com/pandas-dev/pandas/issues/28315 | https://api.github.com/repos/pandas-dev/pandas/pulls/28577 | 2019-09-23T11:15:58Z | 2019-10-01T11:59:04Z | 2019-10-01T11:59:04Z | 2019-10-01T12:09:30Z |
[WIP] Annotate DataFrame (Part 3) | diff --git a/pandas/_typing.py b/pandas/_typing.py
index 445eff9e19e47..e480933e9da39 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -23,6 +23,7 @@
from pandas.core.indexes.base import Index # noqa: F401
from pandas.core.series import Series # noqa: F401
from pandas.core.generic import NDFrame # noqa: F401
+ from pandas.io.formats.style import Styler # noqa: F401
AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fde3d1657b4f2..08ad92478c67c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -84,7 +84,7 @@
)
from pandas.core.dtypes.missing import isna, notna
-from pandas._typing import Axes, Dtype, FilePathOrBuffer
+from pandas._typing import Axes, Dtype, FilePathOrBuffer, Styler
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
@@ -804,7 +804,7 @@ def to_string(
# ----------------------------------------------------------------------
@property
- def style(self):
+ def style(self) -> "Styler":
"""
Returns a Styler object.
@@ -882,7 +882,7 @@ def items(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
def iteritems(self):
yield from self.items()
- def iterrows(self):
+ def iterrows(self) -> Iterable[Tuple[Index, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
@@ -1020,7 +1020,7 @@ def __len__(self) -> int:
"""
return len(self.index)
- def dot(self, other):
+ def dot(self, other: Union[Series, DataFrame]) -> Union[Series, DataFrame]:
"""
Compute the matrix multiplication between the DataFrame and other.
@@ -1131,13 +1131,13 @@ def dot(self, other):
else: # pragma: no cover
raise TypeError("unsupported type: {oth}".format(oth=type(other)))
- def __matmul__(self, other):
+ def __matmul__(self, other: Union[Series, DataFrame]) -> Union[Series, DataFrame]:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
- def __rmatmul__(self, other):
+ def __rmatmul__(self, other: Union[Series, DataFrame]) -> Union[Series, DataFrame]:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
@@ -4631,7 +4631,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
duplicated = self.duplicated(subset, keep=keep)
if inplace:
- (inds,) = (-duplicated)._ndarray_values.nonzero()
+ inds = (-duplicated)._ndarray_values.nonzero()[0]
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
| - [x] part of #26792
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28575 | 2019-09-23T07:00:49Z | 2020-01-01T20:56:45Z | null | 2020-03-31T02:02:47Z |
Add default parameter for get_group | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e010e615e176e..14cb8ea69b9b1 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -659,7 +659,7 @@ def curried(x):
return wrapper
- def get_group(self, name, obj=None):
+ def get_group(self, name, obj=None, default=None):
"""
Construct DataFrame from group with provided name.
@@ -671,6 +671,9 @@ def get_group(self, name, obj=None):
the DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
+ default : object
+ the default object in case "the group requested" -> `name` is
+ not found.
Returns
-------
@@ -681,6 +684,8 @@ def get_group(self, name, obj=None):
inds = self._get_index(name)
if not len(inds):
+ if default is not None:
+ return default
raise KeyError(name)
return obj.take(inds, axis=self.axis)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bec5cbc5fecb8..a4c2cfff8952c 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1948,3 +1948,14 @@ def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
result = getattr(grouped, op)()
expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
assert_frame_equal(result, expected)
+
+
+def test_get_group_default():
+ # Issue: 9299
+ data = DataFrame({"ind": [0, 0, 2, 2]})
+ gb = data.groupby("ind")
+
+ result = gb.get_group(1, default=gb.get_group(0))
+ expected = gb.get_group(0)
+
+ assert_frame_equal(result, expected)
| - [x] closes #9299
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28574 | 2019-09-23T05:06:40Z | 2019-10-10T02:06:39Z | null | 2019-10-10T02:10:06Z |
BUG: Preserve subclassing with groupby operations | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 173cc6b6b483c..8ddb714fae90e 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -272,6 +272,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue: `28192`)
- Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`)
- Bug in :meth:`DataFrameGroupby.agg` not able to use lambda function with named aggregation (:issue:`27519`)
+- Bug in groupby/resampling where subclasses were not returned from groupby operations (:issue:`28330`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..b535e7e2f4288 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -337,7 +337,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
if not len(result_columns) and errors is not None:
raise errors
- return DataFrame(result, columns=result_columns)
+ return self.obj._constructor(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
@@ -356,7 +356,7 @@ def _decide_output_index(self, output, labels):
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
- return DataFrame(index=keys)
+ return self.obj._constructor(index=keys)
key_names = self.grouper.names
@@ -372,7 +372,7 @@ def first_not_none(values):
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
- return DataFrame()
+ return self.obj._constructor()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
@@ -401,7 +401,7 @@ def first_not_none(values):
# make Nones an empty object
v = first_not_none(values)
if v is None:
- return DataFrame()
+ return self.obj._constructor()
elif isinstance(v, NDFrame):
values = [
x if x is not None else v._constructor(**v._construct_axes_dict())
@@ -467,7 +467,7 @@ def first_not_none(values):
or isinstance(key_index, MultiIndex)
):
stacked_values = np.vstack([np.asarray(v) for v in values])
- result = DataFrame(
+ result = self.obj._constructor(
stacked_values, index=key_index, columns=index
)
else:
@@ -484,14 +484,16 @@ def first_not_none(values):
result.columns = index
else:
stacked_values = np.vstack([np.asarray(v) for v in values])
- result = DataFrame(
+ result = self.obj._constructor(
stacked_values.T, index=v.index, columns=key_index
)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else caluse
- return Series(values, index=key_index, name=self._selection_name)
+ return self.obj._constructor_sliced(
+ values, index=key_index, name=self._selection_name
+ )
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
@@ -510,7 +512,7 @@ def first_not_none(values):
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
- return Series(values, index=key_index)._convert(
+ return self.obj._constructor_sliced(values, index=key_index)._convert(
datetime=True, coerce=coerce
)
@@ -554,7 +556,7 @@ def _transform_general(self, func, *args, **kwargs):
r.columns = group.columns
r.index = group.index
else:
- r = DataFrame(
+ r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
@@ -681,7 +683,7 @@ def _transform_item_by_item(self, obj, wrapper):
if len(output) < len(obj.columns):
columns = columns.take(inds)
- return DataFrame(output, index=obj.index, columns=columns)
+ return self.obj._constructor(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
@@ -875,7 +877,7 @@ def aggregate(self, func=None, *args, **kwargs):
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
- ret = Series(result, index=index)
+ ret = self.obj._constructor(result, index=index)
if not self.as_index: # pragma: no cover
print("Warning, ignoring as_index=True")
@@ -943,20 +945,21 @@ def _aggregate_multiple_funcs(self, arg, _level):
# let higher level handle
if _level:
return results
+ self.obj._constructor(results, columns=columns)
- return DataFrame(results, columns=columns)
+ return self.obj._constructor_expanddim(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
- return DataFrame(output, index=index, columns=names)
+ return self.obj._constructor_expanddim(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
- return Series(output, index=index, name=name)
+ return self.obj._constructor(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
result = self._wrap_output(
@@ -970,7 +973,7 @@ def _wrap_transformed_output(self, output, names=None):
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
- return Series([], name=self._selection_name, index=keys)
+ return self.obj._constructor([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
@@ -982,7 +985,9 @@ def _get_index():
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
- result = self._reindex_output(DataFrame(values, index=index))
+ result = self._reindex_output(
+ self.obj._constructor_expanddim(values, index=index)
+ )
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
@@ -996,7 +1001,9 @@ def _get_index():
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
- result = Series(data=values, index=_get_index(), name=self._selection_name)
+ result = self.obj._constructor(
+ data=values, index=_get_index(), name=self._selection_name
+ )
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
@@ -1052,7 +1059,7 @@ def transform(self, func, *args, **kwargs):
result = concat(results).sort_index()
else:
- result = Series()
+ result = self.obj._constructor()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs
@@ -1078,7 +1085,7 @@ def _transform_fast(self, func, func_nm):
out = algorithms.take_1d(func()._values, ids)
if cast:
out = self._try_cast(out, self.obj)
- return Series(out, index=self.obj.index, name=self.obj.name)
+ return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
@@ -1193,7 +1200,7 @@ def nunique(self, dropna=True):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
- return Series(res, index=ri, name=self._selection_name)
+ return self.obj._constructor(res, index=ri, name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
@@ -1233,7 +1240,7 @@ def value_counts(
else:
# lab is a Categorical with categories an IntervalIndex
- lab = cut(Series(val), bins, include_lowest=True)
+ lab = cut(self.obj._constructor(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
@@ -1293,7 +1300,7 @@ def value_counts(
if is_integer_dtype(out):
out = ensure_int64(out)
- return Series(out, index=mi, name=self._selection_name)
+ return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
@@ -1322,7 +1329,7 @@ def value_counts(
if is_integer_dtype(out):
out = ensure_int64(out)
- return Series(out, index=mi, name=self._selection_name)
+ return self.obj._constructor(out, index=mi, name=self._selection_name)
def count(self):
"""
@@ -1341,7 +1348,7 @@ def count(self):
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
- return Series(
+ return self.obj._constructor(
out,
index=self.grouper.result_index,
name=self._selection_name,
@@ -1513,9 +1520,11 @@ def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
- return DataFrame(result, index=obj.columns, columns=result_index).T
+ return self.obj._constructor(
+ result, index=obj.columns, columns=result_index
+ ).T
else:
- return DataFrame(result, index=obj.index, columns=result_index)
+ return self.obj._constructor(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
@@ -1548,12 +1557,12 @@ def _wrap_aggregated_output(self, output, names=None):
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
- result = DataFrame(output, columns=output_keys)
+ result = self.obj._constructor(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
- result = DataFrame(output, index=index, columns=output_keys)
+ result = self.obj._constructor(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
@@ -1561,20 +1570,20 @@ def _wrap_aggregated_output(self, output, names=None):
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
- return DataFrame(output, index=self.obj.index)
+ return self.obj._constructor(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, [items, index])
- result = DataFrame(mgr)
+ result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
- result = DataFrame(mgr)
+ result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bec5cbc5fecb8..d00b3ce64f021 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -134,6 +134,23 @@ def func(dataf):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
+ # https://github.com/pandas-dev/pandas/issues/28330
+ # Test groupby operations on subclassed dataframes/series
+
+ cdf = tm.SubclassedDataFrame(
+ [
+ {"val1": 1, "val2": 20},
+ {"val1": 1, "val2": 19},
+ {"val1": 2, "val2": 27},
+ {"val1": 2, "val2": 12},
+ ]
+ )
+ result = cdf.groupby("val1").sum()
+ assert isinstance(result, tm.SubclassedDataFrame)
+ assert isinstance(result, DataFrame)
+ assert isinstance(result["val2"], tm.SubclassedSeries)
+ assert isinstance(result["val2"], Series)
+
def test_inconsistent_return_type():
# GH5592
| Preserve subclassing with groupby operations
- [x] closes #28330
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28573 | 2019-09-23T02:20:01Z | 2020-01-17T23:42:07Z | null | 2020-05-08T17:15:10Z |
DOC: Fixed PR08 docstring errors in pandas.tseries | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 82cbfa831bf32..4ebb4f353a8fd 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1007,9 +1007,9 @@ class CustomBusinessDay(_CustomMixin, BusinessDay):
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
- weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ Weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
- list/array of dates to exclude from the set of valid business days,
+ List/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
offset : timedelta, default timedelta(0)
@@ -1671,16 +1671,19 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
Parameters
----------
n : int
- week : {0, 1, 2, 3, ...}, default 0
- 0 is 1st week of month, 1 2nd week, etc.
- weekday : {0, 1, ..., 6}, default 0
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
+ week : int {0, 1, 2, 3, ...}, default 0
+ A specific integer for the week of the month.
+ e.g. 0 is 1st week of month, 1 is the 2nd week, etc.
+ weekday : int {0, 1, ..., 6}, default 0
+ A specific integer for the day of the week.
+
+ - 0 is Monday
+ - 1 is Tuesday
+ - 2 is Wednesday
+ - 3 is Thursday
+ - 4 is Friday
+ - 5 is Saturday
+ - 6 is Sunday
"""
_prefix = "WOM"
@@ -1747,14 +1750,16 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
Parameters
----------
n : int, default 1
- weekday : {0, 1, ..., 6}, default 0
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
+ weekday : int {0, 1, ..., 6}, default 0
+ A specific integer for the day of the week.
+
+ - 0 is Monday
+ - 1 is Tuesday
+ - 2 is Wednesday
+ - 3 is Thursday
+ - 4 is Friday
+ - 5 is Saturday
+ - 6 is Sunday
"""
_prefix = "LWOM"
@@ -2055,6 +2060,7 @@ class FY5253(DateOffset):
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
+
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
@@ -2064,17 +2070,25 @@ class FY5253(DateOffset):
Parameters
----------
n : int
- weekday : {0, 1, ..., 6}
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
- startingMonth : The month in which fiscal years end. {1, 2, ... 12}
- variation : str
- {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
+ weekday : int {0, 1, ..., 6}, default 0
+ A specific integer for the day of the week.
+
+ - 0 is Monday
+ - 1 is Tuesday
+ - 2 is Wednesday
+ - 3 is Thursday
+ - 4 is Friday
+ - 5 is Saturday
+ - 6 is Sunday
+
+ startingMonth : int {1, 2, ... 12}, default 1
+ The month in which the fiscal year ends.
+
+ variation : str, default "nearest"
+ Method of employing 4-4-5 calendar. There are two options:
+
+ - "nearest" means year end is **weekday** closest to last day of month in year.
+ - "last" means year end is final **weekday** of the final month in fiscal year.
"""
_prefix = "RE"
@@ -2258,6 +2272,7 @@ class FY5253Quarter(DateOffset):
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
+
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
@@ -2271,19 +2286,28 @@ class FY5253Quarter(DateOffset):
Parameters
----------
n : int
- weekday : {0, 1, ..., 6}
- 0: Mondays
- 1: Tuesdays
- 2: Wednesdays
- 3: Thursdays
- 4: Fridays
- 5: Saturdays
- 6: Sundays
- startingMonth : The month in which fiscal years end. {1, 2, ... 12}
- qtr_with_extra_week : The quarter number that has the leap
- or 14 week when needed. {1,2,3,4}
- variation : str
- {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
+ weekday : int {0, 1, ..., 6}, default 0
+ A specific integer for the day of the week.
+
+ - 0 is Monday
+ - 1 is Tuesday
+ - 2 is Wednesday
+ - 3 is Thursday
+ - 4 is Friday
+ - 5 is Saturday
+ - 6 is Sunday
+
+ startingMonth : int {1, 2, ..., 12}, default 1
+ The month in which fiscal years end.
+
+ qtr_with_extra_week : int {1, 2, 3, 4}, default 1
+ The quarter number that has the leap or 14 week when needed.
+
+ variation : str, default "nearest"
+ Method of employing 4-4-5 calendar. There are two options:
+
+ - "nearest" means year end is **weekday** closest to last day of month in year.
+ - "last" means year end is final **weekday** of the final month in fiscal year.
"""
_prefix = "REQ"
@@ -2707,8 +2731,8 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()):
Parameters
----------
- start : datetime (default None)
- end : datetime (default None)
+ start : datetime, (default None)
+ end : datetime, (default None)
periods : int, (default None)
offset : DateOffset, (default BDay())
| This relates to: [27977](https://github.com/pandas-dev/pandas/issues/27977). I have fixed the doc PR08 formatting issues for:
```
pandas.tseries.offsets.CustomBusinessDay: Parameter "weekmask" description should start with a capital letter
pandas.tseries.offsets.CustomBusinessDay: Parameter "holidays" description should start with a capital letter
pandas.tseries.offsets.WeekOfMonth: Parameter "week" description should start with a capital letter
pandas.tseries.offsets.WeekOfMonth: Parameter "weekday" description should start with a capital letter
pandas.tseries.offsets.LastWeekOfMonth: Parameter "weekday" description should start with a capital letter
pandas.tseries.offsets.FY5253: Parameter "weekday" description should start with a capital letter
pandas.tseries.offsets.FY5253: Parameter "variation" description should start with a capital letter
pandas.tseries.offsets.FY5253Quarter: Parameter "weekday" description should start with a capital letter
pandas.tseries.offsets.FY5253Quarter: Parameter "qtr_with_extra_week" description should start with a capital letter
pandas.tseries.offsets.FY5253Quarter: Parameter "variation" description should start with a capital letter
pandas.tseries.offsets.CDay: Parameter "weekmask" description should start with a capital letter
pandas.tseries.offsets.CDay: Parameter "holidays" description should start with a capital letter
```
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Will continue to work through all PR08 docstring errors. | https://api.github.com/repos/pandas-dev/pandas/pulls/28571 | 2019-09-22T23:41:47Z | 2019-10-01T04:08:13Z | 2019-10-01T04:08:13Z | 2019-10-01T18:51:29Z |
Bugfix/groupby datetime issue | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index eb4b72d01d59a..08bc333d926db 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -182,7 +182,7 @@ Datetimelike
- Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`)
- Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`)
- Bug in :class:`DataFrame` arithmetic operations when operating with a :class:`Series` with dtype `'timedelta64[ns]'` (:issue:`28049`)
--
+- Bug in :func:`pandas.core.groupby.generic.SeriesGroupBy.apply` raising ``ValueError`` when a column in the original DataFrame is a datetime and the column labels are not standard integers (:issue:`28247`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f8f1455561c03..a4dc1613d8c80 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1905,7 +1905,9 @@ def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
result = result.copy()
obj_cols = [
- idx for idx in range(len(result.columns)) if is_object_dtype(result.dtypes[idx])
+ idx
+ for idx in range(len(result.columns))
+ if is_object_dtype(result.dtypes.iloc[idx])
]
# See GH#26285
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 76588549532b1..4d0063b773bc5 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -657,3 +657,22 @@ def test_apply_with_mixed_types():
result = g.apply(lambda x: x / x.sum())
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "group_column_dtlike",
+ [datetime.today(), datetime.today().date(), datetime.today().time()],
+)
+def test_apply_datetime_issue(group_column_dtlike):
+ # GH-28247
+ # groupby-apply throws an error if one of the columns in the DataFrame
+ # is a datetime object and the column labels are different from
+ # standard int values in range(len(num_columns))
+
+ df = pd.DataFrame({"a": ["foo"], "b": [group_column_dtlike]})
+ result = df.groupby("a").apply(lambda x: pd.Series(["spam"], index=[42]))
+
+ expected = pd.DataFrame(
+ ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42]
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #28247
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28569 | 2019-09-22T18:14:59Z | 2019-10-03T17:25:04Z | 2019-10-03T17:25:04Z | 2020-01-18T00:29:08Z |
DOC: update fixing unknown parameters errors (error code PR02) | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index fe6b339c2f4c8..8724382d9ec55 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -28,7 +28,7 @@ def hist_series(
yrot=None,
figsize=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Draw histogram of the input series using matplotlib.
@@ -56,7 +56,7 @@ def hist_series(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- `**kwds` : keywords
+ **kwargs
To be passed to the actual plotting function
Returns
@@ -80,7 +80,7 @@ def hist_series(
yrot=yrot,
figsize=figsize,
bins=bins,
- **kwds
+ **kwargs
)
@@ -99,7 +99,7 @@ def hist_frame(
figsize=None,
layout=None,
bins=10,
- **kwds
+ **kwargs
):
"""
Make a histogram of the DataFrame's.
@@ -151,7 +151,7 @@ def hist_frame(
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
@@ -194,7 +194,7 @@ def hist_frame(
figsize=figsize,
layout=layout,
bins=bins,
- **kwds
+ **kwargs
)
@@ -209,7 +209,7 @@ def boxplot(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
"""
Make a box plot from DataFrame columns.
@@ -260,7 +260,7 @@ def boxplot(
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
- **kwds
+ **kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
@@ -385,7 +385,7 @@ def boxplot(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -401,7 +401,7 @@ def boxplot_frame(
figsize=None,
layout=None,
return_type=None,
- **kwds
+ **kwargs
):
plot_backend = _get_plot_backend()
return plot_backend.boxplot_frame(
@@ -415,7 +415,7 @@ def boxplot_frame(
figsize=figsize,
layout=layout,
return_type=return_type,
- **kwds
+ **kwargs
)
@@ -431,7 +431,7 @@ def boxplot_frame_groupby(
layout=None,
sharex=False,
sharey=True,
- **kwds
+ **kwargs
):
"""
Make box plots from DataFrameGroupBy data.
@@ -459,7 +459,7 @@ def boxplot_frame_groupby(
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
- `**kwds` : Keyword Arguments
+ **kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
@@ -495,7 +495,7 @@ def boxplot_frame_groupby(
layout=layout,
sharex=sharex,
sharey=sharey,
- **kwds
+ **kwargs
)
@@ -586,7 +586,7 @@ class PlotAccessor(PandasObject):
labels with "(right)" in the legend
include_bool : bool, default is False
If True, boolean values can be plotted.
- `**kwds` : keywords
+ **kwargs
Options to pass to matplotlib plotting method.
Returns
@@ -810,7 +810,7 @@ def line(self, x=None, y=None, **kwargs):
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -880,7 +880,7 @@ def bar(self, x=None, y=None, **kwargs):
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -963,7 +963,7 @@ def barh(self, x=None, y=None, **kwargs):
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1049,7 +1049,7 @@ def box(self, by=None, **kwargs):
----------
by : str or sequence
Column in the DataFrame to group by.
- **kwds : optional
+ **kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
@@ -1092,7 +1092,7 @@ def hist(self, by=None, bins=10, **kwargs):
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1148,7 +1148,7 @@ def kde(self, bw_method=None, ind=None, **kwargs):
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
@@ -1250,7 +1250,7 @@ def area(self, x=None, y=None, **kwargs):
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
- **kwds : optional
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
@@ -1322,7 +1322,7 @@ def pie(self, **kwargs):
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1404,7 +1404,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs):
- A column name or position whose values will be used to color the
marker points according to a colormap.
- **kwds
+ **kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
@@ -1476,7 +1476,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
- **kwds
+ **kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
| - Documentation update fixing some of the methods with a PR-2 error code which involved updating **kwds to **kwargs
- No tests required
- The data frame.plot methods now have the docstring arguments updated from **kwds to **kwargs to match the method signature and exclude them from the PR02 errors:
| https://api.github.com/repos/pandas-dev/pandas/pulls/28567 | 2019-09-22T10:36:22Z | 2019-09-27T07:00:09Z | 2019-09-27T07:00:09Z | 2019-09-27T07:00:24Z |
BENCH: Add rolling apply benchmarks | diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index b42fa553b495c..493f96d46d5e7 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -25,6 +25,25 @@ def peakmem_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
+class Apply:
+ params = (
+ ["DataFrame", "Series"],
+ [10, 1000],
+ ["int", "float"],
+ [sum, np.sum, lambda x: np.sum(x) + 5],
+ [True, False],
+ )
+ param_names = ["contructor", "window", "dtype", "function", "raw"]
+
+ def setup(self, constructor, window, dtype, function, raw):
+ N = 10 ** 5
+ arr = (100 * np.random.random(N)).astype(dtype)
+ self.roll = getattr(pd, constructor)(arr).rolling(window)
+
+ def time_rolling(self, constructor, window, dtype, function, raw):
+ self.roll.apply(function, raw=raw)
+
+
class ExpandingMethods:
params = (
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28566 | 2019-09-22T03:19:01Z | 2019-09-23T00:06:19Z | 2019-09-23T00:06:19Z | 2019-09-23T00:06:23Z |
TST: un-xfail incorrectly xfailed tests for maybe_promote | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e31918c21c2ac..504eec8010ec6 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -358,6 +358,7 @@ def maybe_promote(dtype, fill_value=np.nan):
fill_value = NaT
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
+
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -366,6 +367,8 @@ def maybe_promote(dtype, fill_value=np.nan):
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
+ else:
+ fill_value = np.bool_(fill_value)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -374,6 +377,10 @@ def maybe_promote(dtype, fill_value=np.nan):
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
+ elif issubclass(dtype.type, np.floating):
+ # check if we can cast
+ if _check_lossless_cast(fill_value, dtype):
+ fill_value = dtype.type(fill_value)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -398,12 +405,31 @@ def maybe_promote(dtype, fill_value=np.nan):
pass
elif is_datetime64tz_dtype(dtype):
pass
- elif issubclass(np.dtype(dtype).type, str):
+ elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.object_
return dtype, fill_value
+def _check_lossless_cast(value, dtype: np.dtype) -> bool:
+ """
+ Check if we can cast the given value to the given dtype _losslesly_.
+
+ Parameters
+ ----------
+ value : object
+ dtype : np.dtype
+
+ Returns
+ -------
+ bool
+ """
+ casted = dtype.type(value)
+ if casted == value:
+ return True
+ return False
+
+
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 44aebd4d277f2..211c550100018 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -23,6 +23,7 @@
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype, PandasExtensionDtype
+from pandas.core.dtypes.missing import isna
import pandas as pd
@@ -95,6 +96,7 @@ def _safe_dtype_assert(left_dtype, right_dtype):
"""
Compare two dtypes without raising TypeError.
"""
+ __tracebackhide__ = True
if isinstance(right_dtype, PandasExtensionDtype):
# switch order of equality check because numpy dtypes (e.g. if
# left_dtype is np.object_) do not know some expected dtypes (e.g.
@@ -157,20 +159,17 @@ def _check_promote(
_safe_dtype_assert(result_dtype, expected_dtype)
- # for equal values, also check type (relevant e.g. for int vs float, resp.
- # for different datetimes and timedeltas)
- match_value = (
- result_fill_value
- == expected_fill_value
- # disabled type check due to too many xfails; GH 23982/25425
- # and type(result_fill_value) == type(expected_fill_value)
- )
+ # GH#23982/25425 require the same type in addition to equality/NA-ness
+ res_type = type(result_fill_value)
+ ex_type = type(expected_fill_value)
+ assert res_type == ex_type
+
+ match_value = result_fill_value == expected_fill_value
+ # Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None and iNaT == iNaT (which is checked
# through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
- match_missing = (result_fill_value is np.nan and expected_fill_value is np.nan) or (
- result_fill_value is NaT and expected_fill_value is NaT
- )
+ match_missing = isna(result_fill_value) and isna(expected_fill_value)
assert match_value or match_missing
@@ -251,7 +250,9 @@ def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced, box):
if boxed and fill_dtype == bool:
pytest.xfail("falsely upcasts to object")
- if boxed and box_dtype is None and is_datetime_or_timedelta_dtype(fill_dtype):
+ if boxed and box_dtype is None and fill_dtype.kind == "M":
+ pytest.xfail("wrongly casts fill_value")
+ if boxed and box_dtype is None and fill_dtype.kind == "m":
pytest.xfail("wrongly casts fill_value")
# create array of given dtype; casts "1" to correct dtype
@@ -282,7 +283,9 @@ def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box):
pytest.xfail("falsely upcasts to object")
if boxed and dtype not in (str, object) and box_dtype is None:
pytest.xfail("falsely upcasts to object")
- if not boxed and is_datetime_or_timedelta_dtype(dtype):
+ if not boxed and dtype.kind == "M":
+ pytest.xfail("raises error")
+ if not boxed and dtype.kind == "m":
pytest.xfail("raises error")
# filling anything but bool with bool casts to object
@@ -393,9 +396,6 @@ def test_maybe_promote_datetimetz_with_any_numpy_dtype(
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if box_dtype != object:
- pytest.xfail("does not upcast correctly")
-
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -430,8 +430,6 @@ def test_maybe_promote_datetimetz_with_datetimetz(
pytest.xfail("Cannot process fill_value with this dtype, see GH 24310")
if dtype.tz == fill_dtype.tz and boxed:
pytest.xfail("falsely upcasts")
- if dtype.tz != fill_dtype.tz and not boxed:
- pytest.xfail("falsely upcasts")
# create array of given dtype; casts "1" to correct dtype
fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0]
@@ -466,14 +464,10 @@ def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value, box):
dtype = DatetimeTZDtype(tz=tz_aware_fixture)
boxed, box_dtype = box # read from parametrized fixture
- if boxed and (
- box_dtype == object
- or (box_dtype is None and (fill_value is None or fill_value is NaT))
- ):
- pytest.xfail("false upcasts to object")
# takes the opinion that DatetimeTZ should have single na-marker
# using iNaT would lead to errors elsewhere -> NaT
if not boxed and fill_value == iNaT:
+ # TODO: are we sure iNaT _should_ be cast to NaT?
pytest.xfail("wrong missing value marker")
expected_dtype = dtype
@@ -509,8 +503,10 @@ def test_maybe_promote_any_numpy_dtype_with_datetimetz(
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
boxed, box_dtype = box # read from parametrized fixture
- if is_datetime_or_timedelta_dtype(dtype) and not boxed:
+ if dtype.kind == "m" and not boxed:
pytest.xfail("raises error")
+ elif dtype.kind == "M" and not boxed:
+ pytest.xfail("Comes back as M8 instead of object")
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
@@ -566,19 +562,6 @@ def test_maybe_promote_any_with_timedelta64(
else:
if boxed and box_dtype is None and is_timedelta64_dtype(type(fill_value)):
pytest.xfail("does not upcast correctly")
- if (
- not boxed
- and is_timedelta64_dtype(type(fill_value))
- and (
- is_integer_dtype(dtype)
- or is_float_dtype(dtype)
- or is_complex_dtype(dtype)
- or issubclass(dtype.type, np.bytes_)
- )
- ):
- pytest.xfail("does not upcast correctly")
- if box_dtype == "td_dtype":
- pytest.xfail("falsely upcasts")
if not boxed and is_datetime64_dtype(dtype):
pytest.xfail("raises error")
@@ -612,7 +595,9 @@ def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced, bo
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if boxed and box_dtype is None and is_datetime_or_timedelta_dtype(fill_dtype):
+ if boxed and box_dtype is None and fill_dtype.kind == "m":
+ pytest.xfail("wrong missing value marker")
+ if boxed and box_dtype is None and fill_dtype.kind == "M":
pytest.xfail("wrong missing value marker")
# create array of given dtype; casts "1" to correct dtype
@@ -652,17 +637,6 @@ def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype, bo
if is_datetime_or_timedelta_dtype(dtype) and box_dtype != object:
pytest.xfail("does not upcast or raises")
- if (
- boxed
- and box_dtype in (None, "str")
- and (
- is_integer_dtype(dtype)
- or is_float_dtype(dtype)
- or is_complex_dtype(dtype)
- or issubclass(dtype.type, np.bytes_)
- )
- ):
- pytest.xfail("does not upcast correctly")
# create array of given dtype
fill_value = "abc"
@@ -760,19 +734,6 @@ def test_maybe_promote_any_numpy_dtype_with_na(
pytest.xfail("does not upcast to object")
elif dtype == "uint64" and not boxed and fill_value == iNaT:
pytest.xfail("does not upcast correctly")
- elif is_datetime_or_timedelta_dtype(dtype) and boxed:
- pytest.xfail("falsely upcasts to object")
- elif (
- boxed
- and (
- is_integer_dtype(dtype) or is_float_dtype(dtype) or is_complex_dtype(dtype)
- )
- and fill_value is not NaT
- and dtype != "uint64"
- ):
- pytest.xfail("falsely upcasts to object")
- elif boxed and dtype == "uint64" and (fill_value is np.nan or fill_value is None):
- pytest.xfail("falsely upcasts to object")
# below: opinionated that iNaT should be interpreted as missing value
elif (
not boxed
| This sits on top of #28561. | https://api.github.com/repos/pandas-dev/pandas/pulls/28564 | 2019-09-21T14:57:53Z | 2019-10-01T13:06:04Z | 2019-10-01T13:06:04Z | 2019-10-01T13:30:46Z |
CLN: Assorted cleanups | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c0ed198e200f1..6e73e1636a75b 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -39,7 +39,6 @@
is_period_dtype,
is_scalar,
is_signed_integer_dtype,
- is_sparse,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
@@ -743,7 +742,7 @@ def value_counts(
else:
- if is_extension_array_dtype(values) or is_sparse(values):
+ if is_extension_array_dtype(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
@@ -1623,7 +1622,7 @@ def take_nd(
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
- _maybe_promote to determine this type for any fill_value
+ maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
@@ -1644,9 +1643,7 @@ def take_nd(
if is_extension_array_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
- if is_sparse(arr):
- arr = arr.to_dense()
- elif isinstance(arr, (ABCIndexClass, ABCSeries)):
+ if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
arr = np.asarray(arr)
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 5bd2a2b69deb1..5e8b28267f24f 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -9,7 +9,7 @@
import numpy as np
import numpy.ma as ma
-from pandas._libs import lib, tslibs
+from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas.core.dtypes.cast import (
@@ -36,7 +36,7 @@
is_timedelta64_ns_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.dtypes import ExtensionDtype, registry
+from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype, registry
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
@@ -275,7 +275,7 @@ def array(
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
- except tslibs.IncompatibleFrequency:
+ except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
@@ -365,7 +365,9 @@ def extract_array(obj, extract_numpy=False):
return obj
-def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
+def sanitize_array(
+ data, index, dtype=None, copy: bool = False, raise_cast_failure: bool = False
+):
"""
Sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified.
@@ -486,13 +488,19 @@ def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False
return subarr
-def _try_cast(arr, dtype, copy, raise_cast_failure):
+def _try_cast(
+ arr,
+ dtype: Optional[Union[np.dtype, "ExtensionDtype"]],
+ copy: bool,
+ raise_cast_failure: bool,
+):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
- arr : array-like
+ arr : ndarray, list, tuple, iterator (catchall)
+ Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
@@ -528,11 +536,13 @@ def _try_cast(arr, dtype, copy, raise_cast_failure):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
+ dtype = cast(CategoricalDtype, dtype)
subarr = dtype.construct_array_type()(
arr, dtype.categories, ordered=dtype._ordered
)
elif is_extension_array_dtype(dtype):
# create an extension array from its dtype
+ dtype = cast(ExtensionDtype, dtype)
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
elif dtype is not None and raise_cast_failure:
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e31918c21c2ac..b59660056aadb 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1311,9 +1311,8 @@ def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
- >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
-
-
+ >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str'))
+ array(['1.0', '2.0', None], dtype=object)
"""
subarr = np.array(values, dtype=dtype, copy=copy)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 4ea649a2a6faf..41677af7b1721 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -888,7 +888,8 @@ def is_dtype_equal(source, target):
def is_any_int_dtype(arr_or_dtype) -> bool:
- """Check whether the provided array or dtype is of an integer dtype.
+ """
+ Check whether the provided array or dtype is of an integer dtype.
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 01399a23e810e..04c3b2b7714ef 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2128,7 +2128,8 @@ def _can_hold_na(self):
return True
def _maybe_coerce_values(self, values):
- """Input validation for values passed to __init__. Ensure that
+ """
+ Input validation for values passed to __init__. Ensure that
we have datetime64ns, coercing if necessary.
Parameters
| https://api.github.com/repos/pandas-dev/pandas/pulls/28563 | 2019-09-21T14:41:43Z | 2019-09-23T11:58:41Z | 2019-09-23T11:58:41Z | 2019-09-23T12:44:50Z | |
DEPR: Deprecate numpy argument in read_json | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 6ad6b5129ef5a..c978a1825a390 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -501,6 +501,7 @@ Deprecations
- :func:`pandas.json_normalize` is now exposed in the top-level namespace.
Usage of ``json_normalize`` as ``pandas.io.json.json_normalize`` is now deprecated and
it is recommended to use ``json_normalize`` as :func:`pandas.json_normalize` instead (:issue:`27586`).
+- The ``numpy`` argument of :meth:`pandas.read_json` is deprecated (:issue:`28512`).
-
.. _whatsnew_1000.prior_deprecations:
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 30c1c2d59e983..7e43a0eaca3e0 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -10,6 +10,7 @@
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.errors import AbstractMethodError
+from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.common import ensure_str, is_period_dtype
@@ -353,6 +354,7 @@ def _write(
return serialized
+@deprecate_kwarg(old_arg_name="numpy", new_arg_name=None)
def read_json(
path_or_buf=None,
orient=None,
@@ -466,6 +468,8 @@ def read_json(
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
+ .. deprecated:: 1.0.0
+
precise_float : bool, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index bce3d1de849aa..ff18febca44d6 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -3,6 +3,7 @@
from io import StringIO
import json
import os
+from warnings import catch_warnings, filterwarnings
import numpy as np
import pytest
@@ -1601,3 +1602,13 @@ def test_json_indent_all_orients(self, orient, expected):
def test_json_negative_indent_raises(self):
with pytest.raises(ValueError, match="must be a nonnegative integer"):
pd.DataFrame().to_json(indent=-1)
+
+ @pytest.mark.filterwarnings("ignore:.*msgpack:FutureWarning")
+ def test_deprecate_numpy_argument_read_json(self):
+ # https://github.com/pandas-dev/pandas/issues/28512
+ expected = DataFrame([1, 2, 3])
+ with tm.assert_produces_warning(None):
+ with catch_warnings():
+ filterwarnings("ignore", category=FutureWarning)
+ result = read_json(expected.to_json(), numpy=True)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #28512
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28562 | 2019-09-21T13:39:59Z | 2020-01-03T02:44:28Z | null | 2020-01-03T02:44:28Z |
TST: restore type checks to maybe_promote tests | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e31918c21c2ac..f5406d9f89af8 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -358,6 +358,7 @@ def maybe_promote(dtype, fill_value=np.nan):
fill_value = NaT
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
+
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -366,6 +367,8 @@ def maybe_promote(dtype, fill_value=np.nan):
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
+ else:
+ fill_value = np.bool_(fill_value)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -374,6 +377,10 @@ def maybe_promote(dtype, fill_value=np.nan):
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
+ elif issubclass(dtype.type, np.floating):
+ # check if we can cast
+ if _check_lossless_cast(fill_value, dtype):
+ fill_value = dtype.type(fill_value)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -404,6 +411,25 @@ def maybe_promote(dtype, fill_value=np.nan):
return dtype, fill_value
+def _check_lossless_cast(value, dtype: np.dtype) -> bool:
+ """
+ Check if we can cast the given value to the given dtype _losslessly_.
+
+ Parameters
+ ----------
+ value : object
+ dtype : np.dtype
+
+ Returns
+ -------
+ bool
+ """
+ casted = dtype.type(value)
+ if casted == value:
+ return True
+ return False
+
+
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 44aebd4d277f2..1ea49602a8b78 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -23,6 +23,7 @@
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype, PandasExtensionDtype
+from pandas.core.dtypes.missing import isna
import pandas as pd
@@ -95,6 +96,7 @@ def _safe_dtype_assert(left_dtype, right_dtype):
"""
Compare two dtypes without raising TypeError.
"""
+ __tracebackhide__ = True
if isinstance(right_dtype, PandasExtensionDtype):
# switch order of equality check because numpy dtypes (e.g. if
# left_dtype is np.object_) do not know some expected dtypes (e.g.
@@ -157,20 +159,17 @@ def _check_promote(
_safe_dtype_assert(result_dtype, expected_dtype)
- # for equal values, also check type (relevant e.g. for int vs float, resp.
- # for different datetimes and timedeltas)
- match_value = (
- result_fill_value
- == expected_fill_value
- # disabled type check due to too many xfails; GH 23982/25425
- # and type(result_fill_value) == type(expected_fill_value)
- )
+ # GH#23982/25425 require the same type in addition to equality/NA-ness
+ res_type = type(result_fill_value)
+ ex_type = type(expected_fill_value)
+ assert res_type == ex_type
+
+ match_value = result_fill_value == expected_fill_value
+ # Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None and iNaT == iNaT (which is checked
# through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
- match_missing = (result_fill_value is np.nan and expected_fill_value is np.nan) or (
- result_fill_value is NaT and expected_fill_value is NaT
- )
+ match_missing = isna(result_fill_value) and isna(expected_fill_value)
assert match_value or match_missing
| xref #25425. Working on fixing the xfailed tests in this file. This is going to be a marathon. | https://api.github.com/repos/pandas-dev/pandas/pulls/28561 | 2019-09-21T02:35:24Z | 2019-09-23T12:55:41Z | null | 2019-09-23T12:55:46Z |
[BUG] Fixed behavior of DataFrameGroupBy.apply to respect _group_selection_context | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index b02769322e013..43c8747394087 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -271,6 +271,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue: `28192`)
- Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`)
- Bug in :meth:`DataFrameGroupby.agg` not able to use lambda function with named aggregation (:issue:`27519`)
+- Bug in :meth:`DataFrameGroupby.apply` causing grouped column to remain in ``DataFrame`` even when ``as_index=True`` was specified (:issue:`28549`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e010e615e176e..9523e8c30da9f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -702,7 +702,6 @@ def __iter__(self):
)
)
def apply(self, func, *args, **kwargs):
-
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
@@ -724,7 +723,9 @@ def f(g):
f = func
# ignore SettingWithCopy here in case the user mutates
- with option_context("mode.chained_assignment", None):
+ with option_context("mode.chained_assignment", None), _group_selection_context(
+ self
+ ):
try:
result = self._python_apply_general(f)
except TypeError:
@@ -736,8 +737,14 @@ def f(g):
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
- with _group_selection_context(self):
- return self._python_apply_general(f)
+ # GH 28549
+ # This block should only be hit
+ # because of an operation failing on a
+ # grouper column if is_index=False.
+ # Otherwise it will only be hit by an operation
+ # failing on another column, and will fail both attempts
+
+ return self._python_apply_general(f)
return result
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 76588549532b1..a05314cfce380 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -363,11 +363,19 @@ def f(group):
tm.assert_frame_equal(result.loc[key], f(group))
-def test_apply_chunk_view():
+@pytest.mark.parametrize("as_index", [False, True])
+def test_apply_chunk_view(as_index):
# Low level tinkering could be unsafe, make sure not
df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
- result = df.groupby("key", group_keys=False).apply(lambda x: x[:2])
+ result = df.groupby("key", group_keys=False, as_index=as_index).apply(
+ lambda x: x[:2]
+ )
+ # GH 28549
+ # key no longer included in reduction output
+ if as_index:
+ df.pop("key")
+
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
@@ -386,7 +394,8 @@ def test_apply_no_name_column_conflict():
grouped.apply(lambda x: x.sort_values("value", inplace=True))
-def test_apply_typecast_fail():
+@pytest.mark.parametrize("as_index", [True, False])
+def test_apply_typecast_fail(as_index):
df = DataFrame(
{
"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
@@ -400,7 +409,12 @@ def f(group):
group["v2"] = (v - v.min()) / (v.max() - v.min())
return group
- result = df.groupby("d").apply(f)
+ result = df.groupby("d", as_index=as_index).apply(f)
+
+ # GH 28549
+ # key no longer included in reduction output
+ if as_index:
+ df.pop("d")
expected = df.copy()
expected["v2"] = np.tile([0.0, 0.5, 1], 2)
@@ -426,6 +440,10 @@ def f(group):
result = df.groupby("d").apply(f)
+ # GH 28549
+ # key no longer included in reduction output
+ df.pop("d")
+
expected = df.copy()
expected["v2"] = np.tile([0.0, 0.5, 1], 2)
@@ -611,24 +629,40 @@ def test_func(x):
tm.assert_frame_equal(result, expected)
-def test_groupby_apply_none_first():
+@pytest.mark.parametrize("as_index", [True, False])
+@pytest.mark.parametrize(
+ "groups, vars_, expected_vars, expected_groups",
+ [
+ ([1, 1, 1, 2], [0, 1, 2, 3], [0, 2], [1, 1]),
+ ([1, 2, 2, 2], [0, 1, 2, 3], [1, 3], [2, 2]),
+ ],
+)
+def test_groupby_apply_none_first(
+ groups, vars_, expected_vars, expected_groups, as_index
+):
# GH 12824. Tests if apply returns None first.
- test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})
- test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]})
+ test_df = DataFrame({"groups": groups, "vars": vars_})
def test_func(x):
if x.shape[0] < 2:
return None
return x.iloc[[0, -1]]
- result1 = test_df1.groupby("groups").apply(test_func)
- result2 = test_df2.groupby("groups").apply(test_func)
- index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None])
- index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None])
- expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1)
- expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2)
- tm.assert_frame_equal(result1, expected1)
- tm.assert_frame_equal(result2, expected2)
+ result = test_df.groupby("groups", as_index=as_index).apply(test_func)
+
+ # GH 28549 "groups" should not be in output of apply
+ # unless as_index=True
+ if not as_index:
+ expected = DataFrame(
+ {"groups": expected_groups, "vars": expected_vars}, index=result.index
+ )
+ else:
+ expected = DataFrame({"vars": expected_vars}, index=result.index)
+
+ print(result)
+ print(expected)
+
+ tm.assert_frame_equal(result, expected)
def test_groupby_apply_return_empty_chunk():
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index e09af3fd48ee6..2b84004c612e8 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -95,9 +95,16 @@ def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
- expected = x.iloc[[0, 1]].copy()
+
+ # GH 28549
+ # grouper key should not be present after apply
+ # with as_index=True.
+ # TODO split this into multiple tests
+ dropped = x.drop("person_id", 1)
+
+ expected = dropped.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
- expected["person_name"] = expected["person_name"].astype("object")
+ expected["person_name"] = expected["person_name"]
tm.assert_frame_equal(result, expected)
# GH 9921
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index afb22a732691c..e8d6e26dadf8b 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -102,10 +102,17 @@ def test_builtins_apply(keys, f):
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
- assert_msg = "invalid frame shape: {} (expected ({}, 3))".format(
- result.shape, ngroups
+ # GH 28549
+ # grouping keys should not be included in output
+ if isinstance(keys, list):
+ result_shape = len(df.columns) - len(keys)
+ else:
+ result_shape = len(df.columns) - 1
+
+ assert_msg = "invalid frame shape: {} (expected ({}, {}))".format(
+ result.shape, ngroups, result_shape
)
- assert result.shape == (ngroups, 3), assert_msg
+ assert result.shape == (ngroups, result_shape), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
@@ -113,10 +120,15 @@ def test_builtins_apply(keys, f):
)
if f != sum:
- expected = df.groupby(keys).agg(fname).reset_index()
- expected.set_index(keys, inplace=True, drop=False)
+ # GH 28549
+ # No longer need to reset/set index here
+ expected = df.groupby(keys).agg(fname)
tm.assert_frame_equal(result, expected, check_dtype=False)
+ # GH 28549
+ # grouping keys should not be in output
+ df = df.drop(keys, 1)
+
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
@@ -341,10 +353,13 @@ def test_cython_api2():
tm.assert_frame_equal(result, expected)
# GH 13994
- result = df.groupby("A").cumsum(axis=1)
+ # GH 28549
+ # Good represention of when as_index=False is now behaving
+ # as expected
+ result = df.groupby("A", as_index=False).cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
- result = df.groupby("A").cumprod(axis=1)
+ result = df.groupby("A", as_index=False).cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
@@ -1107,7 +1122,10 @@ def test_count():
for key in ["1st", "2nd", ["1st", "2nd"]]:
left = df.groupby(key).count()
- right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
+
+ # GH 28549
+ # don't need to drop key here anymore
+ right = df.groupby(key).apply(DataFrame.count)
tm.assert_frame_equal(left, right)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bec5cbc5fecb8..3c1662410b524 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -89,9 +89,12 @@ def max_value(group):
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
+
+ # GH 28549
+ # "A" should not be in output anymore
expected = Series(
- [np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
- index=["A", "B", "C", "D", "value"],
+ [np.dtype("object")] + [np.dtype("float64")] * 2 + [np.dtype("int64")],
+ index=["B", "C", "D", "value"],
)
assert_series_equal(result, expected)
@@ -948,16 +951,29 @@ def f_no_copy(x):
assert_series_equal(grpby_copy, grpby_no_copy)
-def test_no_mutate_but_looks_like():
+@pytest.mark.parametrize("as_index", [True, False])
+def test_no_mutate_but_looks_like(as_index):
# GH 8467
# first show's mutation indicator
# second does not, but should yield the same results
df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
- result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key)
- result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)
- assert_series_equal(result1, result2)
+ def run_test(df, as_index):
+ result1 = df.groupby("key", group_keys=True, as_index=as_index).apply(
+ lambda x: x[:].key
+ )
+ result2 = df.groupby("key", group_keys=True, as_index=as_index).apply(
+ lambda x: x.key
+ )
+ return result1, result2
+
+ if as_index:
+ with pytest.raises(AttributeError):
+ run_test(df, as_index)
+ else:
+ result1, result2 = run_test(df, as_index)
+ assert_series_equal(result1, result2)
def test_groupby_series_indexed_differently():
@@ -1080,7 +1096,8 @@ def test_consistency_name():
assert_series_equal(result, expected)
-def test_groupby_name_propagation(df):
+@pytest.mark.parametrize("as_index", [True, False])
+def test_groupby_name_propagation(df, as_index):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
@@ -1091,12 +1108,14 @@ def summarize_random_name(df):
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
- metrics = df.groupby("A").apply(summarize)
+ metrics = df.groupby("A", as_index=as_index).apply(summarize)
assert metrics.columns.name is None
- metrics = df.groupby("A").apply(summarize, "metrics")
+ metrics = df.groupby("A", as_index=as_index).apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
- metrics = df.groupby("A").apply(summarize_random_name)
- assert metrics.columns.name is None
+
+ if not as_index:
+ metrics = df.groupby("A", as_index=as_index).apply(summarize_random_name)
+ assert metrics.columns.name is None
def test_groupby_nonstring_columns():
@@ -1345,12 +1364,20 @@ def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
_check_groupby(df, result, ["a", "b"], "d")
-def test_dont_clobber_name_column():
+@pytest.mark.parametrize("as_index", [True, False])
+def test_dont_clobber_name_column(as_index):
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
- result = df.groupby("key").apply(lambda x: x)
+ result = df.groupby("key", as_index=as_index).apply(lambda x: x)
+
+ # GH 28549
+ # test both True and False for as index to ensure
+ # proper reduction
+ if as_index:
+ df.pop("key")
+
assert_frame_equal(result, df)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index d3972e6ba9008..cc9e28153d83e 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -455,7 +455,8 @@ def test_groupby_transform_with_nan_group():
assert_series_equal(result, expected)
-def test_transform_mixed_type():
+@pytest.mark.parametrize("as_index", [True, False])
+def test_transform_mixed_type(as_index):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
df = DataFrame(
{
@@ -470,16 +471,23 @@ def f(group):
group["g"] = group["d"] * 2
return group[:1]
- grouped = df.groupby("c")
+ grouped = df.groupby("c", as_index=as_index)
result = grouped.apply(f)
assert result["d"].dtype == np.float64
# this is by definition a mutating operation!
with pd.option_context("mode.chained_assignment", None):
- for key, group in grouped:
+ for index, (key, group) in enumerate(grouped):
res = f(group)
- assert_frame_equal(res, result.loc[key])
+ # GH 28549
+ # if as_index need to drop column from res
+ if as_index:
+ res = res.drop("c", 1)
+
+ k = key if as_index else index
+
+ assert_frame_equal(res, result.loc[k])
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
| - [x] closes #28549
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
There were lots of inconsistencies among tests about whether or not `apply` should return the grouper column(s), and almost no testing on the impact of `as_index` on `apply` operations. This hopefully should provide better coverage, and also normalize the behavior everywhere. | https://api.github.com/repos/pandas-dev/pandas/pulls/28554 | 2019-09-20T21:10:37Z | 2019-09-20T21:40:17Z | null | 2019-09-20T21:40:17Z |
TST/CLN: parametrize and clean test_expressions, test_nanops | diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index b11698bf89cda..6edd3125331b9 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -54,14 +54,12 @@ def run_arithmetic(self, df, other):
operations = ["add", "sub", "mul", "mod", "truediv", "floordiv"]
for test_flex in [True, False]:
for arith in operations:
-
- operator_name = arith
-
+ # TODO: share with run_binary
if test_flex:
op = lambda x, y: getattr(x, arith)(y)
op.__name__ = arith
else:
- op = getattr(operator, operator_name)
+ op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
@@ -87,13 +85,14 @@ def run_binary(self, df, other):
for test_flex in [True, False]:
for arith in operations:
if test_flex:
- op = lambda x, y: getattr(df, arith)(y)
+ op = lambda x, y: getattr(x, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
+
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
@@ -167,29 +166,29 @@ def test_invalid(self):
"opname,op_str",
[("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
)
- def test_binary_ops(self, opname, op_str):
+ @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)])
+ def test_binary_ops(self, opname, op_str, left, right):
def testit():
- for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
+ if opname == "pow":
+ # TODO: get this working
+ return
- if opname == "pow":
- continue
+ op = getattr(operator, opname)
- op = getattr(operator, opname)
+ result = expr._can_use_numexpr(op, op_str, left, left, "evaluate")
+ assert result != left._is_mixed_type
- result = expr._can_use_numexpr(op, op_str, f, f, "evaluate")
- assert result != f._is_mixed_type
+ result = expr.evaluate(op, op_str, left, left, use_numexpr=True)
+ expected = expr.evaluate(op, op_str, left, left, use_numexpr=False)
- result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
- expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
+ if isinstance(result, DataFrame):
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_numpy_array_equal(result, expected.values)
- if isinstance(result, DataFrame):
- tm.assert_frame_equal(result, expected)
- else:
- tm.assert_numpy_array_equal(result, expected.values)
-
- result = expr._can_use_numexpr(op, op_str, f2, f2, "evaluate")
- assert not result
+ result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
+ assert not result
expr.set_use_numexpr(False)
testit()
@@ -210,30 +209,26 @@ def testit():
("ne", "!="),
],
)
- def test_comparison_ops(self, opname, op_str):
+ @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)])
+ def test_comparison_ops(self, opname, op_str, left, right):
def testit():
- for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
-
- f11 = f
- f12 = f + 1
+ f12 = left + 1
+ f22 = right + 1
- f21 = f2
- f22 = f2 + 1
+ op = getattr(operator, opname)
- op = getattr(operator, opname)
+ result = expr._can_use_numexpr(op, op_str, left, f12, "evaluate")
+ assert result != left._is_mixed_type
- result = expr._can_use_numexpr(op, op_str, f11, f12, "evaluate")
- assert result != f11._is_mixed_type
+ result = expr.evaluate(op, op_str, left, f12, use_numexpr=True)
+ expected = expr.evaluate(op, op_str, left, f12, use_numexpr=False)
+ if isinstance(result, DataFrame):
+ tm.assert_frame_equal(result, expected)
+ else:
+ tm.assert_numpy_array_equal(result, expected.values)
- result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
- expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
- if isinstance(result, DataFrame):
- tm.assert_frame_equal(result, expected)
- else:
- tm.assert_numpy_array_equal(result, expected.values)
-
- result = expr._can_use_numexpr(op, op_str, f21, f22, "evaluate")
- assert not result
+ result = expr._can_use_numexpr(op, op_str, right, f22, "evaluate")
+ assert not result
expr.set_use_numexpr(False)
testit()
@@ -244,15 +239,14 @@ def testit():
testit()
@pytest.mark.parametrize("cond", [True, False])
- def test_where(self, cond):
+ @pytest.mark.parametrize("df", [_frame, _frame2, _mixed, _mixed2])
+ def test_where(self, cond, df):
def testit():
- for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
-
- c = np.empty(f.shape, dtype=np.bool_)
- c.fill(cond)
- result = expr.where(c, f.values, f.values + 1)
- expected = np.where(c, f.values, f.values + 1)
- tm.assert_numpy_array_equal(result, expected)
+ c = np.empty(df.shape, dtype=np.bool_)
+ c.fill(cond)
+ result = expr.where(c, df.values, df.values + 1)
+ expected = np.where(c, df.values, df.values + 1)
+ tm.assert_numpy_array_equal(result, expected)
expr.set_use_numexpr(False)
testit()
@@ -263,7 +257,7 @@ def testit():
testit()
@pytest.mark.parametrize(
- "op_str,opname", list(zip(["/", "//", "**"], ["truediv", "floordiv", "pow"]))
+ "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")]
)
def test_bool_ops_raise_on_arithmetic(self, op_str, opname):
df = DataFrame({"a": np.random.rand(10) > 0.5, "b": np.random.rand(10) > 0.5})
@@ -291,7 +285,7 @@ def test_bool_ops_raise_on_arithmetic(self, op_str, opname):
f(df, True)
@pytest.mark.parametrize(
- "op_str,opname", list(zip(["+", "*", "-"], ["add", "mul", "sub"]))
+ "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")]
)
def test_bool_ops_warn_on_arithmetic(self, op_str, opname):
n = 10
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 41b27f030d80f..49d1777df0751 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1,4 +1,5 @@
from functools import partial
+import operator
import warnings
import numpy as np
@@ -15,6 +16,7 @@
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
+has_c16 = hasattr(np, "complex128")
class TestnanopsDataFrame:
@@ -131,14 +133,9 @@ def _coerce_tds(targ, res):
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
- try:
- res = res.astype("c16")
- except RuntimeError:
- res = res.astype("f8")
- try:
- targ = targ.astype("c16")
- except RuntimeError:
- targ = targ.astype("f8")
+ cast_dtype = "c16" if has_c16 else "f8"
+ res = res.astype(cast_dtype)
+ targ = targ.astype(cast_dtype)
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
@@ -152,14 +149,13 @@ def check_fun_data(
targfunc,
testarval,
targarval,
- targarnanval,
check_dtype=True,
empty_targfunc=None,
**kwargs
):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
- targartempval = targarval if skipna else targarnanval
+ targartempval = targarval if skipna else testarval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
@@ -180,46 +176,32 @@ def check_fun_data(
if testarval.ndim <= 1:
return
- try:
- testarval2 = np.take(testarval, 0, axis=-1)
- targarval2 = np.take(targarval, 0, axis=-1)
- targarnanval2 = np.take(targarnanval, 0, axis=-1)
- except ValueError:
- return
+ # Recurse on lower-dimension
+ testarval2 = np.take(testarval, 0, axis=-1)
+ targarval2 = np.take(targarval, 0, axis=-1)
self.check_fun_data(
testfunc,
targfunc,
testarval2,
targarval2,
- targarnanval2,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs
)
- def check_fun(
- self,
- testfunc,
- targfunc,
- testar,
- targar=None,
- targarnan=None,
- empty_targfunc=None,
- **kwargs
- ):
- if targar is None:
- targar = testar
- if targarnan is None:
- targarnan = testar
+ def check_fun(self, testfunc, targfunc, testar, empty_targfunc=None, **kwargs):
+
+ targar = testar
+ if testar.endswith("_nan") and hasattr(self, testar[:-4]):
+ targar = testar[:-4]
+
testarval = getattr(self, testar)
targarval = getattr(self, targar)
- targarnanval = getattr(self, targarnan)
self.check_fun_data(
testfunc,
targfunc,
testarval,
targarval,
- targarnanval,
empty_targfunc=empty_targfunc,
**kwargs
)
@@ -230,14 +212,13 @@ def check_funs(
targfunc,
allow_complex=True,
allow_all_nan=True,
- allow_str=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs
):
self.check_fun(testfunc, targfunc, "arr_float", **kwargs)
- self.check_fun(testfunc, targfunc, "arr_float_nan", "arr_float", **kwargs)
+ self.check_fun(testfunc, targfunc, "arr_float_nan", **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", **kwargs)
objs = [
@@ -251,26 +232,15 @@ def check_funs(
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", **kwargs)
- self.check_fun(
- testfunc, targfunc, "arr_complex_nan", "arr_complex", **kwargs
- )
+ self.check_fun(testfunc, targfunc, "arr_complex_nan", **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", **kwargs)
objs += [self.arr_complex.astype("O")]
- if allow_str:
- self.check_fun(testfunc, targfunc, "arr_str", **kwargs)
- self.check_fun(testfunc, targfunc, "arr_utf", **kwargs)
- objs += [self.arr_str.astype("O"), self.arr_utf.astype("O")]
-
if allow_date:
- try:
- targfunc(self.arr_date)
- except TypeError:
- pass
- else:
- self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
- objs += [self.arr_date.astype("O")]
+ targfunc(self.arr_date)
+ self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
+ objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
@@ -300,33 +270,20 @@ def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
value = value.astype("f8")
return func(value, **kwargs)
- def test_nanany(self):
- self.check_funs(
- nanops.nanany,
- np.any,
- allow_all_nan=False,
- allow_str=False,
- allow_date=False,
- allow_tdelta=False,
- )
-
- def test_nanall(self):
+ @pytest.mark.parametrize(
+ "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)]
+ )
+ def test_nan_funcs(self, nan_op, np_op):
+ # TODO: allow tdelta, doesn't break tests
self.check_funs(
- nanops.nanall,
- np.all,
- allow_all_nan=False,
- allow_str=False,
- allow_date=False,
- allow_tdelta=False,
+ nan_op, np_op, allow_all_nan=False, allow_date=False, allow_tdelta=False
)
def test_nansum(self):
self.check_funs(
nanops.nansum,
np.sum,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
check_dtype=False,
empty_targfunc=np.nansum,
)
@@ -335,11 +292,9 @@ def test_nanmean(self):
self.check_funs(
nanops.nanmean,
np.mean,
- allow_complex=False,
+ allow_complex=False, # TODO: allow this, doesn't break test
allow_obj=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
)
def test_nanmean_overflow(self):
@@ -355,22 +310,31 @@ def test_nanmean_overflow(self):
assert result == np_result
assert result.dtype == np.float64
- def test_returned_dtype(self):
-
- dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
- if hasattr(np, "float128"):
- dtypes.append(np.float128)
+ @pytest.mark.parametrize(
+ "dtype",
+ [
+ np.int16,
+ np.int32,
+ np.int64,
+ np.float32,
+ np.float64,
+ getattr(np, "float128", None),
+ ],
+ )
+ def test_returned_dtype(self, dtype):
+ if dtype is None:
+ # no float128 available
+ return
- for dtype in dtypes:
- s = Series(range(10), dtype=dtype)
- group_a = ["mean", "std", "var", "skew", "kurt"]
- group_b = ["min", "max"]
- for method in group_a + group_b:
- result = getattr(s, method)()
- if is_integer_dtype(dtype) and method in group_a:
- assert result.dtype == np.float64
- else:
- assert result.dtype == dtype
+ s = Series(range(10), dtype=dtype)
+ group_a = ["mean", "std", "var", "skew", "kurt"]
+ group_b = ["min", "max"]
+ for method in group_a + group_b:
+ result = getattr(s, method)()
+ if is_integer_dtype(dtype) and method in group_a:
+ assert result.dtype == np.float64
+ else:
+ assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
@@ -379,9 +343,7 @@ def test_nanmedian(self):
nanops.nanmedian,
np.median,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
)
@@ -391,9 +353,7 @@ def test_nanvar(self, ddof):
nanops.nanvar,
np.var,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@@ -404,9 +364,7 @@ def test_nanstd(self, ddof):
nanops.nanstd,
np.std,
allow_complex=False,
- allow_str=False,
allow_date=False,
- allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@@ -421,32 +379,19 @@ def test_nansem(self, ddof):
nanops.nansem,
sem,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
- def _minmax_wrap(self, value, axis=None, func=None):
-
- # numpy warns if all nan
- res = func(value, axis)
- if res.dtype.kind == "m":
- res = np.atleast_1d(res)
- return res
-
- def test_nanmin(self):
+ @pytest.mark.parametrize(
+ "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
+ )
+ def test_nanops_with_warnings(self, nan_op, np_op):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._minmax_wrap, func=np.min)
- self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
-
- def test_nanmax(self):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", RuntimeWarning)
- func = partial(self._minmax_wrap, func=np.max)
- self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
+ self.check_funs(nan_op, np_op, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
@@ -467,20 +412,13 @@ def test_nanargmax(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmax)
- self.check_funs(
- nanops.nanargmax,
- func,
- allow_str=False,
- allow_obj=False,
- allow_date=True,
- allow_tdelta=True,
- )
+ self.check_funs(nanops.nanargmax, func, allow_obj=False)
def test_nanargmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmin)
- self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False)
+ self.check_funs(nanops.nanargmin, func, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
@@ -504,7 +442,6 @@ def test_nanskew(self):
nanops.nanskew,
func,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@@ -520,7 +457,6 @@ def test_nankurt(self):
nanops.nankurt,
func,
allow_complex=False,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@@ -529,7 +465,6 @@ def test_nanprod(self):
self.check_funs(
nanops.nanprod,
np.prod,
- allow_str=False,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
@@ -695,45 +630,34 @@ def check_nancomp(self, checkfun, targ0):
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
- try:
- arr_float = np.take(arr_float, 0, axis=-1)
- arr_float1 = np.take(arr_float1, 0, axis=-1)
- arr_nan = np.take(arr_nan, 0, axis=-1)
- arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
- arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
- arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
- arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
- targ0 = np.take(targ0, 0, axis=-1)
- except ValueError:
- break
-
- def test_nangt(self):
- targ0 = self.arr_float > self.arr_float1
- self.check_nancomp(nanops.nangt, targ0)
-
- def test_nange(self):
- targ0 = self.arr_float >= self.arr_float1
- self.check_nancomp(nanops.nange, targ0)
-
- def test_nanlt(self):
- targ0 = self.arr_float < self.arr_float1
- self.check_nancomp(nanops.nanlt, targ0)
-
- def test_nanle(self):
- targ0 = self.arr_float <= self.arr_float1
- self.check_nancomp(nanops.nanle, targ0)
-
- def test_naneq(self):
- targ0 = self.arr_float == self.arr_float1
- self.check_nancomp(nanops.naneq, targ0)
-
- def test_nanne(self):
- targ0 = self.arr_float != self.arr_float1
- self.check_nancomp(nanops.nanne, targ0)
-
- def check_bool(self, func, value, correct, *args, **kwargs):
+ # Lower dimension for next step in the loop
+ arr_float = np.take(arr_float, 0, axis=-1)
+ arr_float1 = np.take(arr_float1, 0, axis=-1)
+ arr_nan = np.take(arr_nan, 0, axis=-1)
+ arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
+ arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
+ arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
+ arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
+ targ0 = np.take(targ0, 0, axis=-1)
+
+ @pytest.mark.parametrize(
+ "op,nanop",
+ [
+ (operator.eq, nanops.naneq),
+ (operator.ne, nanops.nanne),
+ (operator.gt, nanops.nangt),
+ (operator.ge, nanops.nange),
+ (operator.lt, nanops.nanlt),
+ (operator.le, nanops.nanle),
+ ],
+ )
+ def test_nan_comparison(self, op, nanop):
+ targ0 = op(self.arr_float, self.arr_float1)
+ self.check_nancomp(nanop, targ0)
+
+ def check_bool(self, func, value, correct):
while getattr(value, "ndim", True):
- res0 = func(value, *args, **kwargs)
+ res0 = func(value)
if correct:
assert res0
else:
@@ -741,10 +665,9 @@ def check_bool(self, func, value, correct, *args, **kwargs):
if not hasattr(value, "ndim"):
break
- try:
- value = np.take(value, 0, axis=-1)
- except ValueError:
- break
+
+ # Reduce dimension for next step in the loop
+ value = np.take(value, 0, axis=-1)
def test__has_infs(self):
pairs = [
| These are each going to need at least one more pass after this. | https://api.github.com/repos/pandas-dev/pandas/pulls/28553 | 2019-09-20T19:07:57Z | 2019-09-23T12:01:39Z | 2019-09-23T12:01:39Z | 2019-09-23T12:47:18Z |
Backport PR #28524: COMPAT: ensure no warnings on tab completion with Jedi 0.15 | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 1cdf213d81a74..76c7ad208865d 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -100,7 +100,8 @@ Other
^^^^^
- Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`)
--
+- Fix to ensure that tab-completion in an IPython console does not raise
+ warnings for deprecated attributes (:issue:`27900`).
.. _whatsnew_0.252.contributors:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9aced760725be..400d8647ced92 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -150,7 +150,7 @@ class NDFrame(PandasObject, SelectionMixin):
_internal_names_set = set(_internal_names) # type: Set[str]
_accessors = set() # type: Set[str]
_deprecations = frozenset(
- ["as_blocks", "blocks", "is_copy"]
+ ["as_blocks", "blocks", "is_copy", "ftypes", "ix"]
) # type: FrozenSet[str]
_metadata = [] # type: List[str]
_is_copy = None
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 680976f44ee1e..0e4c9ffcc5858 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -225,7 +225,7 @@ class Index(IndexOpsMixin, PandasObject):
"""
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = DirNamesMixin._deprecations | frozenset(["tolist"])
+ _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"])
# To hand over control to subclasses
_join_precedence = 1
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9f31e185fe41a..8394766fb0286 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -56,7 +56,7 @@
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
-from pandas.core.accessor import CachedAccessor
+from pandas.core.accessor import CachedAccessor, DirNamesMixin
from pandas.core.arrays import ExtensionArray, SparseArray
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
@@ -178,8 +178,11 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ["name"]
_accessors = {"dt", "cat", "str", "sparse"}
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = generic.NDFrame._deprecations | frozenset(
- ["asobject", "reshape", "get_value", "set_value", "valid", "tolist"]
+ _deprecations = (
+ generic.NDFrame._deprecations
+ | DirNamesMixin._deprecations
+ | frozenset(["asobject", "reshape", "get_value", "set_value", "valid"])
+ | frozenset(["ftype", "real", "imag", "tolist"])
)
# Override cache_readonly bc Series is mutable
| Backport for #28524
| https://api.github.com/repos/pandas-dev/pandas/pulls/28550 | 2019-09-20T15:55:55Z | 2019-09-21T11:36:03Z | 2019-09-21T11:36:03Z | 2019-09-21T11:36:06Z |
CLN: Exception and BaseException in test_nanops | diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index eb39f01657b90..41b27f030d80f 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -165,25 +165,17 @@ def check_fun_data(
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
- try:
- res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
+ res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if skipna:
+ res = testfunc(testarval, axis=axis, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if axis is None:
+ res = testfunc(testarval, skipna=skipna, **kwargs)
+ self.check_results(targ, res, axis, check_dtype=check_dtype)
+ if skipna and axis is None:
+ res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
- if skipna:
- res = testfunc(testarval, axis=axis, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- if axis is None:
- res = testfunc(testarval, skipna=skipna, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- if skipna and axis is None:
- res = testfunc(testarval, **kwargs)
- self.check_results(targ, res, axis, check_dtype=check_dtype)
- except BaseException as exc:
- exc.args += (
- "axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1),
- "skipna: {skipna}".format(skipna=skipna),
- "kwargs: {kwargs}".format(kwargs=kwargs),
- )
- raise
if testarval.ndim <= 1:
return
@@ -222,23 +214,15 @@ def check_fun(
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
- try:
- self.check_fun_data(
- testfunc,
- targfunc,
- testarval,
- targarval,
- targarnanval,
- empty_targfunc=empty_targfunc,
- **kwargs
- )
- except BaseException as exc:
- exc.args += (
- "testar: {testar}".format(testar=testar),
- "targar: {targar}".format(targar=targar),
- "targarnan: {targarnan}".format(targarnan=targarnan),
- )
- raise
+ self.check_fun_data(
+ testfunc,
+ targfunc,
+ testarval,
+ targarval,
+ targarnanval,
+ empty_targfunc=empty_targfunc,
+ **kwargs
+ )
def check_funs(
self,
@@ -697,23 +681,19 @@ def check_nancomp(self, checkfun, targ0):
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
- try:
- res0 = checkfun(arr_float, arr_float1)
- tm.assert_almost_equal(targ0, res0)
+ res0 = checkfun(arr_float, arr_float1)
+ tm.assert_almost_equal(targ0, res0)
- if targ0.ndim > 1:
- targ1 = np.vstack([targ0, arr_nan])
- else:
- targ1 = np.hstack([targ0, arr_nan])
- res1 = checkfun(arr_float_nan, arr_float1_nan)
- tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
-
- targ2 = arr_nan_nan
- res2 = checkfun(arr_float_nan, arr_nan_float1)
- tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
- except Exception as exc:
- exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),)
- raise
+ if targ0.ndim > 1:
+ targ1 = np.vstack([targ0, arr_nan])
+ else:
+ targ1 = np.hstack([targ0, arr_nan])
+ res1 = checkfun(arr_float_nan, arr_float1_nan)
+ tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
+
+ targ2 = arr_nan_nan
+ res2 = checkfun(arr_float_nan, arr_nan_float1)
+ tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
try:
arr_float = np.take(arr_float, 0, axis=-1)
@@ -753,15 +733,12 @@ def test_nanne(self):
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, "ndim", True):
- try:
- res0 = func(value, *args, **kwargs)
- if correct:
- assert res0
- else:
- assert not res0
- except BaseException as exc:
- exc.args += ("dim: {}".format(getattr(value, "ndim", value)),)
- raise
+ res0 = func(value, *args, **kwargs)
+ if correct:
+ assert res0
+ else:
+ assert not res0
+
if not hasattr(value, "ndim"):
break
try:
@@ -796,21 +773,13 @@ def test__has_infs(self):
for arr, correct in pairs:
val = getattr(self, arr)
- try:
- self.check_bool(nanops._has_infs, val, correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(nanops._has_infs, val, correct)
for arr, correct in pairs_float:
val = getattr(self, arr)
- try:
- self.check_bool(nanops._has_infs, val, correct)
- self.check_bool(nanops._has_infs, val.astype("f4"), correct)
- self.check_bool(nanops._has_infs, val.astype("f2"), correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(nanops._has_infs, val, correct)
+ self.check_bool(nanops._has_infs, val.astype("f4"), correct)
+ self.check_bool(nanops._has_infs, val.astype("f2"), correct)
def test__isfinite(self):
pairs = [
@@ -844,21 +813,13 @@ def test__isfinite(self):
for arr, correct in pairs:
val = getattr(self, arr)
- try:
- self.check_bool(func1, val, correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(func1, val, correct)
for arr, correct in pairs_float:
val = getattr(self, arr)
- try:
- self.check_bool(func1, val, correct)
- self.check_bool(func1, val.astype("f4"), correct)
- self.check_bool(func1, val.astype("f2"), correct)
- except BaseException as exc:
- exc.args += (arr,)
- raise
+ self.check_bool(func1, val, correct)
+ self.check_bool(func1, val.astype("f4"), correct)
+ self.check_bool(func1, val.astype("f2"), correct)
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
| AFAICT these are artifacts of pre-pytest usage where we needed to manually add info to the traceback.
Adds a code_check to make sure we aren't catching BaseException anywhere. BaseException includes KeyboardInterrupt, which we shouldn't be catching in general.
Saving parametrization in test_nanops for a separate pass, as the diff will get big. | https://api.github.com/repos/pandas-dev/pandas/pulls/28544 | 2019-09-19T21:44:32Z | 2019-09-20T06:14:59Z | 2019-09-20T06:14:59Z | 2019-09-20T14:18:34Z |
WEB/CI: Fixing target path of the web build | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 5b3d4e91c1e02..62c46b6970969 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -121,7 +121,7 @@ jobs:
- script: |
source activate pandas-dev
- python web/pandas_web.py web/pandas
+ python web/pandas_web.py web/pandas --target-path=web/build
displayName: 'Build website'
- script: |
| http://dev,pandas.io is currently not serving the website (http://dev,pandas.io/docs)
In the deployment of the new website, when building the web, the target default path `build` is not working as expected. The path is later expected to be `web/build`, but since the script is called from the root and not from inside web, the target path is `build/`, and the website is not copied.
This PR fixes the target path to be the correct one, and should fix http://dev,pandas.io
@TomAugspurger if you don't mind having a quick look. | https://api.github.com/repos/pandas-dev/pandas/pulls/28543 | 2019-09-19T21:25:40Z | 2019-09-19T22:24:48Z | 2019-09-19T22:24:48Z | 2019-09-19T23:23:40Z |
ENH: Add dta 119 reading to StataReader | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 54a6171f623f6..2b6fc46311ea7 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -37,7 +37,7 @@ Other enhancements
pandas (so it will become an integer or float dtype depending on the presence of missing data).
(:issue:`28368`)
- :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`)
-
+- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`)
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 31fdaa5cc6735..c67106e897727 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1139,13 +1139,17 @@ def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
- if self.format_version not in [117, 118]:
+ if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error)
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
- self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
+ nvar_type = "H" if self.format_version <= 118 else "I"
+ nvar_size = 2 if self.format_version <= 118 else 4
+ self.nvar = struct.unpack(
+ self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
+ )[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
@@ -1207,7 +1211,7 @@ def _read_new_header(self, first_char):
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
- # Get data type information, works for versions 117-118.
+ # Get data type information, works for versions 117-119.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
@@ -1241,14 +1245,14 @@ def f(typ):
def _get_varlist(self):
if self.format_version == 117:
b = 33
- elif self.format_version == 118:
+ elif self.format_version >= 118:
b = 129
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
@@ -1270,7 +1274,7 @@ def _get_lbllist(self):
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
def _get_variable_labels(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)
]
@@ -1285,13 +1289,13 @@ def _get_variable_labels(self):
return vlblist
def _get_nobs(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
@@ -1303,7 +1307,7 @@ def _get_data_label(self):
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self):
- if self.format_version == 118:
+ if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
@@ -1321,7 +1325,7 @@ def _get_seek_variable_labels(self):
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
- elif self.format_version == 118:
+ elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
@@ -1519,10 +1523,12 @@ def _read_strls(self):
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
+ v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
- buf = buf[0:2] + buf[4:10]
+ buf = buf[0:v_size] + buf[4 : 12 - v_size]
else:
- buf = buf[0:2] + buf[6:]
+ # This path may not be correct, impossible to test
+ buf = buf[0:v_size] + buf[4 + v_size :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
diff --git a/pandas/tests/io/data/stata1_119.dta.gz b/pandas/tests/io/data/stata1_119.dta.gz
new file mode 100644
index 0000000000000..0f75d8b92db14
Binary files /dev/null and b/pandas/tests/io/data/stata1_119.dta.gz differ
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 1e7d568602656..a0ec06a2197ae 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -101,6 +101,8 @@ def setup_method(self, datapath):
self.dta24_111 = os.path.join(self.dirpath, "stata7_111.dta")
self.dta25_118 = os.path.join(self.dirpath, "stata16_118.dta")
+ self.dta26_119 = os.path.join(self.dirpath, "stata1_119.dta.gz")
+
self.stata_dates = os.path.join(self.dirpath, "stata13_dates.dta")
def read_dta(self, file):
@@ -1780,3 +1782,14 @@ def test_encoding_latin1_118(self):
expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
tm.assert_frame_equal(encoded, expected)
+
+ @pytest.mark.slow
+ def test_stata_119(self):
+ # Gzipped since contains 32,999 variables and uncompressed is 20MiB
+ with gzip.open(self.dta26_119, "rb") as gz:
+ df = read_stata(gz)
+ assert df.shape == (1, 32999)
+ assert df.iloc[0, 6] == "A" * 3000
+ assert df.iloc[0, 7] == 3.14
+ assert df.iloc[0, -1] == 1
+ assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21))
| Add requirements for reading 119 format files
- [X] closes #28250
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28542 | 2019-09-19T21:09:13Z | 2019-09-20T12:40:13Z | 2019-09-20T12:40:12Z | 2019-12-19T22:29:34Z |
BUG: Groupby selection context not being properly reset | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 48c1173a372a7..68beeea2849bb 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -376,6 +376,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue: `28192`)
- Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`)
- Bug in :meth:`DataFrameGroupby.agg` not able to use lambda function with named aggregation (:issue:`27519`)
+- Bug in :meth:`DataFrameGroupby` causing unexpected mutations of the groupby object (:issue:`28523`)
- Bug in :meth:`DataFrame.groupby` losing column name information when grouping by a categorical column (:issue:`28787`)
Reshaping
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index f622480cfe4b7..34e78ff82acee 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -594,63 +594,63 @@ def pipe(self, func, *args, **kwargs):
def _make_wrapper(self, name):
assert name in self._apply_whitelist
- self._set_group_selection()
-
- # need to setup the selection
- # as are not passed directly but in the grouper
- f = getattr(self._selected_obj, name)
- if not isinstance(f, types.MethodType):
- return self.apply(lambda self: getattr(self, name))
-
- f = getattr(type(self._selected_obj), name)
- sig = inspect.signature(f)
-
- def wrapper(*args, **kwargs):
- # a little trickery for aggregation functions that need an axis
- # argument
- if "axis" in sig.parameters:
- if kwargs.get("axis", None) is None:
- kwargs["axis"] = self.axis
-
- def curried(x):
- return f(x, *args, **kwargs)
+ with _group_selection_context(self):
+ # need to setup the selection
+ # as are not passed directly but in the grouper
+ f = getattr(self._selected_obj, name)
+ if not isinstance(f, types.MethodType):
+ return self.apply(lambda self: getattr(self, name))
+
+ f = getattr(type(self._selected_obj), name)
+ sig = inspect.signature(f)
+
+ def wrapper(*args, **kwargs):
+ # a little trickery for aggregation functions that need an axis
+ # argument
+ if "axis" in sig.parameters:
+ if kwargs.get("axis", None) is None:
+ kwargs["axis"] = self.axis
+
+ def curried(x):
+ return f(x, *args, **kwargs)
+
+ # preserve the name so we can detect it when calling plot methods,
+ # to avoid duplicates
+ curried.__name__ = name
+
+ # special case otherwise extra plots are created when catching the
+ # exception below
+ if name in base.plotting_methods:
+ return self.apply(curried)
- # preserve the name so we can detect it when calling plot methods,
- # to avoid duplicates
- curried.__name__ = name
+ try:
+ return self.apply(curried)
+ except TypeError as err:
+ if not re.search(
+ "reduction operation '.*' not allowed for this dtype", str(err)
+ ):
+ # We don't have a cython implementation
+ # TODO: is the above comment accurate?
+ raise
- # special case otherwise extra plots are created when catching the
- # exception below
- if name in base.plotting_methods:
- return self.apply(curried)
+ # related to : GH3688
+ # try item-by-item
+ # this can be called recursively, so need to raise
+ # ValueError
+ # if we don't have this method to indicated to aggregate to
+ # mark this column as an error
+ try:
+ return self._aggregate_item_by_item(name, *args, **kwargs)
+ except AttributeError:
+ # e.g. SparseArray has no flags attr
+ # FIXME: 'SeriesGroupBy' has no attribute '_aggregate_item_by_item'
+ # occurs in idxmax() case
+ # in tests.groupby.test_function.test_non_cython_api
+ raise ValueError
- try:
- return self.apply(curried)
- except TypeError as err:
- if not re.search(
- "reduction operation '.*' not allowed for this dtype", str(err)
- ):
- # We don't have a cython implementation
- # TODO: is the above comment accurate?
- raise
-
- # related to : GH3688
- # try item-by-item
- # this can be called recursively, so need to raise
- # ValueError
- # if we don't have this method to indicated to aggregate to
- # mark this column as an error
- try:
- return self._aggregate_item_by_item(name, *args, **kwargs)
- except AttributeError:
- # e.g. SparseArray has no flags attr
- # FIXME: 'SeriesGroupBy' has no attribute '_aggregate_item_by_item'
- # occurs in idxmax() case
- # in tests.groupby.test_function.test_non_cython_api
- raise ValueError
+ wrapper.__name__ = name
- wrapper.__name__ = name
- return wrapper
+ return wrapper
def get_group(self, name, obj=None):
"""
@@ -721,7 +721,9 @@ def f(g):
f = func
# ignore SettingWithCopy here in case the user mutates
- with option_context("mode.chained_assignment", None):
+ with option_context(
+ "mode.chained_assignment", None
+ ) as _, _group_selection_context(self) as _:
try:
result = self._python_apply_general(f)
except TypeError:
@@ -732,9 +734,7 @@ def f(g):
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
-
- with _group_selection_context(self):
- return self._python_apply_general(f)
+ return self._python_apply_general(f)
return result
@@ -1335,35 +1335,34 @@ def f(self, **kwargs):
if "min_count" not in kwargs:
kwargs["min_count"] = min_count
- self._set_group_selection()
-
- # try a cython aggregation if we can
- try:
- return self._cython_agg_general(alias, alt=npfunc, **kwargs)
- except DataError:
- pass
- except NotImplementedError as err:
- if "function is not implemented for this dtype" in str(err):
- # raised in _get_cython_function, in some cases can
- # be trimmed by implementing cython funcs for more dtypes
- pass
- elif "decimal does not support skipna=True" in str(err):
- # FIXME: kludge for test_decimal:test_in_numeric_groupby
+ with _group_selection_context(self):
+ # try a cython aggregation if we can
+ try:
+ return self._cython_agg_general(alias, alt=npfunc, **kwargs)
+ except DataError:
pass
+ except NotImplementedError as err:
+ if "function is not implemented for this dtype" in str(err):
+ # raised in _get_cython_function, in some cases can
+ # be trimmed by implementing cython funcs for more dtypes
+ pass
+ elif "decimal does not support skipna=True" in str(err):
+ # FIXME: kludge for test_decimal:test_in_numeric_groupby
+ pass
+ else:
+ raise
+
+ # apply a non-cython aggregation
+ result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
+
+ # coerce the resulting columns if we can
+ if isinstance(result, DataFrame):
+ for col in result.columns:
+ result[col] = self._try_cast(result[col], self.obj[col])
else:
- raise
-
- # apply a non-cython aggregation
- result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
-
- # coerce the resulting columns if we can
- if isinstance(result, DataFrame):
- for col in result.columns:
- result[col] = self._try_cast(result[col], self.obj[col])
- else:
- result = self._try_cast(result, self.obj)
+ result = self._try_cast(result, self.obj)
- return result
+ return result
set_function_name(f, name, cls)
@@ -1736,28 +1735,30 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra
nth_values = list(set(n))
nth_array = np.array(nth_values, dtype=np.intp)
- self._set_group_selection()
- mask_left = np.in1d(self._cumcount_array(), nth_array)
- mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
- mask = mask_left | mask_right
+ with _group_selection_context(self):
+ mask_left = np.in1d(self._cumcount_array(), nth_array)
+ mask_right = np.in1d(
+ self._cumcount_array(ascending=False) + 1, -nth_array
+ )
+ mask = mask_left | mask_right
- ids, _, _ = self.grouper.group_info
+ ids, _, _ = self.grouper.group_info
- # Drop NA values in grouping
- mask = mask & (ids != -1)
+ # Drop NA values in grouping
+ mask = mask & (ids != -1)
- out = self._selected_obj[mask]
- if not self.as_index:
- return out
+ out = self._selected_obj[mask]
+ if not self.as_index:
+ return out
- result_index = self.grouper.result_index
- out.index = result_index[ids[mask]]
+ result_index = self.grouper.result_index
+ out.index = result_index[ids[mask]]
- if not self.observed and isinstance(result_index, CategoricalIndex):
- out = out.reindex(result_index)
+ if not self.observed and isinstance(result_index, CategoricalIndex):
+ out = out.reindex(result_index)
- return out.sort_index() if self.sort else out
+ return out.sort_index() if self.sort else out
# dropna is truthy
if isinstance(n, valid_containers):
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 5dad868c8c3aa..e89d5cfeb10cd 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -482,13 +482,13 @@ def test_agg_timezone_round_trip():
assert ts == grouped.first()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
- assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 0]
+ assert ts == grouped.apply(lambda x: x.iloc[0]).drop("A", 1).iloc[0, 0]
ts = df["B"].iloc[2]
assert ts == grouped.last()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
- assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 0]
+ assert ts == grouped.apply(lambda x: x.iloc[-1]).drop("A", 1).iloc[0, 0]
def test_sum_uint64_overflow():
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index dff5baa9b5984..831d053f603ec 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -433,6 +433,23 @@ def test_frame_groupby_columns(tsframe):
assert len(v.columns) == 2
+def test_frame_groupby_avoids_mutate(reduction_func):
+ # GH28523
+ func = reduction_func
+ df = pd.DataFrame({"A": ["foo", "bar", "foo", "bar"], "B": [1, 2, 3, 4]})
+ grouped = df.groupby("A")
+
+ expected = grouped.apply(lambda x: x)
+
+ args = {"nth": [0], "quantile": [0.5]}.get(func, [])
+ fn = getattr(grouped, func)
+ fn(*args)
+
+ result = grouped.apply(lambda x: x)
+
+ tm.assert_frame_equal(expected, result)
+
+
def test_frame_set_name_single(df):
grouped = df.groupby("A")
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 403f5f11ee768..0adc1cda6c499 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -172,7 +172,7 @@ def test_grouper_creation_bug(self):
result = g.sum()
assert_frame_equal(result, expected)
- result = g.apply(lambda x: x.sum())
+ result = g.apply(lambda x: x.sum()).drop("A", 1)
assert_frame_equal(result, expected)
g = df.groupby(pd.Grouper(key="A", axis=0))
| - [x] closes #28523
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28541 | 2019-09-19T20:38:54Z | 2020-03-14T21:48:38Z | null | 2020-03-14T21:48:38Z |
Pivot nans fix | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index aeca7782e3ae5..aebf15eb55374 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5980,7 +5980,7 @@ def pivot_table(
margins=False,
dropna=True,
margins_name="All",
- observed=False,
+ observed=True,
):
from pandas.core.reshape.pivot import pivot_table
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index d653dd87308cf..dbfefc1fa7f4a 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -29,7 +29,7 @@ def pivot_table(
margins=False,
dropna=True,
margins_name="All",
- observed=False,
+ observed=True,
):
index = _convert_by(index)
columns = _convert_by(columns)
@@ -92,11 +92,12 @@ def pivot_table(
pass
values = list(values)
- grouped = data.groupby(keys, observed=observed)
+ if dropna:
+ grouped = data.groupby(keys, observed=observed)
+ else:
+ grouped = data.groupby(keys, observed=False)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
- agged = agged.dropna(how="all")
-
# gh-21133
# we want to down cast if
# the original values are ints
@@ -172,10 +173,6 @@ def pivot_table(
if len(index) == 0 and len(columns) > 0:
table = table.T
- # GH 15193 Make sure empty columns are removed if dropna=True
- if isinstance(table, ABCDataFrame) and dropna:
- table = table.dropna(how="all", axis=1)
-
return table
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 582084e3bfb5a..ba48bd70bd807 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -185,6 +185,59 @@ def test_pivot_table_dropna(self):
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
+ def test_pivot_table_keep_nancols(self): # GH18030
+ df = pd.DataFrame(
+ {
+ "metric_value": [10, 11, 0, 3, np.nan, np.nan, 100, 20],
+ "metric_name": ["m", "n", "m", "x", "n", "x", "m", "n"],
+ "product": ["A", "A", "B", "B", "C", "C", "D", "D"],
+ "measurer": ["Tom", "Tom", "Bill", "Tom", "Bill", "Tom", "Bill", "Tom"],
+ }
+ )
+ pv_col = df.pivot_table(
+ "metric_value",
+ "metric_name",
+ ["measurer", "product"],
+ dropna=True,
+ )
+ pv_ind = df.pivot_table(
+ "metric_value",
+ ["measurer", "product"],
+ "metric_name",
+ dropna=True,
+ )
+
+ m = MultiIndex.from_tuples(
+ [
+ ("Bill", "B"),
+ ("Bill", "C"),
+ ("Bill", "D"),
+ ("Tom", "A"),
+ ("Tom", "B"),
+ ("Tom", "C"),
+ ("Tom", "D"),
+ ],
+ names=["measurer", "product"],
+ )
+ tm.assert_index_equal(pv_col.columns, m)
+ tm.assert_index_equal(pv_ind.index, m)
+
+ expected_pv_col = pd.DataFrame(
+ {
+ ("Bill", "B"): {"m": 0.0, "n": np.nan, "x": np.nan},
+ ("Bill", "C"): {"m": np.nan, "n": np.nan, "x": np.nan},
+ ("Bill", "D"): {"m": 100.0, "n": np.nan, "x": np.nan},
+ ("Tom", "A"): {"m": 10.0, "n": 11.0, "x": np.nan},
+ ("Tom", "B"): {"m": np.nan, "n": np.nan, "x": 3.0},
+ ("Tom", "C"): {"m": np.nan, "n": np.nan, "x": np.nan},
+ ("Tom", "D"): {"m": np.nan, "n": 20.0, "x": np.nan},
+ }
+ )
+ expected_pv_col.index.name = "metric_name"
+ expected_pv_col.columns.names = ["measurer", "product"]
+
+ tm.assert_frame_equal(pv_col, expected_pv_col)
+
def test_pivot_table_categorical(self):
cat1 = Categorical(
| this gets the ball rolling on #18030 as it is very easy to fix, but requires some decisions, mostly for structure and naming
the basic options as I see them:
0 - this PR as is (maybe some name change to this 26 character monster)
1 - Outright changing the `dropna=False` default behavior I thought would be a bad idea as it breaks backwards compatibility, but this solution is quite clunky.
2 - Change `dropna=True` behavior to satisfy the test added here, but again that is not backwards compatible, but seems reasonable.
Also, I've noticed that taking out the line
https://github.com/pandas-dev/pandas/blob/3f8c0c4f002b6bcc69e0e534cb0c502fbab00373/pandas/core/reshape/pivot.py#L177
doesn't break any of the tests, so there is that. (this helps 2)
- [x] closes #18030
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28540 | 2019-09-19T20:33:41Z | 2019-12-17T17:39:34Z | null | 2019-12-18T11:16:38Z |
Pandas.io.formats.style.Styler docstring PR02 | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 033d93d1456c8..95e1084747aa3 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -645,7 +645,7 @@ def apply(self, func, axis=0, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``func``
Returns
@@ -697,7 +697,7 @@ def applymap(self, func, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``func``
Returns
@@ -732,7 +732,7 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
- kwargs : dict
+ **kwargs : dict
pass along to ``cond``
Returns
@@ -965,8 +965,10 @@ def background_gradient(
----------
cmap : str or colormap
matplotlib colormap
- low, high : float
- compress the range by these values.
+ low : float
+ compress the range by the low.
+ high : float
+ compress the range by the high.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
@@ -1078,7 +1080,7 @@ def set_properties(self, subset=None, **kwargs):
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
- kwargs : dict
+ **kwargs : dict
property: value pairs to be set for each cell
Returns
@@ -1350,8 +1352,10 @@ def pipe(self, func, *args, **kwargs):
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
- *args, **kwargs :
+ *args : optional
Arguments passed to `func`.
+ **kwargs : optional
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
| Solves:
- Unknown parameters {kwargs} in apply method in Styler class
- Unknown parameters {kwargs} in applymap method in Styler class
- Unknown parameters {kwargs} in where method in Styler class
- Unknown parameters {low, high} in background_gradient method in Styler class
- Unknown parameters {kwargs} in set_properties method in Styler class
- Unknown parameters {*args, **kwargs :} in pipe method in Styler class
for issues:
pandas.io.formats.style.Styler.apply: Unknown parameters {kwargs}
pandas.io.formats.style.Styler.applymap: Unknown parameters {kwargs}
pandas.io.formats.style.Styler.where: Unknown parameters {kwargs}
pandas.io.formats.style.Styler.set_properties: Unknown parameters {kwargs}
pandas.io.formats.style.Styler.pipe: Unknown parameters {*args, **kwargs :}
pandas.io.formats.style.Styler.background_gradient: Unknown parameters {low, high}
all in #27976
| https://api.github.com/repos/pandas-dev/pandas/pulls/28539 | 2019-09-19T20:27:30Z | 2019-09-20T12:45:27Z | 2019-09-20T12:45:27Z | 2019-09-20T12:46:28Z |
REF: Parametrize value_counts tests | diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index c7b28822092a8..f8bd8843ab7e3 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -52,29 +52,30 @@ def seed_df(seed_nans, n, m):
@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
-def test_series_groupby_value_counts(df, keys, bins, n, m):
+@pytest.mark.parametrize("isort", [True, False])
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize("sort", [True, False])
+@pytest.mark.parametrize("ascending", [True, False])
+@pytest.mark.parametrize("dropna", [True, False])
+def test_series_groupby_value_counts(
+ df, keys, bins, n, m, isort, normalize, sort, ascending, dropna
+):
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
- for isort, normalize, sort, ascending, dropna in product((False, True), repeat=5):
-
- kwargs = dict(
- normalize=normalize,
- sort=sort,
- ascending=ascending,
- dropna=dropna,
- bins=bins,
- )
+ kwargs = dict(
+ normalize=normalize, sort=sort, ascending=ascending, dropna=dropna, bins=bins
+ )
- gr = df.groupby(keys, sort=isort)
- left = gr["3rd"].value_counts(**kwargs)
+ gr = df.groupby(keys, sort=isort)
+ left = gr["3rd"].value_counts(**kwargs)
- gr = df.groupby(keys, sort=isort)
- right = gr["3rd"].apply(Series.value_counts, **kwargs)
- right.index.names = right.index.names[:-1] + ["3rd"]
+ gr = df.groupby(keys, sort=isort)
+ right = gr["3rd"].apply(Series.value_counts, **kwargs)
+ right.index.names = right.index.names[:-1] + ["3rd"]
- # have to sort on index because of unstable sort on values
- left, right = map(rebuild_index, (left, right)) # xref GH9212
- tm.assert_series_equal(left.sort_index(), right.sort_index())
+ # have to sort on index because of unstable sort on values
+ left, right = map(rebuild_index, (left, right)) # xref GH9212
+ tm.assert_series_equal(left.sort_index(), right.sort_index())
| Parametrizes the for loop in `test_series_groupby_value_counts`. As a side note, this test seems to run for a pretty long time (a minute and a half); should it operate on less data perhaps? | https://api.github.com/repos/pandas-dev/pandas/pulls/28537 | 2019-09-19T20:03:19Z | 2019-09-20T14:27:59Z | 2019-09-20T14:27:59Z | 2019-09-20T14:42:03Z |
Pandas.io.formats.style.Styler.apply docstring PR02 | Solves:
- Unknown parameters {kwargs}
pandas.io.formats.style.Styler.apply: Unknown parameters {kwargs} in #27976 | https://api.github.com/repos/pandas-dev/pandas/pulls/28535 | 2019-09-19T19:51:37Z | 2019-10-11T22:10:18Z | null | 2019-10-11T22:10:18Z | |
Pandas.io.formats.style.Styler.pipe docstring PR02 | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 033d93d1456c8..05611be646bd2 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1350,8 +1350,10 @@ def pipe(self, func, *args, **kwargs):
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
- *args, **kwargs :
- Arguments passed to `func`.
+ *args : optional
+ Positional arguments passed into ``func``.
+ **kwargs : optional
+ A dictionary of keyword arguments passed into ``func``.
Returns
-------
| Solves:
- Unknown parameters {*args, **kwargs :}
pandas.io.formats.style.Styler.pipe: Unknown parameters {*args, **kwargs :} in #27976
| https://api.github.com/repos/pandas-dev/pandas/pulls/28534 | 2019-09-19T19:36:21Z | 2019-09-20T15:52:13Z | null | 2019-09-20T15:52:13Z |
TST: Add astype_nansafe datetime tests | diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 266f7ac50c663..04e8b329a0b40 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -3,6 +3,7 @@
import pandas.util._test_decorators as td
+from pandas.core.dtypes.cast import astype_nansafe
import pandas.core.dtypes.common as com
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
@@ -706,3 +707,24 @@ def test__get_dtype_fails(input_param, expected_error_message):
)
def test__is_dtype_type(input_param, result):
assert com._is_dtype_type(input_param, lambda tipo: tipo == result)
+
+
+@pytest.mark.parametrize("from_type", [np.datetime64, np.timedelta64])
+@pytest.mark.parametrize(
+ "to_type",
+ [
+ np.uint8,
+ np.uint16,
+ np.uint32,
+ np.int8,
+ np.int16,
+ np.int32,
+ np.float16,
+ np.float32,
+ ],
+)
+def test_astype_datetime64_bad_dtype_raises(from_type, to_type):
+ arr = np.array([from_type("2018")])
+
+ with pytest.raises(TypeError, match="cannot astype"):
+ astype_nansafe(arr, dtype=to_type)
| Adds tests for `astype_nansafe` in situations where we can't perform a datetime casting because of a precision mismatch:
```
arr
# array(['2018-01-01'], dtype='datetime64[D]')
arr.view(np.int32)
# array([17532, 0], dtype=int32)
arr.view(np.float32)
# array([2.4568e-41, 0.0000e+00], dtype=float32)
```
Related to https://github.com/pandas-dev/pandas/pull/28492 | https://api.github.com/repos/pandas-dev/pandas/pulls/28533 | 2019-09-19T17:45:11Z | 2019-09-25T22:47:46Z | null | 2019-09-25T22:54:55Z |
CLN+TST: Catch specific exception in equals | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5dff1f93264c3..0335058a69c63 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1918,6 +1918,9 @@ def sequence_to_dt64ns(
tz = validate_tz_from_dtype(dtype, tz)
if isinstance(data, ABCIndexClass):
+ if data.nlevels > 1:
+ # Without this check, data._data below is None
+ raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
data = data._data
# By this point we are assured to have either a numpy array or Index
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c7664d9777c71..bf89bbbdf2b79 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -192,7 +192,11 @@ def equals(self, other):
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
- except Exception:
+ except (ValueError, TypeError, OverflowError):
+ # e.g.
+ # ValueError -> cannot parse str entry, or OutOfBoundsDatetime
+ # TypeError -> trying to convert IntervalIndex to DatetimeIndex
+ # OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index d749d9bb47d25..c3cda22497ecb 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -15,6 +15,11 @@
class TestDatetimeArrayConstructor:
+ def test_from_sequence_invalid_type(self):
+ mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
+ with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
+ DatetimeArray._from_sequence(mi)
+
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index d4dff2cbce89b..2ec267c66091b 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -393,6 +393,18 @@ def test_equals(self):
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
+ # check that we do not raise when comparing with OutOfBounds objects
+ oob = pd.Index([datetime(2500, 1, 1)] * 3, dtype=object)
+ assert not idx.equals(oob)
+ assert not idx2.equals(oob)
+ assert not idx3.equals(oob)
+
+ # check that we do not raise when comparing with OutOfBounds dt64
+ oob2 = oob.map(np.datetime64)
+ assert not idx.equals(oob2)
+ assert not idx2.equals(oob2)
+ assert not idx3.equals(oob2)
+
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index d7d8b10347861..54ed5058b5253 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -1,3 +1,5 @@
+from datetime import timedelta
+
import numpy as np
import pytest
@@ -266,6 +268,17 @@ def test_equals(self):
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
+ # Check that we dont raise OverflowError on comparisons outside the
+ # implementation range
+ oob = pd.Index([timedelta(days=10 ** 6)] * 3, dtype=object)
+ assert not idx.equals(oob)
+ assert not idx2.equals(oob)
+
+ # FIXME: oob.apply(np.timedelta64) incorrectly overflows
+ oob2 = pd.Index([np.timedelta64(x) for x in oob], dtype=object)
+ assert not idx.equals(oob2)
+ assert not idx2.equals(oob2)
+
@pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)])
def test_freq_setter(self, values, freq):
| https://api.github.com/repos/pandas-dev/pandas/pulls/28532 | 2019-09-19T17:21:02Z | 2019-09-26T15:12:02Z | 2019-09-26T15:12:02Z | 2019-09-26T15:13:41Z | |
TST: Call tests just once with --dist=loadscope | diff --git a/.travis.yml b/.travis.yml
index 398dd07089ef9..048736e4bf1d0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -85,15 +85,6 @@ install:
- ci/submit_cython_cache.sh
- echo "install done"
-
-before_script:
- # display server (for clipboard functionality) needs to be started here,
- # does not work if done in install:setup_env.sh (GH-26103)
- - export DISPLAY=":99.0"
- - echo "sh -e /etc/init.d/xvfb start"
- - if [ "$JOB" != "3.8-dev" ]; then sh -e /etc/init.d/xvfb start; fi
- - sleep 3
-
script:
- echo "script start"
- echo "$JOB"
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index d6afb263b447f..66960ca2c6c10 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -73,33 +73,16 @@ jobs:
- task: PublishTestResults@2
inputs:
- testResultsFiles: 'test-data-*.xml'
+ testResultsFiles: 'test-data.xml'
testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
displayName: 'Publish test results'
- powershell: |
- $junitXml = "test-data-single.xml"
- $(Get-Content $junitXml | Out-String) -match 'failures="(.*?)"'
- if ($matches[1] -eq 0)
- {
- Write-Host "No test failures in test-data-single"
- }
- else
- {
- # note that this will produce $LASTEXITCODE=1
- Write-Error "$($matches[1]) tests failed"
- }
-
- $junitXmlMulti = "test-data-multiple.xml"
- $(Get-Content $junitXmlMulti | Out-String) -match 'failures="(.*?)"'
- if ($matches[1] -eq 0)
- {
- Write-Host "No test failures in test-data-multi"
- }
- else
- {
- # note that this will produce $LASTEXITCODE=1
- Write-Error "$($matches[1]) tests failed"
+ $(Get-Content "test-data.xml" | Out-String) -match 'failures="(.*?)"'
+ if ($matches[1] -eq 0) {
+ Write-Host "No test failures in test-data"
+ } else {
+ Write-Error "$($matches[1]) tests failed" # will produce $LASTEXITCODE=1
}
displayName: 'Check for test failures'
diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index e99e789a71fe8..51a2460e05fab 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -27,14 +27,13 @@ def main(filename):
if __name__ == "__main__":
print("SKIPPED TESTS:")
i = 1
- for file_type in ("-single", "-multiple", ""):
- for test_data in main("test-data{}.xml".format(file_type)):
- if test_data is None:
- print("-" * 80)
- else:
- print(
- "#{i} {class_name}.{test_name}: {message}".format(
- **dict(test_data, i=i)
- )
+ for test_data in main("test-data.xml"):
+ if test_data is None:
+ print("-" * 80)
+ else:
+ print(
+ "#{i} {class_name}.{test_name}: {message}".format(
+ **dict(test_data, i=i)
)
- i += 1
+ )
+ i += 1
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index d1a9447c97d4e..b91cfb3bed8cc 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -15,37 +15,29 @@ if [ -n "$LOCALE_OVERRIDE" ]; then
# exit 1
fi
fi
+
if [[ "not network" == *"$PATTERN"* ]]; then
export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4;
fi
-
-if [ -n "$PATTERN" ]; then
- PATTERN=" and $PATTERN"
+if [ "$COVERAGE" ]; then
+ COVERAGE_FNAME="/tmp/test_coverage.xml"
+ COVERAGE="-s --cov=pandas --cov-report=xml:$COVERAGE_FNAME"
fi
-for TYPE in single multiple
-do
- if [ "$COVERAGE" ]; then
- COVERAGE_FNAME="/tmp/coc-$TYPE.xml"
- COVERAGE="-s --cov=pandas --cov-report=xml:$COVERAGE_FNAME"
- fi
+PYTEST_CMD="pytest -m \"$PATTERN\" -n auto --dist=loadfile -s --strict --durations=10 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
- TYPE_PATTERN=$TYPE
- NUM_JOBS=1
- if [[ "$TYPE_PATTERN" == "multiple" ]]; then
- TYPE_PATTERN="not single"
- NUM_JOBS=2
- fi
+# Travis does not have have an X server
+if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ DISPLAY=DISPLAY=:99.0
+ PYTEST_CMD="xvfb-run -e /dev/stdout $PYTEST_CMD"
+fi
- PYTEST_CMD="pytest -m \"$TYPE_PATTERN$PATTERN\" -n $NUM_JOBS -s --strict --durations=10 --junitxml=test-data-$TYPE.xml $TEST_ARGS $COVERAGE pandas"
- echo $PYTEST_CMD
- # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code
- sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret"
+echo $PYTEST_CMD
+sh -c "$PYTEST_CMD"
- if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then
- echo "uploading coverage for $TYPE tests"
- echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME"
- bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME
- fi
-done
+if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then
+ echo "uploading coverage"
+ echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME"
+ bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME
+fi
diff --git a/environment.yml b/environment.yml
index a3582c56ee9d2..bbf3c036f65c4 100644
--- a/environment.yml
+++ b/environment.yml
@@ -53,7 +53,7 @@ dependencies:
- moto # mock S3
- pytest>=4.0.2
- pytest-cov
- - pytest-xdist
+ - pytest-xdist>=1.21
- seaborn
- statsmodels
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 6235b61d92f29..5633a58f254ca 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -32,7 +32,7 @@ hypothesis>=3.82
moto
pytest>=4.0.2
pytest-cov
-pytest-xdist
+pytest-xdist>=1.21
seaborn
statsmodels
ipywidgets
| Another try to what was tried in #26949. Before this PR the tests are called twice, once in a core for the tests that affect shared results, and once in parallel for the rest.
This PR makes a single call, and tests in the same scope (class or module) are granted to run in the same core, so no shared data problems should happen.
The tests being very slow was possibly caused by the proxy env variables, and not `--dist=loadscope`. But please check how long tests took before merging.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28531 | 2019-09-19T15:32:56Z | 2019-11-17T14:20:08Z | 2019-11-17T14:20:08Z | 2019-11-17T18:08:03Z |
BUG: wrong exception raised by Week+Day | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index a5af4e727391a..3beaa2dfa788a 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -147,7 +147,7 @@ Datetimelike
- Bug in :class:`Timestamp` subtraction when subtracting a :class:`Timestamp` from a ``np.datetime64`` object incorrectly raising ``TypeError`` (:issue:`28286`)
- Addition and subtraction of integer or integer-dtype arrays with :class:`Timestamp` will now raise ``NullFrequencyError`` instead of ``ValueError`` (:issue:`28268`)
- Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`)
--
+- Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`)
Timedelta
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 3ed25b8d3edd5..ddf2c6e65b474 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -4348,3 +4348,12 @@ def test_last_week_of_month_on_offset():
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
+
+
+def test_week_add_invalid():
+ # Week with weekday should raise TypeError and _not_ AttributeError
+ # when adding invalid offset
+ offset = Week(weekday=1)
+ other = Day()
+ with pytest.raises(TypeError, match="Cannot add"):
+ offset + other
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index dfe91b514bbe1..4491e6ad9ac7e 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -138,7 +138,7 @@ def to_offset(freq):
delta = offset
else:
delta = delta + offset
- except Exception:
+ except ValueError:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
else:
@@ -170,7 +170,7 @@ def to_offset(freq):
delta = offset
else:
delta = delta + offset
- except Exception:
+ except (ValueError, TypeError):
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
if delta is None:
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index edf58ba3850a1..82cbfa831bf32 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -605,7 +605,7 @@ def apply(self, other):
return BDay(self.n, offset=self.offset + other, normalize=self.normalize)
else:
raise ApplyTypeError(
- "Only know how to combine business day with " "datetime or timedelta."
+ "Only know how to combine business day with datetime or timedelta."
)
@apply_index_wraps
@@ -1545,6 +1545,13 @@ def apply(self, other):
if self.weekday is None:
return other + self.n * self._inc
+ if not isinstance(other, datetime):
+ raise TypeError(
+ "Cannot add {typ} to {cls}".format(
+ typ=type(other).__name__, cls=type(self).__name__
+ )
+ )
+
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
| With that fixed, we can clean up an Exception in tseries.frequencies. | https://api.github.com/repos/pandas-dev/pandas/pulls/28530 | 2019-09-19T15:08:46Z | 2019-09-19T20:33:52Z | 2019-09-19T20:33:52Z | 2019-09-19T21:10:10Z |
WEB: Fix deployment of the website | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index ba7a3bfb6ae36..5b3d4e91c1e02 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -171,7 +171,7 @@ jobs:
eq(variables['Build.SourceBranch'], 'refs/heads/master'))
- script: |
- cd doc/build/html
+ cd to_deploy
git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git
git push -f origin master
displayName: 'Publish web and docs to GitHub pages'
| The master build is broken [1], because one directory wasn't updated in #28497 (the step only runs in master builds and not PR builds, so couldn't be detected).
This updates the directory, the deployment should work again.
1. https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=17721 | https://api.github.com/repos/pandas-dev/pandas/pulls/28529 | 2019-09-19T15:03:22Z | 2019-09-19T15:58:39Z | 2019-09-19T15:58:39Z | 2019-09-19T15:58:39Z |
COMPAT: ensure no warnings on tab completion with Jedi 0.15 | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index de411ef63680a..14682b706f924 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -100,7 +100,8 @@ Other
^^^^^
- Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`)
--
+- Fix to ensure that tab-completion in an IPython console does not raise
+ warnings for deprecated attributes (:issue:`27900`).
.. _whatsnew_0.252.contributors:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7e77c56fefe04..152983451bc38 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -166,7 +166,7 @@ class NDFrame(PandasObject, SelectionMixin):
_internal_names_set = set(_internal_names) # type: Set[str]
_accessors = set() # type: Set[str]
_deprecations = frozenset(
- ["as_blocks", "blocks", "is_copy"]
+ ["as_blocks", "blocks", "is_copy", "ftypes", "ix"]
) # type: FrozenSet[str]
_metadata = [] # type: List[str]
_is_copy = None
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6ef9d78ff9e97..f5f7056d8bbcf 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -205,7 +205,7 @@ class Index(IndexOpsMixin, PandasObject):
"""
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = DirNamesMixin._deprecations | frozenset(["tolist"])
+ _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"])
# To hand over control to subclasses
_join_precedence = 1
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b0616c053df6d..2431bfcfd0356 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -54,7 +54,7 @@
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
-from pandas.core.accessor import CachedAccessor
+from pandas.core.accessor import CachedAccessor, DirNamesMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
@@ -176,8 +176,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ["name"]
_accessors = {"dt", "cat", "str", "sparse"}
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = generic.NDFrame._deprecations | frozenset(
- ["asobject", "reshape", "valid", "tolist"]
+ _deprecations = (
+ generic.NDFrame._deprecations
+ | DirNamesMixin._deprecations
+ | frozenset(["asobject", "reshape", "valid", "tolist", "ftype", "real", "imag"])
)
# Override cache_readonly bc Series is mutable
| Closes https://github.com/pandas-dev/pandas/issues/27900
I didn't yet add any tests, because I am not fully sure how to write them (the IPython tests we already have clearly don't work, I suppose the programmatic `ip.Completer.completions(..)` doesn't go through jedi)
| https://api.github.com/repos/pandas-dev/pandas/pulls/28524 | 2019-09-19T12:01:09Z | 2019-09-20T12:43:54Z | 2019-09-20T12:43:53Z | 2019-09-20T15:56:19Z |
DEV: skip pandas/__init__.py in isort's pre-commit hook | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b79f0f71dac23..3f98273a336cf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,3 +15,4 @@ repos:
hooks:
- id: isort
language: python_venv
+ exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$
| I noticed that when you modified `pandas/__init__.py`, isort actually completely reordered it when using pre-commit hook. Apparantly, isort ignores the skip config when you explicitly pass a path to isort (pre-commit basically does `isort pandas/__init__.py` when that file changed).
See https://github.com/pre-commit/mirrors-isort/issues/9, which suggested to add this exclude to the pre-commit config (although that duplicates the "skip" information from setup.cfg) | https://api.github.com/repos/pandas-dev/pandas/pulls/28517 | 2019-09-19T07:41:39Z | 2019-09-19T14:17:25Z | 2019-09-19T14:17:25Z | 2019-09-19T14:17:29Z |
CLN: clean-up internal sparse imports + restructure sparse submodule | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index a5af4e727391a..b890278d9ca30 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -96,9 +96,10 @@ Deprecations
Removed SparseSeries and SparseDataFrame
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``SparseSeries`` and ``SparseDataFrame`` have been removed (:issue:`28425`).
-We recommend using a ``Series`` or ``DataFrame`` with sparse values instead.
-See :ref:`sparse.migration` for help with migrating existing code.
+``SparseSeries``, ``SparseDataFrame`` and the ``DataFrame.to_sparse`` method
+have been removed (:issue:`28425`). We recommend using a ``Series`` or
+``DataFrame`` with sparse values instead. See :ref:`sparse.migration` for help
+with migrating existing code.
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 59ecc7f609ae9..6d0c55a45ed46 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -114,7 +114,7 @@
DataFrame,
)
-from pandas.core.sparse.api import SparseArray, SparseDtype
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.tseries.api import infer_freq
from pandas.tseries import offsets
diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py
new file mode 100644
index 0000000000000..75f3819fb19fd
--- /dev/null
+++ b/pandas/core/arrays/sparse/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa: F401
+
+from .accessor import SparseAccessor, SparseFrameAccessor
+from .array import BlockIndex, IntIndex, SparseArray, _make_index
+from .dtype import SparseDtype
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
new file mode 100644
index 0000000000000..57fd6d284af31
--- /dev/null
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -0,0 +1,336 @@
+"""Sparse accessor"""
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.dtypes.cast import find_common_type
+
+from pandas.core.accessor import PandasDelegate, delegate_names
+
+from .array import SparseArray
+from .dtype import SparseDtype
+
+
+class BaseAccessor:
+ _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
+
+ def __init__(self, data=None):
+ self._parent = data
+ self._validate(data)
+
+ def _validate(self, data):
+ raise NotImplementedError
+
+
+@delegate_names(
+ SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
+)
+class SparseAccessor(BaseAccessor, PandasDelegate):
+ """
+ Accessor for SparseSparse from other sparse matrix data types.
+ """
+
+ def _validate(self, data):
+ if not isinstance(data.dtype, SparseDtype):
+ raise AttributeError(self._validation_msg)
+
+ def _delegate_property_get(self, name, *args, **kwargs):
+ return getattr(self._parent.array, name)
+
+ def _delegate_method(self, name, *args, **kwargs):
+ if name == "from_coo":
+ return self.from_coo(*args, **kwargs)
+ elif name == "to_coo":
+ return self.to_coo(*args, **kwargs)
+ else:
+ raise ValueError
+
+ @classmethod
+ def from_coo(cls, A, dense_index=False):
+ """
+ Create a Series with sparse values from a scipy.sparse.coo_matrix.
+
+ Parameters
+ ----------
+ A : scipy.sparse.coo_matrix
+ dense_index : bool, default False
+ If False (default), the SparseSeries index consists of only the
+ coords of the non-null entries of the original coo_matrix.
+ If True, the SparseSeries index consists of the full sorted
+ (row, col) coordinates of the coo_matrix.
+
+ Returns
+ -------
+ s : Series
+ A Series with sparse values.
+
+ Examples
+ --------
+ >>> from scipy import sparse
+ >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
+ shape=(3, 4))
+ >>> A
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
+ with 3 stored elements in COOrdinate format>
+ >>> A.todense()
+ matrix([[ 0., 0., 1., 2.],
+ [ 3., 0., 0., 0.],
+ [ 0., 0., 0., 0.]])
+ >>> ss = pd.Series.sparse.from_coo(A)
+ >>> ss
+ 0 2 1
+ 3 2
+ 1 0 3
+ dtype: float64
+ BlockIndex
+ Block locations: array([0], dtype=int32)
+ Block lengths: array([3], dtype=int32)
+ """
+ from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
+ from pandas import Series
+
+ result = _coo_to_sparse_series(A, dense_index=dense_index)
+ result = Series(result.array, index=result.index, copy=False)
+
+ return result
+
+ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
+ """
+ Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
+
+ Use row_levels and column_levels to determine the row and column
+ coordinates respectively. row_levels and column_levels are the names
+ (labels) or numbers of the levels. {row_levels, column_levels} must be
+ a partition of the MultiIndex level names (or numbers).
+
+ Parameters
+ ----------
+ row_levels : tuple/list
+ column_levels : tuple/list
+ sort_labels : bool, default False
+ Sort the row and column labels before forming the sparse matrix.
+
+ Returns
+ -------
+ y : scipy.sparse.coo_matrix
+ rows : list (row labels)
+ columns : list (column labels)
+
+ Examples
+ --------
+ >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
+ >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
+ (1, 2, 'a', 1),
+ (1, 1, 'b', 0),
+ (1, 1, 'b', 1),
+ (2, 1, 'b', 0),
+ (2, 1, 'b', 1)],
+ names=['A', 'B', 'C', 'D'])
+ >>> ss = s.astype("Sparse")
+ >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
+ ... column_levels=['C', 'D'],
+ ... sort_labels=True)
+ >>> A
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
+ with 3 stored elements in COOrdinate format>
+ >>> A.todense()
+ matrix([[ 0., 0., 1., 3.],
+ [ 3., 0., 0., 0.],
+ [ 0., 0., 0., 0.]])
+ >>> rows
+ [(1, 1), (1, 2), (2, 1)]
+ >>> columns
+ [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
+ """
+ from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo
+
+ A, rows, columns = _sparse_series_to_coo(
+ self._parent, row_levels, column_levels, sort_labels=sort_labels
+ )
+ return A, rows, columns
+
+ def to_dense(self):
+ """
+ Convert a Series from sparse values to dense.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ Series:
+ A Series with the same values, stored as a dense array.
+
+ Examples
+ --------
+ >>> series = pd.Series(pd.SparseArray([0, 1, 0]))
+ >>> series
+ 0 0
+ 1 1
+ 2 0
+ dtype: Sparse[int64, 0]
+
+ >>> series.sparse.to_dense()
+ 0 0
+ 1 1
+ 2 0
+ dtype: int64
+ """
+ from pandas import Series
+
+ return Series(
+ self._parent.array.to_dense(),
+ index=self._parent.index,
+ name=self._parent.name,
+ )
+
+
+class SparseFrameAccessor(BaseAccessor, PandasDelegate):
+ """
+ DataFrame accessor for sparse data.
+
+ .. versionadded:: 0.25.0
+ """
+
+ def _validate(self, data):
+ dtypes = data.dtypes
+ if not all(isinstance(t, SparseDtype) for t in dtypes):
+ raise AttributeError(self._validation_msg)
+
+ @classmethod
+ def from_spmatrix(cls, data, index=None, columns=None):
+ """
+ Create a new DataFrame from a scipy sparse matrix.
+
+ .. versionadded:: 0.25.0
+
+ Parameters
+ ----------
+ data : scipy.sparse.spmatrix
+ Must be convertible to csc format.
+ index, columns : Index, optional
+ Row and column labels to use for the resulting DataFrame.
+ Defaults to a RangeIndex.
+
+ Returns
+ -------
+ DataFrame
+ Each column of the DataFrame is stored as a
+ :class:`SparseArray`.
+
+ Examples
+ --------
+ >>> import scipy.sparse
+ >>> mat = scipy.sparse.eye(3)
+ >>> pd.DataFrame.sparse.from_spmatrix(mat)
+ 0 1 2
+ 0 1.0 0.0 0.0
+ 1 0.0 1.0 0.0
+ 2 0.0 0.0 1.0
+ """
+ from pandas import DataFrame
+
+ data = data.tocsc()
+ index, columns = cls._prep_index(data, index, columns)
+ sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])]
+ data = dict(enumerate(sparrays))
+ result = DataFrame(data, index=index)
+ result.columns = columns
+ return result
+
+ def to_dense(self):
+ """
+ Convert a DataFrame with sparse values to dense.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ DataFrame
+ A DataFrame with the same values stored as dense arrays.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])})
+ >>> df.sparse.to_dense()
+ A
+ 0 0
+ 1 1
+ 2 0
+ """
+ from pandas import DataFrame
+
+ data = {k: v.array.to_dense() for k, v in self._parent.items()}
+ return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
+
+ def to_coo(self):
+ """
+ Return the contents of the frame as a sparse SciPy COO matrix.
+
+ .. versionadded:: 0.25.0
+
+ Returns
+ -------
+ coo_matrix : scipy.sparse.spmatrix
+ If the caller is heterogeneous and contains booleans or objects,
+ the result will be of dtype=object. See Notes.
+
+ Notes
+ -----
+ The dtype will be the lowest-common-denominator type (implicit
+ upcasting); that is to say if the dtypes (even of numeric types)
+ are mixed, the one that accommodates all will be chosen.
+
+ e.g. If the dtypes are float16 and float32, dtype will be upcast to
+ float32. By numpy.find_common_type convention, mixing int64 and
+ and uint64 will result in a float64 dtype.
+ """
+ import_optional_dependency("scipy")
+ from scipy.sparse import coo_matrix
+
+ dtype = find_common_type(self._parent.dtypes)
+ if isinstance(dtype, SparseDtype):
+ dtype = dtype.subtype
+
+ cols, rows, datas = [], [], []
+ for col, name in enumerate(self._parent):
+ s = self._parent[name]
+ row = s.array.sp_index.to_int_index().indices
+ cols.append(np.repeat(col, len(row)))
+ rows.append(row)
+ datas.append(s.array.sp_values.astype(dtype, copy=False))
+
+ cols = np.concatenate(cols)
+ rows = np.concatenate(rows)
+ datas = np.concatenate(datas)
+ return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
+
+ @property
+ def density(self) -> float:
+ """
+ Ratio of non-sparse points to total (dense) data points
+ represented in the DataFrame.
+ """
+ return np.mean([column.array.density for _, column in self._parent.items()])
+
+ @staticmethod
+ def _prep_index(data, index, columns):
+ import pandas.core.indexes.base as ibase
+
+ N, K = data.shape
+ if index is None:
+ index = ibase.default_index(N)
+ if columns is None:
+ columns = ibase.default_index(K)
+
+ if len(columns) != K:
+ raise ValueError(
+ "Column length mismatch: {columns} vs. {K}".format(
+ columns=len(columns), K=K
+ )
+ )
+ if len(index) != N:
+ raise ValueError(
+ "Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
+ )
+ return index, columns
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse/array.py
similarity index 71%
rename from pandas/core/arrays/sparse.py
rename to pandas/core/arrays/sparse/array.py
index c88289c3a4592..5acc922734529 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -4,7 +4,6 @@
from collections import abc
import numbers
import operator
-import re
from typing import Any, Callable
import warnings
@@ -15,11 +14,9 @@
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
-from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
-from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe,
construct_1d_arraylike_from_scalar,
@@ -37,7 +34,6 @@
is_string_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
@@ -46,8 +42,6 @@
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
-from pandas._typing import Dtype
-from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
@@ -58,329 +52,7 @@
import pandas.io.formats.printing as printing
-
-# ----------------------------------------------------------------------------
-# Dtype
-@register_extension_dtype
-class SparseDtype(ExtensionDtype):
- """
- Dtype for data stored in :class:`SparseArray`.
-
- This dtype implements the pandas ExtensionDtype interface.
-
- .. versionadded:: 0.24.0
-
- Parameters
- ----------
- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
- The dtype of the underlying array storing the non-fill value values.
- fill_value : scalar, optional
- The scalar value not stored in the SparseArray. By default, this
- depends on `dtype`.
-
- =========== ==========
- dtype na_value
- =========== ==========
- float ``np.nan``
- int ``0``
- bool ``False``
- datetime64 ``pd.NaT``
- timedelta64 ``pd.NaT``
- =========== ==========
-
- The default value may be overridden by specifying a `fill_value`.
-
- Attributes
- ----------
- None
-
- Methods
- -------
- None
- """
-
- # We include `_is_na_fill_value` in the metadata to avoid hash collisions
- # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
- # Without is_na_fill_value in the comparison, those would be equal since
- # hash(nan) is (sometimes?) 0.
- _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
-
- def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
-
- if isinstance(dtype, type(self)):
- if fill_value is None:
- fill_value = dtype.fill_value
- dtype = dtype.subtype
-
- dtype = pandas_dtype(dtype)
- if is_string_dtype(dtype):
- dtype = np.dtype("object")
-
- if fill_value is None:
- fill_value = na_value_for_dtype(dtype)
-
- if not is_scalar(fill_value):
- raise ValueError(
- "fill_value must be a scalar. Got {} instead".format(fill_value)
- )
- self._dtype = dtype
- self._fill_value = fill_value
-
- def __hash__(self):
- # Python3 doesn't inherit __hash__ when a base class overrides
- # __eq__, so we explicitly do it here.
- return super().__hash__()
-
- def __eq__(self, other):
- # We have to override __eq__ to handle NA values in _metadata.
- # The base class does simple == checks, which fail for NA.
- if isinstance(other, str):
- try:
- other = self.construct_from_string(other)
- except TypeError:
- return False
-
- if isinstance(other, type(self)):
- subtype = self.subtype == other.subtype
- if self._is_na_fill_value:
- # this case is complicated by two things:
- # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
- # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
- # i.e. we want to treat any floating-point NaN as equal, but
- # not a floating-point NaN and a datetime NaT.
- fill_value = (
- other._is_na_fill_value
- and isinstance(self.fill_value, type(other.fill_value))
- or isinstance(other.fill_value, type(self.fill_value))
- )
- else:
- fill_value = self.fill_value == other.fill_value
-
- return subtype and fill_value
- return False
-
- @property
- def fill_value(self):
- """
- The fill value of the array.
-
- Converting the SparseArray to a dense ndarray will fill the
- array with this value.
-
- .. warning::
-
- It's possible to end up with a SparseArray that has ``fill_value``
- values in ``sp_values``. This can occur, for example, when setting
- ``SparseArray.fill_value`` directly.
- """
- return self._fill_value
-
- @property
- def _is_na_fill_value(self):
- return isna(self.fill_value)
-
- @property
- def _is_numeric(self):
- return not is_object_dtype(self.subtype)
-
- @property
- def _is_boolean(self):
- return is_bool_dtype(self.subtype)
-
- @property
- def kind(self):
- """
- The sparse kind. Either 'integer', or 'block'.
- """
- return self.subtype.kind
-
- @property
- def type(self):
- return self.subtype.type
-
- @property
- def subtype(self):
- return self._dtype
-
- @property
- def name(self):
- return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
-
- def __repr__(self):
- return self.name
-
- @classmethod
- def construct_array_type(cls):
- return SparseArray
-
- @classmethod
- def construct_from_string(cls, string):
- """
- Construct a SparseDtype from a string form.
-
- Parameters
- ----------
- string : str
- Can take the following forms.
-
- string dtype
- ================ ============================
- 'int' SparseDtype[np.int64, 0]
- 'Sparse' SparseDtype[np.float64, nan]
- 'Sparse[int]' SparseDtype[np.int64, 0]
- 'Sparse[int, 0]' SparseDtype[np.int64, 0]
- ================ ============================
-
- It is not possible to specify non-default fill values
- with a string. An argument like ``'Sparse[int, 1]'``
- will raise a ``TypeError`` because the default fill value
- for integers is 0.
-
- Returns
- -------
- SparseDtype
- """
- msg = "Could not construct SparseDtype from '{}'".format(string)
- if string.startswith("Sparse"):
- try:
- sub_type, has_fill_value = cls._parse_subtype(string)
- except ValueError:
- raise TypeError(msg)
- else:
- result = SparseDtype(sub_type)
- msg = (
- "Could not construct SparseDtype from '{}'.\n\nIt "
- "looks like the fill_value in the string is not "
- "the default for the dtype. Non-default fill_values "
- "are not supported. Use the 'SparseDtype()' "
- "constructor instead."
- )
- if has_fill_value and str(result) != string:
- raise TypeError(msg.format(string))
- return result
- else:
- raise TypeError(msg)
-
- @staticmethod
- def _parse_subtype(dtype):
- """
- Parse a string to get the subtype
-
- Parameters
- ----------
- dtype : str
- A string like
-
- * Sparse[subtype]
- * Sparse[subtype, fill_value]
-
- Returns
- -------
- subtype : str
-
- Raises
- ------
- ValueError
- When the subtype cannot be extracted.
- """
- xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
- m = xpr.match(dtype)
- has_fill_value = False
- if m:
- subtype = m.groupdict()["subtype"]
- has_fill_value = m.groupdict()["fill_value"] or has_fill_value
- elif dtype == "Sparse":
- subtype = "float64"
- else:
- raise ValueError("Cannot parse {}".format(dtype))
- return subtype, has_fill_value
-
- @classmethod
- def is_dtype(cls, dtype):
- dtype = getattr(dtype, "dtype", dtype)
- if isinstance(dtype, str) and dtype.startswith("Sparse"):
- sub_type, _ = cls._parse_subtype(dtype)
- dtype = np.dtype(sub_type)
- elif isinstance(dtype, cls):
- return True
- return isinstance(dtype, np.dtype) or dtype == "Sparse"
-
- def update_dtype(self, dtype):
- """
- Convert the SparseDtype to a new dtype.
-
- This takes care of converting the ``fill_value``.
-
- Parameters
- ----------
- dtype : Union[str, numpy.dtype, SparseDtype]
- The new dtype to use.
-
- * For a SparseDtype, it is simply returned
- * For a NumPy dtype (or str), the current fill value
- is converted to the new dtype, and a SparseDtype
- with `dtype` and the new fill value is returned.
-
- Returns
- -------
- SparseDtype
- A new SparseDtype with the corret `dtype` and fill value
- for that `dtype`.
-
- Raises
- ------
- ValueError
- When the current fill value cannot be converted to the
- new `dtype` (e.g. trying to convert ``np.nan`` to an
- integer dtype).
-
-
- Examples
- --------
- >>> SparseDtype(int, 0).update_dtype(float)
- Sparse[float64, 0.0]
-
- >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
- Sparse[float64, nan]
- """
- cls = type(self)
- dtype = pandas_dtype(dtype)
-
- if not isinstance(dtype, cls):
- fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
- dtype = cls(dtype, fill_value=fill_value)
-
- return dtype
-
- @property
- def _subtype_with_str(self):
- """
- Whether the SparseDtype's subtype should be considered ``str``.
-
- Typically, pandas will store string data in an object-dtype array.
- When converting values to a dtype, e.g. in ``.astype``, we need to
- be more specific, we need the actual underlying type.
-
- Returns
- -------
-
- >>> SparseDtype(int, 1)._subtype_with_str
- dtype('int64')
-
- >>> SparseDtype(object, 1)._subtype_with_str
- dtype('O')
-
- >>> dtype = SparseDtype(str, '')
- >>> dtype.subtype
- dtype('O')
-
- >>> dtype._subtype_with_str
- str
- """
- if isinstance(self.fill_value, str):
- return type(self.fill_value)
- return self.subtype
-
+from .dtype import SparseDtype
# ----------------------------------------------------------------------------
# Array
@@ -1925,331 +1597,3 @@ def _make_index(length, indices, kind):
else: # pragma: no cover
raise ValueError("must be block or integer type")
return index
-
-
-# ----------------------------------------------------------------------------
-# Accessor
-
-
-class BaseAccessor:
- _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
-
- def __init__(self, data=None):
- self._parent = data
- self._validate(data)
-
- def _validate(self, data):
- raise NotImplementedError
-
-
-@delegate_names(
- SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
-)
-class SparseAccessor(BaseAccessor, PandasDelegate):
- """
- Accessor for SparseSparse from other sparse matrix data types.
- """
-
- def _validate(self, data):
- if not isinstance(data.dtype, SparseDtype):
- raise AttributeError(self._validation_msg)
-
- def _delegate_property_get(self, name, *args, **kwargs):
- return getattr(self._parent.array, name)
-
- def _delegate_method(self, name, *args, **kwargs):
- if name == "from_coo":
- return self.from_coo(*args, **kwargs)
- elif name == "to_coo":
- return self.to_coo(*args, **kwargs)
- else:
- raise ValueError
-
- @classmethod
- def from_coo(cls, A, dense_index=False):
- """
- Create a Series with sparse values from a scipy.sparse.coo_matrix.
-
- Parameters
- ----------
- A : scipy.sparse.coo_matrix
- dense_index : bool, default False
- If False (default), the SparseSeries index consists of only the
- coords of the non-null entries of the original coo_matrix.
- If True, the SparseSeries index consists of the full sorted
- (row, col) coordinates of the coo_matrix.
-
- Returns
- -------
- s : Series
- A Series with sparse values.
-
- Examples
- --------
- >>> from scipy import sparse
- >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
- shape=(3, 4))
- >>> A
- <3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
- >>> A.todense()
- matrix([[ 0., 0., 1., 2.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
- >>> ss = pd.Series.sparse.from_coo(A)
- >>> ss
- 0 2 1
- 3 2
- 1 0 3
- dtype: float64
- BlockIndex
- Block locations: array([0], dtype=int32)
- Block lengths: array([3], dtype=int32)
- """
- from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series
- from pandas import Series
-
- result = _coo_to_sparse_series(A, dense_index=dense_index)
- result = Series(result.array, index=result.index, copy=False)
-
- return result
-
- def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
- """
- Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
-
- Use row_levels and column_levels to determine the row and column
- coordinates respectively. row_levels and column_levels are the names
- (labels) or numbers of the levels. {row_levels, column_levels} must be
- a partition of the MultiIndex level names (or numbers).
-
- Parameters
- ----------
- row_levels : tuple/list
- column_levels : tuple/list
- sort_labels : bool, default False
- Sort the row and column labels before forming the sparse matrix.
-
- Returns
- -------
- y : scipy.sparse.coo_matrix
- rows : list (row labels)
- columns : list (column labels)
-
- Examples
- --------
- >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
- >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
- (1, 2, 'a', 1),
- (1, 1, 'b', 0),
- (1, 1, 'b', 1),
- (2, 1, 'b', 0),
- (2, 1, 'b', 1)],
- names=['A', 'B', 'C', 'D'])
- >>> ss = s.astype("Sparse")
- >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
- ... column_levels=['C', 'D'],
- ... sort_labels=True)
- >>> A
- <3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
- >>> A.todense()
- matrix([[ 0., 0., 1., 3.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
- >>> rows
- [(1, 1), (1, 2), (2, 1)]
- >>> columns
- [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
- """
- from pandas.core.sparse.scipy_sparse import _sparse_series_to_coo
-
- A, rows, columns = _sparse_series_to_coo(
- self._parent, row_levels, column_levels, sort_labels=sort_labels
- )
- return A, rows, columns
-
- def to_dense(self):
- """
- Convert a Series from sparse values to dense.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- Series:
- A Series with the same values, stored as a dense array.
-
- Examples
- --------
- >>> series = pd.Series(pd.SparseArray([0, 1, 0]))
- >>> series
- 0 0
- 1 1
- 2 0
- dtype: Sparse[int64, 0]
-
- >>> series.sparse.to_dense()
- 0 0
- 1 1
- 2 0
- dtype: int64
- """
- from pandas import Series
-
- return Series(
- self._parent.array.to_dense(),
- index=self._parent.index,
- name=self._parent.name,
- )
-
-
-class SparseFrameAccessor(BaseAccessor, PandasDelegate):
- """
- DataFrame accessor for sparse data.
-
- .. versionadded:: 0.25.0
- """
-
- def _validate(self, data):
- dtypes = data.dtypes
- if not all(isinstance(t, SparseDtype) for t in dtypes):
- raise AttributeError(self._validation_msg)
-
- @classmethod
- def from_spmatrix(cls, data, index=None, columns=None):
- """
- Create a new DataFrame from a scipy sparse matrix.
-
- .. versionadded:: 0.25.0
-
- Parameters
- ----------
- data : scipy.sparse.spmatrix
- Must be convertible to csc format.
- index, columns : Index, optional
- Row and column labels to use for the resulting DataFrame.
- Defaults to a RangeIndex.
-
- Returns
- -------
- DataFrame
- Each column of the DataFrame is stored as a
- :class:`SparseArray`.
-
- Examples
- --------
- >>> import scipy.sparse
- >>> mat = scipy.sparse.eye(3)
- >>> pd.DataFrame.sparse.from_spmatrix(mat)
- 0 1 2
- 0 1.0 0.0 0.0
- 1 0.0 1.0 0.0
- 2 0.0 0.0 1.0
- """
- from pandas import DataFrame
-
- data = data.tocsc()
- index, columns = cls._prep_index(data, index, columns)
- sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])]
- data = dict(enumerate(sparrays))
- result = DataFrame(data, index=index)
- result.columns = columns
- return result
-
- def to_dense(self):
- """
- Convert a DataFrame with sparse values to dense.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- DataFrame
- A DataFrame with the same values stored as dense arrays.
-
- Examples
- --------
- >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])})
- >>> df.sparse.to_dense()
- A
- 0 0
- 1 1
- 2 0
- """
- from pandas import DataFrame
-
- data = {k: v.array.to_dense() for k, v in self._parent.items()}
- return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
-
- def to_coo(self):
- """
- Return the contents of the frame as a sparse SciPy COO matrix.
-
- .. versionadded:: 0.25.0
-
- Returns
- -------
- coo_matrix : scipy.sparse.spmatrix
- If the caller is heterogeneous and contains booleans or objects,
- the result will be of dtype=object. See Notes.
-
- Notes
- -----
- The dtype will be the lowest-common-denominator type (implicit
- upcasting); that is to say if the dtypes (even of numeric types)
- are mixed, the one that accommodates all will be chosen.
-
- e.g. If the dtypes are float16 and float32, dtype will be upcast to
- float32. By numpy.find_common_type convention, mixing int64 and
- and uint64 will result in a float64 dtype.
- """
- import_optional_dependency("scipy")
- from scipy.sparse import coo_matrix
-
- dtype = find_common_type(self._parent.dtypes)
- if isinstance(dtype, SparseDtype):
- dtype = dtype.subtype
-
- cols, rows, datas = [], [], []
- for col, name in enumerate(self._parent):
- s = self._parent[name]
- row = s.array.sp_index.to_int_index().indices
- cols.append(np.repeat(col, len(row)))
- rows.append(row)
- datas.append(s.array.sp_values.astype(dtype, copy=False))
-
- cols = np.concatenate(cols)
- rows = np.concatenate(rows)
- datas = np.concatenate(datas)
- return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
-
- @property
- def density(self) -> float:
- """
- Ratio of non-sparse points to total (dense) data points
- represented in the DataFrame.
- """
- return np.mean([column.array.density for _, column in self._parent.items()])
-
- @staticmethod
- def _prep_index(data, index, columns):
- import pandas.core.indexes.base as ibase
-
- N, K = data.shape
- if index is None:
- index = ibase.default_index(N)
- if columns is None:
- columns = ibase.default_index(K)
-
- if len(columns) != K:
- raise ValueError(
- "Column length mismatch: {columns} vs. {K}".format(
- columns=len(columns), K=K
- )
- )
- if len(index) != N:
- raise ValueError(
- "Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
- )
- return index, columns
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
new file mode 100644
index 0000000000000..6fd73ae14fff1
--- /dev/null
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -0,0 +1,343 @@
+"""Sparse Dtype"""
+
+import re
+from typing import Any
+
+import numpy as np
+
+from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.cast import astype_nansafe
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_object_dtype,
+ is_scalar,
+ is_string_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import register_extension_dtype
+from pandas.core.dtypes.missing import isna, na_value_for_dtype
+
+from pandas._typing import Dtype
+
+
+@register_extension_dtype
+class SparseDtype(ExtensionDtype):
+ """
+ Dtype for data stored in :class:`SparseArray`.
+
+ This dtype implements the pandas ExtensionDtype interface.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
+ The dtype of the underlying array storing the non-fill value values.
+ fill_value : scalar, optional
+ The scalar value not stored in the SparseArray. By default, this
+ depends on `dtype`.
+
+ =========== ==========
+ dtype na_value
+ =========== ==========
+ float ``np.nan``
+ int ``0``
+ bool ``False``
+ datetime64 ``pd.NaT``
+ timedelta64 ``pd.NaT``
+ =========== ==========
+
+ The default value may be overridden by specifying a `fill_value`.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+ """
+
+ # We include `_is_na_fill_value` in the metadata to avoid hash collisions
+ # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
+ # Without is_na_fill_value in the comparison, those would be equal since
+ # hash(nan) is (sometimes?) 0.
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
+
+ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
+
+ if isinstance(dtype, type(self)):
+ if fill_value is None:
+ fill_value = dtype.fill_value
+ dtype = dtype.subtype
+
+ dtype = pandas_dtype(dtype)
+ if is_string_dtype(dtype):
+ dtype = np.dtype("object")
+
+ if fill_value is None:
+ fill_value = na_value_for_dtype(dtype)
+
+ if not is_scalar(fill_value):
+ raise ValueError(
+ "fill_value must be a scalar. Got {} instead".format(fill_value)
+ )
+ self._dtype = dtype
+ self._fill_value = fill_value
+
+ def __hash__(self):
+ # Python3 doesn't inherit __hash__ when a base class overrides
+ # __eq__, so we explicitly do it here.
+ return super().__hash__()
+
+ def __eq__(self, other):
+ # We have to override __eq__ to handle NA values in _metadata.
+ # The base class does simple == checks, which fail for NA.
+ if isinstance(other, str):
+ try:
+ other = self.construct_from_string(other)
+ except TypeError:
+ return False
+
+ if isinstance(other, type(self)):
+ subtype = self.subtype == other.subtype
+ if self._is_na_fill_value:
+ # this case is complicated by two things:
+ # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
+ # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
+ # i.e. we want to treat any floating-point NaN as equal, but
+ # not a floating-point NaN and a datetime NaT.
+ fill_value = (
+ other._is_na_fill_value
+ and isinstance(self.fill_value, type(other.fill_value))
+ or isinstance(other.fill_value, type(self.fill_value))
+ )
+ else:
+ fill_value = self.fill_value == other.fill_value
+
+ return subtype and fill_value
+ return False
+
+ @property
+ def fill_value(self):
+ """
+ The fill value of the array.
+
+ Converting the SparseArray to a dense ndarray will fill the
+ array with this value.
+
+ .. warning::
+
+ It's possible to end up with a SparseArray that has ``fill_value``
+ values in ``sp_values``. This can occur, for example, when setting
+ ``SparseArray.fill_value`` directly.
+ """
+ return self._fill_value
+
+ @property
+ def _is_na_fill_value(self):
+ return isna(self.fill_value)
+
+ @property
+ def _is_numeric(self):
+ return not is_object_dtype(self.subtype)
+
+ @property
+ def _is_boolean(self):
+ return is_bool_dtype(self.subtype)
+
+ @property
+ def kind(self):
+ """
+ The sparse kind. Either 'integer', or 'block'.
+ """
+ return self.subtype.kind
+
+ @property
+ def type(self):
+ return self.subtype.type
+
+ @property
+ def subtype(self):
+ return self._dtype
+
+ @property
+ def name(self):
+ return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
+
+ def __repr__(self):
+ return self.name
+
+ @classmethod
+ def construct_array_type(cls):
+ from .array import SparseArray
+
+ return SparseArray
+
+ @classmethod
+ def construct_from_string(cls, string):
+ """
+ Construct a SparseDtype from a string form.
+
+ Parameters
+ ----------
+ string : str
+ Can take the following forms.
+
+ string dtype
+ ================ ============================
+ 'int' SparseDtype[np.int64, 0]
+ 'Sparse' SparseDtype[np.float64, nan]
+ 'Sparse[int]' SparseDtype[np.int64, 0]
+ 'Sparse[int, 0]' SparseDtype[np.int64, 0]
+ ================ ============================
+
+ It is not possible to specify non-default fill values
+ with a string. An argument like ``'Sparse[int, 1]'``
+ will raise a ``TypeError`` because the default fill value
+ for integers is 0.
+
+ Returns
+ -------
+ SparseDtype
+ """
+ msg = "Could not construct SparseDtype from '{}'".format(string)
+ if string.startswith("Sparse"):
+ try:
+ sub_type, has_fill_value = cls._parse_subtype(string)
+ except ValueError:
+ raise TypeError(msg)
+ else:
+ result = SparseDtype(sub_type)
+ msg = (
+ "Could not construct SparseDtype from '{}'.\n\nIt "
+ "looks like the fill_value in the string is not "
+ "the default for the dtype. Non-default fill_values "
+ "are not supported. Use the 'SparseDtype()' "
+ "constructor instead."
+ )
+ if has_fill_value and str(result) != string:
+ raise TypeError(msg.format(string))
+ return result
+ else:
+ raise TypeError(msg)
+
+ @staticmethod
+ def _parse_subtype(dtype):
+ """
+ Parse a string to get the subtype
+
+ Parameters
+ ----------
+ dtype : str
+ A string like
+
+ * Sparse[subtype]
+ * Sparse[subtype, fill_value]
+
+ Returns
+ -------
+ subtype : str
+
+ Raises
+ ------
+ ValueError
+ When the subtype cannot be extracted.
+ """
+ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
+ m = xpr.match(dtype)
+ has_fill_value = False
+ if m:
+ subtype = m.groupdict()["subtype"]
+ has_fill_value = m.groupdict()["fill_value"] or has_fill_value
+ elif dtype == "Sparse":
+ subtype = "float64"
+ else:
+ raise ValueError("Cannot parse {}".format(dtype))
+ return subtype, has_fill_value
+
+ @classmethod
+ def is_dtype(cls, dtype):
+ dtype = getattr(dtype, "dtype", dtype)
+ if isinstance(dtype, str) and dtype.startswith("Sparse"):
+ sub_type, _ = cls._parse_subtype(dtype)
+ dtype = np.dtype(sub_type)
+ elif isinstance(dtype, cls):
+ return True
+ return isinstance(dtype, np.dtype) or dtype == "Sparse"
+
+ def update_dtype(self, dtype):
+ """
+ Convert the SparseDtype to a new dtype.
+
+ This takes care of converting the ``fill_value``.
+
+ Parameters
+ ----------
+ dtype : Union[str, numpy.dtype, SparseDtype]
+ The new dtype to use.
+
+ * For a SparseDtype, it is simply returned
+ * For a NumPy dtype (or str), the current fill value
+ is converted to the new dtype, and a SparseDtype
+ with `dtype` and the new fill value is returned.
+
+ Returns
+ -------
+ SparseDtype
+ A new SparseDtype with the corret `dtype` and fill value
+ for that `dtype`.
+
+ Raises
+ ------
+ ValueError
+ When the current fill value cannot be converted to the
+ new `dtype` (e.g. trying to convert ``np.nan`` to an
+ integer dtype).
+
+
+ Examples
+ --------
+ >>> SparseDtype(int, 0).update_dtype(float)
+ Sparse[float64, 0.0]
+
+ >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
+ Sparse[float64, nan]
+ """
+ cls = type(self)
+ dtype = pandas_dtype(dtype)
+
+ if not isinstance(dtype, cls):
+ fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
+ dtype = cls(dtype, fill_value=fill_value)
+
+ return dtype
+
+ @property
+ def _subtype_with_str(self):
+ """
+ Whether the SparseDtype's subtype should be considered ``str``.
+
+ Typically, pandas will store string data in an object-dtype array.
+ When converting values to a dtype, e.g. in ``.astype``, we need to
+ be more specific, we need the actual underlying type.
+
+ Returns
+ -------
+
+ >>> SparseDtype(int, 1)._subtype_with_str
+ dtype('int64')
+
+ >>> SparseDtype(object, 1)._subtype_with_str
+ dtype('O')
+
+ >>> dtype = SparseDtype(str, '')
+ >>> dtype.subtype
+ dtype('O')
+
+ >>> dtype._subtype_with_str
+ str
+ """
+ if isinstance(self.fill_value, str):
+ return type(self.fill_value)
+ return self.subtype
diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
similarity index 100%
rename from pandas/core/sparse/scipy_sparse.py
rename to pandas/core/arrays/sparse/scipy_sparse.py
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 071a8db707b69..f1d2803ce5505 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -5,7 +5,7 @@
import pandas as pd
from pandas.core import ops
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
import pandas.util.testing as tm
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 5d5ee565c7891..c02d8ae4e7429 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import isna
-from pandas.core.sparse.api import SparseArray, SparseDtype
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/arrays/sparse/test_combine_concat.py
similarity index 100%
rename from pandas/tests/sparse/test_combine_concat.py
rename to pandas/tests/arrays/sparse/test_combine_concat.py
diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py
index db8f62962f0b0..aa8d2afca11e6 100644
--- a/pandas/tests/arrays/sparse/test_dtype.py
+++ b/pandas/tests/arrays/sparse/test_dtype.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
@pytest.mark.parametrize(
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 3288c9c584565..036b0213973d6 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -30,7 +30,7 @@
import pandas as pd
from pandas import Categorical, CategoricalIndex, IntervalIndex, Series, date_range
-from pandas.core.sparse.api import SparseDtype
+from pandas.core.arrays.sparse import SparseDtype
import pandas.util.testing as tm
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index 9d08981d39894..5e80c317a587b 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -8,7 +8,7 @@
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
-from pandas.core.sparse.api import SparseArray, SparseDtype
+from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
diff --git a/pandas/tests/sparse/__init__.py b/pandas/tests/sparse/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
| Updating our internal imports to use the path were they actually live, not the old `core.sparse.api` one.
While doing this, I was thinking now that for the tests, we could actually also use the top-level `pd.` imports.
@TomAugspurger in the `core/sparse/` module, the only actual code living there is the scipy_sparse code. I moved that to `arrays` module (but could also be moved within the sparse.py file, or create a sparse subdirectory, ..).
Do we want to raise a deprecation warning on pandas.core.sparse.api? | https://api.github.com/repos/pandas-dev/pandas/pulls/28516 | 2019-09-19T07:20:38Z | 2019-09-27T14:58:18Z | 2019-09-27T14:58:18Z | 2019-09-27T14:59:19Z |
REF: Parametrize test | diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index f87d6dba72e68..5eb9a067b11e4 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -528,32 +528,33 @@ def test_as_array_datetime_tz(self):
assert mgr.get("g").dtype == "datetime64[ns, CET]"
assert mgr.as_array().dtype == "object"
- def test_astype(self):
+ @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"])
+ def test_astype(self, t):
# coerce all
mgr = create_mgr("c: f4; d: f2; e: f8")
- for t in ["float16", "float32", "float64", "int32", "int64"]:
- t = np.dtype(t)
- tmgr = mgr.astype(t)
- assert tmgr.get("c").dtype.type == t
- assert tmgr.get("d").dtype.type == t
- assert tmgr.get("e").dtype.type == t
+
+ t = np.dtype(t)
+ tmgr = mgr.astype(t)
+ assert tmgr.get("c").dtype.type == t
+ assert tmgr.get("d").dtype.type == t
+ assert tmgr.get("e").dtype.type == t
# mixed
mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
- for t in ["float16", "float32", "float64", "int32", "int64"]:
- t = np.dtype(t)
- tmgr = mgr.astype(t, errors="ignore")
- assert tmgr.get("c").dtype.type == t
- assert tmgr.get("e").dtype.type == t
- assert tmgr.get("f").dtype.type == t
- assert tmgr.get("g").dtype.type == t
-
- assert tmgr.get("a").dtype.type == np.object_
- assert tmgr.get("b").dtype.type == np.object_
- if t != np.int64:
- assert tmgr.get("d").dtype.type == np.datetime64
- else:
- assert tmgr.get("d").dtype.type == t
+
+ t = np.dtype(t)
+ tmgr = mgr.astype(t, errors="ignore")
+ assert tmgr.get("c").dtype.type == t
+ assert tmgr.get("e").dtype.type == t
+ assert tmgr.get("f").dtype.type == t
+ assert tmgr.get("g").dtype.type == t
+
+ assert tmgr.get("a").dtype.type == np.object_
+ assert tmgr.get("b").dtype.type == np.object_
+ if t != np.int64:
+ assert tmgr.get("d").dtype.type == np.datetime64
+ else:
+ assert tmgr.get("d").dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
| Let me know if I'm mistaken here, but I think we would prefer parametrization over for loops inside tests (e.g., so `pytest` flags which case is failing), and this should be equivalent. | https://api.github.com/repos/pandas-dev/pandas/pulls/28515 | 2019-09-19T00:40:34Z | 2019-09-19T14:32:40Z | 2019-09-19T14:32:40Z | 2019-09-19T16:53:20Z |
TST: suppress 1485 warnings issued by xml parser | diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py
index e69de29bb2d1d..550172329fc57 100644
--- a/pandas/tests/io/excel/__init__.py
+++ b/pandas/tests/io/excel/__init__.py
@@ -0,0 +1,6 @@
+import pytest
+
+pytestmark = pytest.mark.filterwarnings(
+ # Looks like tree.getiterator is deprecated in favor of tree.iter
+ "ignore:This method will be removed in future versions:PendingDeprecationWarning"
+)
| So we don't have to scroll through tons of
```
pandas/tests/io/excel/test_xlrd.py::test_excel_table_sheet_by_index[.xlsm]
/usr/local/lib/python3.7/site-packages/xlrd/xlsx.py:312: PendingDeprecationWarning: This method will be removed in future versions. Use 'tree.iter()' or 'list(tree.iter())' instead.
for elem in self.tree.iter() if Element_has_iter else self.tree.getiterator():
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/28514 | 2019-09-18T23:47:04Z | 2019-09-19T15:49:21Z | 2019-09-19T15:49:21Z | 2019-09-19T16:05:23Z |
CLN: dont catch Exception when calling maybe_convert_numeric | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index ac9b57dc8d342..e31918c21c2ac 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -796,7 +796,7 @@ def maybe_convert_objects(values: np.ndarray, convert_numeric: bool = True):
new_values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=True
)
- except Exception:
+ except (ValueError, TypeError):
pass
else:
# if we are all nans then leave me alone
@@ -875,7 +875,7 @@ def soft_convert_objects(
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
- except Exception:
+ except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not-alter
@@ -953,9 +953,10 @@ def try_datetime(v):
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
+ from pandas._libs.tslibs import conversion
+ from pandas import DatetimeIndex
+
try:
- from pandas._libs.tslibs import conversion
- from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz)
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index a0e2c8d9cab65..fa33d11bda7eb 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -137,21 +137,20 @@ def to_numeric(arg, errors="raise", downcast=None):
else:
values = arg
- try:
- if is_numeric_dtype(values):
- pass
- elif is_datetime_or_timedelta_dtype(values):
- values = values.astype(np.int64)
- else:
- values = ensure_object(values)
- coerce_numeric = errors not in ("ignore", "raise")
+ if is_numeric_dtype(values):
+ pass
+ elif is_datetime_or_timedelta_dtype(values):
+ values = values.astype(np.int64)
+ else:
+ values = ensure_object(values)
+ coerce_numeric = errors not in ("ignore", "raise")
+ try:
values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
-
- except Exception:
- if errors == "raise":
- raise
+ except (ValueError, TypeError):
+ if errors == "raise":
+ raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 72f1adf0aad3d..3678e32943b2e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1782,14 +1782,17 @@ def _infer_types(self, values, na_values, try_num_bool=True):
np.putmask(values, mask, np.nan)
return values, na_count
- if try_num_bool:
+ if try_num_bool and is_object_dtype(values.dtype):
+ # exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
- na_count = isna(result).sum()
- except Exception:
+ except (ValueError, TypeError):
+ # e.g. encountering datetime string gets ValueError
+ # TypeError can be raised in floatify
result = values
- if values.dtype == np.object_:
- na_count = parsers.sanitize_objects(result, na_values, False)
+ na_count = parsers.sanitize_objects(result, na_values, False)
+ else:
+ na_count = isna(result).sum()
else:
result = values
if values.dtype == np.object_:
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 0a8707bdac3a0..cfa6304909bb7 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -379,9 +379,12 @@ def test_isinf_scalar(self):
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar("a")
- def test_maybe_convert_numeric_infinities(self):
+ @pytest.mark.parametrize("maybe_int", [True, False])
+ @pytest.mark.parametrize(
+ "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
+ )
+ def test_maybe_convert_numeric_infinities(self, infinity, maybe_int):
# see gh-13274
- infinities = ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
na_values = {"", "NULL", "nan"}
pos = np.array(["inf"], dtype=np.float64)
@@ -389,35 +392,31 @@ def test_maybe_convert_numeric_infinities(self):
msg = "Unable to parse string"
- for infinity in infinities:
- for maybe_int in (True, False):
- out = lib.maybe_convert_numeric(
- np.array([infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- out = lib.maybe_convert_numeric(
- np.array(["-" + infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, neg)
-
- out = lib.maybe_convert_numeric(
- np.array([infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- out = lib.maybe_convert_numeric(
- np.array(["+" + infinity], dtype=object), na_values, maybe_int
- )
- tm.assert_numpy_array_equal(out, pos)
-
- # too many characters
- with pytest.raises(ValueError, match=msg):
- lib.maybe_convert_numeric(
- np.array(["foo_" + infinity], dtype=object),
- na_values,
- maybe_int,
- )
+ out = lib.maybe_convert_numeric(
+ np.array([infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ out = lib.maybe_convert_numeric(
+ np.array(["-" + infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, neg)
+
+ out = lib.maybe_convert_numeric(
+ np.array([infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ out = lib.maybe_convert_numeric(
+ np.array(["+" + infinity], dtype=object), na_values, maybe_int
+ )
+ tm.assert_numpy_array_equal(out, pos)
+
+ # too many characters
+ with pytest.raises(ValueError, match=msg):
+ lib.maybe_convert_numeric(
+ np.array(["foo_" + infinity], dtype=object), na_values, maybe_int
+ )
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
| and parametrized one of the tests for maybe_convert_numeric | https://api.github.com/repos/pandas-dev/pandas/pulls/28513 | 2019-09-18T23:07:38Z | 2019-09-20T12:35:26Z | 2019-09-20T12:35:26Z | 2019-09-20T14:16:27Z |
Pick and choose numpy version based on Python 2 or 3. | diff --git a/ci/deps/azure-27-compat.yaml b/ci/deps/azure-27-compat.yaml
index c68b51fbd6644..aec57c05e87b8 100644
--- a/ci/deps/azure-27-compat.yaml
+++ b/ci/deps/azure-27-compat.yaml
@@ -9,7 +9,7 @@ dependencies:
- numexpr=2.6.1
- numpy=1.12.0
- openpyxl=2.5.5
- - pytables=3.4.2
+ - pytables>=3.5.2
- python-dateutil=2.5.0
- python=2.7*
- pytz=2013b
diff --git a/ci/incremental/setup_conda_environment.cmd b/ci/incremental/setup_conda_environment.cmd
index c104d78591384..2b1f6b5bd2f15 100644
--- a/ci/incremental/setup_conda_environment.cmd
+++ b/ci/incremental/setup_conda_environment.cmd
@@ -12,6 +12,8 @@ call deactivate
conda list
@rem Clean up any left-over from a previous build
conda remove --all -q -y -n pandas-dev
+@rem free channel needed for older packages
+conda config --set restore_free_channel true
@rem Scipy, CFFI, jinja2 and IPython are optional dependencies, but exercised in the test suite
conda env create --file=ci\deps\azure-windows-%CONDA_PY%.yaml
diff --git a/ci/incremental/setup_conda_environment.sh b/ci/incremental/setup_conda_environment.sh
index f174c17a614d8..43690d427ac70 100755
--- a/ci/incremental/setup_conda_environment.sh
+++ b/ci/incremental/setup_conda_environment.sh
@@ -15,6 +15,9 @@ conda list
# `conda env remove` issue)
conda remove --all -q -y -n pandas-dev
+# free channel needed for older packages
+conda config --set restore_free_channel true
+
echo
echo "[create env]"
time conda env create -q --file="${ENV_FILE}" || exit 1
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index d1a940f119228..730d726b534f9 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -50,6 +50,9 @@ conda config --set ssl_verify false || exit 1
conda config --set quiet true --set always_yes true --set changeps1 false || exit 1
conda update -q conda
+# free channel needed for older packages
+conda config --set restore_free_channel true
+
# Useful for debugging any issues with conda
conda info -a || exit 1
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 92364fcc9ebd2..9e61afea2ec60 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -256,7 +256,7 @@ Optional Dependencies
version. Version 0.28.2 or higher.
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.18.1 or higher
* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
-* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher
+* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.5.2 or higher
* `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.9.0): necessary for feather-based storage.
* `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.2.1) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support.
* `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. Some common drivers are:
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index a49ea2cf493a6..97ef8fbb1d1c2 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -460,7 +460,7 @@ If installed, we now require:
+-----------------+-----------------+----------+
| pyarrow | 0.9.0 | |
+-----------------+-----------------+----------+
-| pytables | 3.4.2 | |
+| pytables | 3.5.2 | |
+-----------------+-----------------+----------+
| scipy | 0.18.1 | |
+-----------------+-----------------+----------+
diff --git a/environment.yml b/environment.yml
index ff7c5d56052d2..39f5680f4316c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -42,7 +42,7 @@ dependencies:
- numexpr>=2.6.8
- openpyxl
- pyarrow>=0.9.0
- - pytables>=3.4.2
+ - pytables>=3.5.2
- pytest-cov
- pytest-xdist
- s3fs
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fb40c08aeb28a..842bf8da56dd8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2522,7 +2522,7 @@ def memory_usage(self, index=True, deep=False):
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
- index in returned Series. If ``index=True``, the memory usage of
+ index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 02d8b0a70aab6..e9ec4e1a55d0e 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -31,7 +31,7 @@ nbsphinx
numexpr>=2.6.8
openpyxl
pyarrow>=0.9.0
-tables>=3.4.2
+tables>=3.5.2
pytest-cov
pytest-xdist
s3fs
diff --git a/setup.py b/setup.py
index 2a67b21414f63..6bcf28fae676c 100755
--- a/setup.py
+++ b/setup.py
@@ -31,13 +31,22 @@ def is_platform_mac():
min_numpy_ver = '1.12.0'
+max_numpy_ver = ''
+
+if (sys.version_info < (3, 0)):
+ max_numpy_ver = '1.17'
+
+numpy_install_rule = 'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)
+if max_numpy_ver:
+ numpy_install_rule += ', < {numpy_ver}'.format(numpy_ver=max_numpy_ver)
+
setuptools_kwargs = {
'install_requires': [
'python-dateutil >= 2.5.0',
'pytz >= 2011k',
- 'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver),
+ numpy_install_rule,
],
- 'setup_requires': ['numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)],
+ 'setup_requires': [numpy_install_rule],
'zip_safe': False,
}
| - [x] closes #27435
- [ ] tests added / passed
- [ ] passes `black pandas` (not applicable, `black setup.py` changes too many quotes, polluting the diff).
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/28511 | 2019-09-18T21:06:07Z | 2020-01-01T21:00:01Z | null | 2020-01-01T21:00:02Z |
Re-implemented parametrization of test_frame_from_json_to_json | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 9c489c7cc17ec..415b1d81eb3e4 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import PY35, is_platform_32bit
+from pandas.compat import PY35, is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
@@ -154,322 +154,212 @@ def test_frame_non_unique_columns_raises(self, orient):
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
- def test_frame_from_json_to_json(self):
- def _check_orient(
- df,
- orient,
- dtype=None,
- numpy=False,
- convert_axes=True,
- check_dtype=True,
- raise_ok=None,
- sort=None,
- check_index_type=True,
- check_column_type=True,
- check_numpy_dtype=False,
- ):
- if sort is not None:
- df = df.sort_values(sort)
- else:
- df = df.sort_index()
-
- # if we are not unique, then check that we are raising ValueError
- # for the appropriate orients
- if not df.index.is_unique and orient in ["index", "columns"]:
- msg = "DataFrame index must be unique for orient='{}'".format(orient)
- with pytest.raises(ValueError, match=msg):
- df.to_json(orient=orient)
- return
- if not df.columns.is_unique and orient in ["index", "columns", "records"]:
- # TODO: not executed. fix this.
- with pytest.raises(ValueError, match="ksjkajksfjksjfkjs"):
- df.to_json(orient=orient)
- return
-
- dfjson = df.to_json(orient=orient)
-
- try:
- unser = read_json(
- dfjson,
- orient=orient,
- dtype=dtype,
- numpy=numpy,
- convert_axes=convert_axes,
- )
- except Exception as detail:
- if raise_ok is not None:
- if isinstance(detail, raise_ok):
- return
- raise
-
- if sort is not None and sort in unser.columns:
- unser = unser.sort_values(sort)
- else:
- unser = unser.sort_index()
-
- if not dtype:
- check_dtype = False
-
- if not convert_axes and df.index.dtype.type == np.datetime64:
- unser.index = DatetimeIndex(unser.index.values.astype("i8") * 1e6)
- if orient == "records":
- # index is not captured in this orientation
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- tm.assert_index_equal(
- df.columns, unser.columns, exact=check_column_type
- )
- elif orient == "values":
- # index and cols are not captured in this orientation
- if numpy is True and df.shape == (0, 0):
- assert unser.shape[0] == 0
- else:
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- elif orient == "split":
- # index and col labels might not be strings
- unser.index = [str(i) for i in unser.index]
- unser.columns = [str(i) for i in unser.columns]
-
- if sort is None:
- unser = unser.sort_index()
- tm.assert_almost_equal(
- df.values, unser.values, check_dtype=check_numpy_dtype
- )
- else:
- if convert_axes:
- tm.assert_frame_equal(
- df,
- unser,
- check_dtype=check_dtype,
- check_index_type=check_index_type,
- check_column_type=check_column_type,
- )
- else:
- tm.assert_frame_equal(
- df, unser, check_less_precise=False, check_dtype=check_dtype
- )
-
- def _check_all_orients(
- df,
- dtype=None,
- convert_axes=True,
- raise_ok=None,
- sort=None,
- check_index_type=True,
- check_column_type=True,
- ):
+ def test_frame_default_orient(self):
+ assert self.frame.to_json() == self.frame.to_json(orient="columns")
- # numpy=False
- if convert_axes:
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
+ @pytest.mark.parametrize("dtype", [False, float])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
+ data = self.frame.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
+ )
- _check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
- _check_orient(df, "values", dtype=dtype, convert_axes=False, sort=sort)
-
- # numpy=True and raise_ok might be not None, so ignore the error
- if convert_axes:
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- numpy=True,
- raise_ok=raise_ok,
- sort=sort,
- check_index_type=False,
- check_column_type=False,
- )
+ expected = self.frame.copy()
- _check_orient(
- df,
- "columns",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "records",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "split",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "index",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
- _check_orient(
- df,
- "values",
- dtype=dtype,
- numpy=True,
- convert_axes=False,
- raise_ok=raise_ok,
- sort=sort,
- )
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ # TODO: debug why sort is required
+ expected = expected.sort_index()
- # basic
- _check_all_orients(self.frame)
- assert self.frame.to_json() == self.frame.to_json(orient="columns")
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [False, np.int64])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
+ data = self.intframe.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
+ )
+ expected = self.intframe.copy()
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
- _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
- _check_all_orients(self.intframe, dtype=False)
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
- # big one
- # index and columns are strings as all unserialised JSON object keys
- # are assumed to be strings
- biggie = DataFrame(
+ if (
+ numpy
+ and (is_platform_32bit() or is_platform_windows())
+ and not dtype
+ and orient != "split"
+ ):
+ # TODO: see what is causing roundtrip dtype loss
+ expected = expected.astype(np.int32)
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
+ df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
+ dtype=dtype,
)
- _check_all_orients(biggie, dtype=False, convert_axes=False)
- # dtypes
- _check_all_orients(
- DataFrame(biggie, dtype=np.float64), dtype=np.float64, convert_axes=False
+ # TODO: do we even need to support U3 dtypes?
+ if numpy and dtype == "U3" and orient != "split":
+ pytest.xfail("Can't decode directly to array")
+
+ data = df.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
- _check_all_orients(
- DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False
+
+ expected = df.copy()
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ if not dtype:
+ expected = expected.astype(np.int64)
+
+ # index columns, and records orients cannot fully preserve the string
+ # dtype for axes as the index and column labels are used as keys in
+ # JSON objects. JSON keys are by definition strings, so there's no way
+ # to disambiguate whether those keys actually were strings or numeric
+ # beforehand and numeric wins out.
+ # TODO: Split should be able to support this
+ if convert_axes and (orient in ("split", "index", "columns")):
+ expected.columns = expected.columns.astype(np.int64)
+ expected.index = expected.index.astype(np.int64)
+ elif orient == "records" and convert_axes:
+ expected.columns = expected.columns.astype(np.int64)
+
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_categorical(self, orient, convert_axes, numpy):
+ # TODO: create a better frame to test with and improve coverage
+ if orient in ("index", "columns"):
+ pytest.xfail(
+ "Can't have duplicate index values for orient '{}')".format(orient)
+ )
+
+ data = self.categorical.to_json(orient=orient)
+ if numpy and orient in ("records", "values"):
+ pytest.xfail("Orient {} is broken with numpy=True".format(orient))
+
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- _check_all_orients(
- DataFrame(biggie, dtype="U3"),
- dtype="U3",
- convert_axes=False,
- raise_ok=ValueError,
+
+ expected = self.categorical.copy()
+ expected.index = expected.index.astype(str) # Categorical not preserved
+ expected.index.name = None # index names aren't preserved in JSON
+
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_empty(self, orient, convert_axes, numpy):
+ data = self.empty_frame.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
+ expected = self.empty_frame.copy()
+
+ # TODO: both conditions below are probably bugs
+ if convert_axes:
+ expected.index = expected.index.astype(float)
+ expected.columns = expected.columns.astype(float)
+ if numpy and orient == "values":
+ expected = expected.reindex([0], axis=1).reset_index(drop=True)
- # categorical
- _check_all_orients(self.categorical, sort="sort", raise_ok=ValueError)
+ tm.assert_frame_equal(result, expected)
- # empty
- _check_all_orients(
- self.empty_frame, check_index_type=False, check_column_type=False
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
+ # TODO: improve coverage with date_format parameter
+ data = self.tsframe.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
+ expected = self.tsframe.copy()
+
+ if not convert_axes: # one off for ts handling
+ # DTI gets converted to epoch values
+ idx = expected.index.astype(np.int64) // 1000000
+ if orient != "split": # TODO: handle consistently across orients
+ idx = idx.astype(str)
+
+ expected.index = idx
- # time series data
- _check_all_orients(self.tsframe)
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("convert_axes", [True, False])
+ @pytest.mark.parametrize("numpy", [True, False])
+ def test_roundtrip_mixed(self, orient, convert_axes, numpy):
+ if numpy and orient != "split":
+ pytest.xfail("Can't decode directly to array")
- # mixed data
index = pd.Index(["a", "b", "c", "d", "e"])
- data = {
+ values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
- df = DataFrame(data=data, index=index)
- _check_orient(df, "split", check_dtype=False)
- _check_orient(df, "records", check_dtype=False)
- _check_orient(df, "values", check_dtype=False)
- _check_orient(df, "columns", check_dtype=False)
- # index oriented is problematic as it is read back in in a transposed
- # state, so the columns are interpreted as having mixed data and
- # given object dtypes.
- # force everything to have object dtype beforehand
- _check_orient(df.transpose().transpose(), "index", dtype=False)
+
+ df = DataFrame(data=values, index=index)
+
+ data = df.to_json(orient=orient)
+ result = pd.read_json(
+ data, orient=orient, convert_axes=convert_axes, numpy=numpy
+ )
+
+ expected = df.copy()
+ expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
+
+ if not numpy and (orient == "index" or (PY35 and orient == "columns")):
+ expected = expected.sort_index()
+
+ if orient == "records" or orient == "values":
+ expected = expected.reset_index(drop=True)
+ if orient == "values":
+ expected.columns = range(len(expected.columns))
+
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,msg,orient",
| This piece was broken off of #27838 as it made the diff much larger, so hopefully easier to digest on its own.
As is, parametrization here has brought up a lot of rough edges which are responsible for some of the complexity. These are noted with TODOs and summarized as follows (save Py35 issues, which aren't worth addressing at this point):
- Frame order is not maintained when `numpy=False` (default) and `orient="index"`
- On windows or 32 bit platforms it appears that `np.int64` roundtrips as `np.int32` (maybe not an issue?)
- `orient="split"` does not preserve strings in the index if those strings are numeric, though it should be able to
- `convert_axes` may have surprising behavior when dealing with empty DataFrames
- DTI seem to roundtrip as strings when written with epoch format for all but `orient="split"
Not all of these are the same priority, but figure worth leaving as follow ups | https://api.github.com/repos/pandas-dev/pandas/pulls/28510 | 2019-09-18T21:02:02Z | 2019-09-20T17:53:30Z | 2019-09-20T17:53:30Z | 2019-09-20T18:06:29Z |
[WIP, ENH] Adds cumulative methods to ea | diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst
index 7b451ed3bf296..bc21fcf61b2db 100644
--- a/doc/source/reference/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -32,6 +32,7 @@ objects.
.. autosummary::
:toctree: api/
+ api.extensions.ExtensionArray._accumulate
api.extensions.ExtensionArray._concat_same_type
api.extensions.ExtensionArray._formatter
api.extensions.ExtensionArray._from_factorized
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 9f3ccb3e14116..590c23d955453 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -342,6 +342,7 @@ Other enhancements
- ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`, :issue:`35374`)
- :meth:`Series.plot` now supports asymmetric error bars. Previously, if :meth:`Series.plot` received a "2xN" array with error values for ``yerr`` and/or ``xerr``, the left/lower values (first row) were mirrored, while the right/upper values (second row) were ignored. Now, the first row represents the left/lower error values and the second row the right/upper error values. (:issue:`9536`)
+
.. ---------------------------------------------------------------------------
.. _whatsnew_110.notable_bug_fixes:
diff --git a/pandas/conftest.py b/pandas/conftest.py
index ce572e42abec6..c5df29829634e 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1005,6 +1005,17 @@ def all_logical_operators(request):
return request.param
+_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"]
+
+
+@pytest.fixture(params=_all_numeric_accumulations)
+def all_numeric_accumulations(request):
+ """
+ Fixture for numeric accumulation names
+ """
+ return request.param
+
+
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
diff --git a/pandas/core/array_algos/datetimelike_accumulations.py b/pandas/core/array_algos/datetimelike_accumulations.py
new file mode 100644
index 0000000000000..f2e0bb0ba3ff6
--- /dev/null
+++ b/pandas/core/array_algos/datetimelike_accumulations.py
@@ -0,0 +1,69 @@
+from typing import Callable
+
+import numpy as np
+
+from pandas._libs import iNaT
+
+from pandas.core.dtypes.missing import isna
+
+"""
+datetimelke_accumulations.py is for accumulations of datetimelike extension arrays
+"""
+
+
+def _cum_func(
+ func: Callable,
+ values: np.ndarray,
+ *,
+ skipna: bool = True,
+):
+ """
+ Accumulations for 1D datetimelike arrays.
+
+ Parameters
+ ----------
+ func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
+ values : np.ndarray
+ Numpy array with the values (can be of any dtype that support the
+ operation).
+ skipna : bool, default True
+ Whether to skip NA.
+ """
+ try:
+ fill_value = {
+ np.cumprod: 1,
+ np.maximum.accumulate: np.iinfo(np.int64).min,
+ np.cumsum: 0,
+ np.minimum.accumulate: np.iinfo(np.int64).max,
+ }[func]
+ except KeyError:
+ raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
+
+ mask = isna(values)
+ y = values.view("i8")
+ y[mask] = fill_value
+
+ if not skipna:
+ # This is different compared to the recent implementation for datetimelikes
+ # but is the same as the implementation for masked arrays
+ mask = np.maximum.accumulate(mask)
+
+ result = func(y)
+ result[mask] = iNaT
+ return result
+
+
+def cumsum(values: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.cumsum, values, skipna=skipna)
+
+
+def cumprod(values: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.cumprod, values, skipna=skipna)
+
+
+def cummin(values: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.minimum.accumulate, values, skipna=skipna)
+
+
+def cummax(values: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.maximum.accumulate, values, skipna=skipna)
diff --git a/pandas/core/array_algos/masked_accumulations.py b/pandas/core/array_algos/masked_accumulations.py
new file mode 100644
index 0000000000000..fee283227df57
--- /dev/null
+++ b/pandas/core/array_algos/masked_accumulations.py
@@ -0,0 +1,78 @@
+from typing import Callable
+
+import numpy as np
+
+from pandas.core.dtypes.common import (
+ is_float_dtype,
+ is_integer_dtype,
+)
+
+"""
+masked_accumulations.py is for accumulation algorithms using a mask-based approach
+for missing values.
+"""
+
+
+def _cum_func(
+ func: Callable,
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+):
+ """
+ Accumulations for 1D masked array.
+
+ Parameters
+ ----------
+ func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
+ values : np.ndarray
+ Numpy array with the values (can be of any dtype that support the
+ operation).
+ mask : np.ndarray
+ Boolean numpy array (True values indicate missing values).
+ skipna : bool, default True
+ Whether to skip NA.
+ """
+ dtype_info = None
+ if is_float_dtype(values):
+ dtype_info = np.finfo(values.dtype.type)
+ elif is_integer_dtype(values):
+ dtype_info = np.iinfo(values.dtype.type)
+ else:
+ raise NotImplementedError(
+ f"No masked accumulation defined for dtype {values.dtype.type}"
+ )
+ try:
+ fill_value = {
+ np.cumprod: 1,
+ np.maximum.accumulate: dtype_info.min,
+ np.cumsum: 0,
+ np.minimum.accumulate: dtype_info.max,
+ }[func]
+ except KeyError:
+ raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
+
+ values[mask] = fill_value
+
+ if not skipna:
+ mask = np.maximum.accumulate(mask)
+
+ values = func(values)
+ return values, mask
+
+
+def cumsum(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.cumsum, values, mask, skipna=skipna)
+
+
+def cumprod(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.cumprod, values, mask, skipna=skipna)
+
+
+def cummin(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
+
+
+def cummax(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
+ return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index edc8fa14ca142..09d6a030040f5 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -108,6 +108,7 @@ class ExtensionArray:
take
unique
view
+ _accumulate
_concat_same_type
_formatter
_from_factorized
@@ -157,8 +158,9 @@ class ExtensionArray:
as they only compose abstract methods. Still, a more efficient
implementation may be available, and these methods can be overridden.
- One can implement methods to handle array reductions.
+ One can implement methods to handle array accumulations or reductions.
+ * _accumulate
* _reduce
One can implement methods to handle parsing from strings that will be used
@@ -1253,6 +1255,37 @@ def _concat_same_type(
# of objects
_can_hold_na = True
+ def _accumulate(
+ self: ExtensionArray, name: str, *, skipna=True, **kwargs
+ ) -> ExtensionArray:
+ """
+ Return an ExtensionArray performing an accumulation operation.
+ The underlying data type might change
+
+ Parameters
+ ----------
+ name : str
+ Name of the function, supported values are:
+ - cummin
+ - cummax
+ - cumsum
+ - cumprod
+ skipna : bool, default True
+ If True, skip NA values.
+ **kwargs
+ Additional keyword arguments passed to the accumulation function.
+ Currently, there is no supported kwarg.
+
+ Returns
+ -------
+ array
+
+ Raises
+ ------
+ NotImplementedError : subclass does not define accumulations
+ """
+ raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
+
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 260cd08707473..f8a628d347229 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -712,6 +712,15 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
return super()._reduce(name, skipna=skipna, **kwargs)
+ def _accumulate(
+ self, name: str, *, skipna: bool = True, **kwargs
+ ) -> BaseMaskedArray:
+ from pandas.core.arrays import IntegerArray
+
+ data = self._data.astype(int)
+ mask = self._mask
+ return IntegerArray(data, mask)._accumulate(name, skipna=skipna, **kwargs)
+
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 5dd55ff0f1fa2..230bdf8cdd1ea 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -93,6 +93,7 @@
isin,
unique1d,
)
+from pandas.core.array_algos import datetimelike_accumulations
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import (
NDArrayBackedExtensionArray,
@@ -1204,6 +1205,22 @@ def _time_shift(self, periods, freq=None):
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None, freq=self.freq)
+ def _accumulate(
+ self, name: str, *, skipna: bool = True, **kwargs
+ ) -> DatetimeLikeArrayT:
+
+ data = self._data.copy()
+
+ if name in {"cummin", "cummax"}:
+ op = getattr(datetimelike_accumulations, name)
+ data = op(data, skipna=skipna, **kwargs)
+
+ return type(self)._simple_new(data, freq=self.freq, dtype=self.dtype)
+
+ raise NotImplementedError(
+ f"Accumlation {name} not implemented for {type(self)}"
+ )
+
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
other_dtype = getattr(other, "dtype", None)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index bae14f4e560c2..59fd406407655 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -49,7 +49,10 @@
isin,
take,
)
-from pandas.core.array_algos import masked_reductions
+from pandas.core.array_algos import (
+ masked_accumulations,
+ masked_reductions,
+)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.indexers import check_array_indexer
@@ -457,3 +460,19 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
return libmissing.NA
return result
+
+ def _accumulate(
+ self, name: str, *, skipna: bool = True, **kwargs
+ ) -> BaseMaskedArray:
+ data = self._data
+ mask = self._mask
+
+ if name in {"cumsum", "cumprod", "cummin", "cummax"}:
+ op = getattr(masked_accumulations, name)
+ data, mask = op(data, mask, skipna=skipna, **kwargs)
+
+ return type(self)(data, mask, copy=False)
+
+ raise NotImplementedError(
+ "Accumlation {name} not implemented for BaseMaskedArray"
+ )
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 893644be23a0e..457e4de1ce015 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -57,6 +57,7 @@
from pandas.core import nanops
from pandas.core.algorithms import checked_add_with_arr
+from pandas.core.array_algos import datetimelike_accumulations
from pandas.core.arrays import (
IntegerArray,
datetimelike as dtl,
@@ -403,6 +404,24 @@ def std(
return self._box_func(result)
return self._from_backing_data(result)
+ # ----------------------------------------------------------------
+ # Accumulations
+
+ def _accumulate(
+ self, name: str, *, skipna: bool = True, **kwargs
+ ) -> TimedeltaArray:
+
+ data = self._data.copy()
+
+ if name in {"cumsum", "cumsum"}:
+ op = getattr(datetimelike_accumulations, name)
+ data = op(data, skipna=skipna, **kwargs)
+
+ return type(self)._simple_new(data, freq=None, dtype=self.dtype)
+
+ else:
+ return super()._accumulate(name, skipna=skipna, **kwargs)
+
# ----------------------------------------------------------------
# Rendering Methods
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a32ae7090ef8b..5c97263401259 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10320,7 +10320,14 @@ def _accum_func(self, name: str, func, axis=None, skipna=True, *args, **kwargs):
def block_accum_func(blk_values):
values = blk_values.T if hasattr(blk_values, "T") else blk_values
- result = nanops.na_accum_func(values, func, skipna=skipna)
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+ values = ensure_wrapped_if_datetimelike(values)
+
+ if isinstance(values, ExtensionArray):
+ result = values._accumulate(name, skipna=skipna, **kwargs)
+ else:
+ result = nanops.na_accum_func(values, func, skipna=skipna)
result = result.T if hasattr(result, "T") else result
return result
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 9cf3bdab40d0b..00d641a8e1895 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -41,6 +41,10 @@ class TestMyDtype(BaseDtypeTests):
``assert_series_equal`` on your base test class.
"""
+from pandas.tests.extension.base.accumulate import ( # noqa
+ BaseNoAccumulateTests,
+ BaseNumericAccumulateTests,
+)
from pandas.tests.extension.base.casting import BaseCastingTests # noqa
from pandas.tests.extension.base.constructors import BaseConstructorsTests # noqa
from pandas.tests.extension.base.dtype import BaseDtypeTests # noqa
diff --git a/pandas/tests/extension/base/accumulate.py b/pandas/tests/extension/base/accumulate.py
new file mode 100644
index 0000000000000..882f96572791c
--- /dev/null
+++ b/pandas/tests/extension/base/accumulate.py
@@ -0,0 +1,36 @@
+import pytest
+
+import pandas as pd
+from pandas.tests.extension.base.base import BaseExtensionTests
+
+
+class BaseAccumulateTests(BaseExtensionTests):
+ """
+ Accumulation specific tests. Generally these only
+ make sense for numeric/boolean operations.
+ """
+
+ def check_accumulate(self, s, op_name, skipna):
+ result = getattr(s, op_name)(skipna=skipna)
+ expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
+ self.assert_series_equal(result, expected, check_dtype=False)
+
+
+class BaseNoAccumulateTests(BaseAccumulateTests):
+ """ we don't define any accumulations """
+
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_accumulate_series_numeric(self, data, all_numeric_accumulations, skipna):
+ op_name = all_numeric_accumulations
+ s = pd.Series(data)
+
+ with pytest.raises(NotImplementedError):
+ getattr(s, op_name)(skipna=skipna)
+
+
+class BaseNumericAccumulateTests(BaseAccumulateTests):
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
+ op_name = all_numeric_accumulations
+ s = pd.Series(data)
+ self.check_accumulate(s, op_name, skipna)
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 3ef3beaa9c1b1..862aa5cd7dde5 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -393,5 +393,15 @@ class TestUnaryOps(base.BaseUnaryOpsTests):
pass
+class TestNumericAccumulation(base.BaseNumericAccumulateTests):
+ def check_accumulate(self, s, op_name, skipna):
+ result = getattr(s, op_name)(skipna=skipna)
+ expected = getattr(pd.Series(s.astype("float64")), op_name)(skipna=skipna)
+ tm.assert_series_equal(result, expected, check_dtype=False)
+
+
+# TODO parsing not yet supported
+# class TestParsing(base.BaseParsingTests):
+# pass
class TestParsing(base.BaseParsingTests):
pass
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 3f1f2c02c79f7..404f8132e5f1d 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -151,6 +151,10 @@ class TestReduce(base.BaseNoReduceTests):
pass
+class TestAccumulate(base.BaseNoAccumulateTests):
+ pass
+
+
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="Unobserved categories included")
def test_value_counts(self, all_data, dropna):
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index 617dfc694741e..213e37e015975 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -221,3 +221,7 @@ class TestPrinting(base.BasePrintingTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class TestNumericAccumulation(base.BaseNumericAccumulateTests):
+ pass
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 2305edc1e1327..50b57be6dd275 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -247,6 +247,54 @@ class TestBooleanReduce(base.BaseBooleanReduceTests):
pass
+class TestNumericAccumulation(base.BaseNumericAccumulateTests):
+ def check_accumulate(self, s, op_name, skipna):
+ result = getattr(s, op_name)(skipna=skipna)
+ expected = getattr(pd.Series(s.astype("float64")), op_name)(skipna=skipna)
+ tm.assert_series_equal(result, expected, check_dtype=False)
+ # # overwrite to ensure pd.NA is tested instead of np.nan
+ # # https://github.com/pandas-dev/pandas/issues/30958
+ # if op_name == "cumsum":
+ # if s.dtype.name.startswith("U"):
+ # expected_dtype = "uint64"
+ # else:
+ # expected_dtype = "int64"
+ # result = getattr(s, op_name)(skipna=skipna)
+ # expected = pd.Series(
+ # integer_array(
+ # getattr(s.astype("float64"), op_name)(skipna=skipna),
+ # dtype=expected_dtype,
+ # )
+ # )
+ # tm.assert_series_equal(result, expected)
+ # elif op_name in ["cummax", "cummin"]:
+ # expected_dtype = s.dtype
+ # result = getattr(s, op_name)(skipna=skipna)
+ # expected = pd.Series(
+ # integer_array(
+ # getattr(s.astype("float64"), op_name)(skipna=skipna),
+ # dtype=expected_dtype,
+ # )
+ # )
+ # tm.assert_series_equal(result, expected)
+ # elif op_name == "cumprod":
+ # if s.dtype.name.startswith("U"):
+ # expected_dtype = "uint64"
+ # else:
+ # expected_dtype = "int64"
+ # result = getattr(s[:20], op_name)(skipna=skipna)
+ # expected = pd.Series(
+ # integer_array(
+ # getattr(s[:20].astype("float64"), op_name)(skipna=skipna),
+ # dtype=expected_dtype,
+ # )
+ # )
+ # tm.assert_series_equal(result, expected)
+
+ # else:
+ # raise
+
+
class TestPrinting(base.BasePrintingTests):
pass
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 067fada5edcae..45b34ed0d300d 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -464,3 +464,7 @@ def test_EA_types(self, engine, data):
expected_msg = r".*must implement _from_sequence_of_strings.*"
with pytest.raises(NotImplementedError, match=expected_msg):
super().test_EA_types(engine, data)
+
+
+class TestNoNumericAccumulations(base.BaseNoAccumulateTests):
+ pass
| - [x] closes #28385
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Problem specific
- [X] Add abstract tests similar to pandas/tests/extension/base/reduce.py
- [X] Add TestFixtures for Accumulations
- [x] Implement Accumulation for IntegerArray
- [ ] Implement Accumulation for DecimalArray - out of scope
- [ ] Implement Accumulation for DatetileLikeArrayMixin ? - out of scope
- [x] Use BaseNoAccumulateTest where applicable - implemented this for categorical | https://api.github.com/repos/pandas-dev/pandas/pulls/28509 | 2019-09-18T19:32:41Z | 2021-10-04T00:13:12Z | null | 2021-10-04T00:13:12Z |
BUG: fix array_equivalent with mismatched tzawareness | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 594de703258a4..1c2f80b832201 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -55,8 +55,7 @@ cimport pandas._libs.util as util
from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN
from pandas._libs.tslib import array_to_datetime
-from pandas._libs.tslibs.nattype cimport NPY_NAT
-from pandas._libs.tslibs.nattype import NaT
+from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.conversion cimport convert_to_tsobject
from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
from pandas._libs.tslibs.timezones cimport get_timezone, tz_compare
@@ -525,9 +524,18 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool:
# we are either not equal or both nan
# I think None == None will be true here
- if not (PyObject_RichCompareBool(x, y, Py_EQ) or
- (x is None or is_nan(x)) and (y is None or is_nan(y))):
- return False
+ try:
+ if not (PyObject_RichCompareBool(x, y, Py_EQ) or
+ (x is None or is_nan(x)) and (y is None or is_nan(y))):
+ return False
+ except TypeError as err:
+ # Avoid raising TypeError on tzawareness mismatch
+ # TODO: This try/except can be removed if/when Timestamp
+ # comparisons are change dto match datetime, see GH#28507
+ if "tz-naive and tz-aware" in str(err):
+ return False
+ raise
+
return True
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 6dd032b9248ed..cd87fbef02e4f 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -445,8 +445,14 @@ def array_equivalent(left, right, strict_nan=False):
if not isinstance(right_value, float) or not np.isnan(right_value):
return False
else:
- if np.any(left_value != right_value):
- return False
+ try:
+ if np.any(left_value != right_value):
+ return False
+ except TypeError as err:
+ if "Cannot compare tz-naive" in str(err):
+ # tzawareness compat failure, see GH#28507
+ return False
+ raise
return True
# NaNs can occur in float and complex arrays.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6ef9d78ff9e97..c7e9dd5f0ea6d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4324,12 +4324,9 @@ def equals(self, other):
# if other is not object, use other's logic for coercion
return other.equals(self)
- try:
- return array_equivalent(
- com.values_from_object(self), com.values_from_object(other)
- )
- except Exception:
- return False
+ return array_equivalent(
+ com.values_from_object(self), com.values_from_object(other)
+ )
def identical(self, other):
"""
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 1a292d5bfcbb6..25b447e1df7d4 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -25,6 +25,9 @@
from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range
from pandas.util import testing as tm
+now = pd.Timestamp.now()
+utcnow = pd.Timestamp.now("UTC")
+
@pytest.mark.parametrize("notna_f", [notna, notnull])
def test_notna_notnull(notna_f):
@@ -332,6 +335,29 @@ def test_array_equivalent():
assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
+@pytest.mark.parametrize(
+ "lvalue, rvalue",
+ [
+ # There are 3 variants for each of lvalue and rvalue. We include all
+ # three for the tz-naive `now` and exclude the datetim64 variant
+ # for utcnow because it drops tzinfo.
+ (now, utcnow),
+ (now.to_datetime64(), utcnow),
+ (now.to_pydatetime(), utcnow),
+ (now, utcnow),
+ (now.to_datetime64(), utcnow.to_pydatetime()),
+ (now.to_pydatetime(), utcnow.to_pydatetime()),
+ ],
+)
+def test_array_equivalent_tzawareness(lvalue, rvalue):
+ # we shouldn't raise if comparing tzaware and tznaive datetimes
+ left = np.array([lvalue], dtype=object)
+ right = np.array([rvalue], dtype=object)
+
+ assert not array_equivalent(left, right, strict_nan=True)
+ assert not array_equivalent(left, right, strict_nan=False)
+
+
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
| Some of this can be simplified if we change the behavior discussed in #28507. | https://api.github.com/repos/pandas-dev/pandas/pulls/28508 | 2019-09-18T19:18:34Z | 2019-09-20T12:38:23Z | 2019-09-20T12:38:23Z | 2019-09-20T14:15:56Z |
BUG/CLN: DatetimeTZDtype.construct_from_string Exception | diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index aa7e6801ba431..fcdb89dd8a334 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -685,7 +685,7 @@ def __init__(self, unit="ns", tz=None):
tz = timezones.tz_standardize(tz)
elif tz is not None:
raise pytz.UnknownTimeZoneError(tz)
- elif tz is None:
+ if tz is None:
raise TypeError("A 'tz' is required.")
self._unit = unit
@@ -737,14 +737,17 @@ def construct_from_string(cls, string):
"""
if isinstance(string, str):
msg = "Could not construct DatetimeTZDtype from '{}'"
- try:
- match = cls._match.match(string)
- if match:
- d = match.groupdict()
+ match = cls._match.match(string)
+ if match:
+ d = match.groupdict()
+ try:
return cls(unit=d["unit"], tz=d["tz"])
- except Exception:
- # TODO(py3): Change this pass to `raise TypeError(msg) from e`
- pass
+ except (KeyError, TypeError, ValueError) as err:
+ # KeyError if maybe_get_tz tries and fails to get a
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
+ # TypeError if we pass a nonsense tz;
+ # ValueError if we pass a unit other than "ns"
+ raise TypeError(msg.format(string)) from err
raise TypeError(msg.format(string))
raise TypeError("Could not construct DatetimeTZDtype")
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 3288c9c584565..6013e58bc6929 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -248,9 +248,19 @@ def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]")
- with pytest.raises(TypeError, match="^Could not construct DatetimeTZDtype$"):
+ msg = "^Could not construct DatetimeTZDtype"
+ with pytest.raises(TypeError, match=msg):
+ # list instead of string
DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
+ with pytest.raises(TypeError, match=msg):
+ # non-nano unit
+ DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]")
+
+ with pytest.raises(TypeError, match=msg):
+ # dateutil str that returns None from gettz
+ DatetimeTZDtype.construct_from_string("datetime64[ns, dateutil/invalid]")
+
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
| The bug part of this is that "dateutil/something_invalid" fails to raise on master because dateutil.tz.gettz("something_invalid") returns None instead of raising. | https://api.github.com/repos/pandas-dev/pandas/pulls/28505 | 2019-09-18T17:33:49Z | 2019-09-20T12:34:28Z | 2019-09-20T12:34:28Z | 2019-09-20T14:25:15Z |
Fix traceback.format_exc(e) call | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index aee58f808d9e6..f28a01dd5a1df 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2289,9 +2289,7 @@ def wrapper(*args, **kwargs):
"Skipping test due to known errno"
" and error {error}".format(error=e)
)
-
- e_str = str(e)
-
+ e_str = traceback.format_exc()
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
"Skipping test because exception "
| As of https://docs.python.org/2/library/traceback.html#traceback.format_exc , format_exc() accepts an integer for traceback depth limit, not the traceback itself.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28503 | 2019-09-18T17:00:27Z | 2019-09-25T17:52:36Z | null | 2019-09-25T17:52:36Z |
Cleaned up JSON test with ambiguous DTA usage | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 4851f4bd27a7b..9c489c7cc17ec 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -813,20 +813,11 @@ def test_series_roundtrip_simple(self, orient, numpy):
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype):
- # TODO: see why tm.makeObjectSeries provides back DTA
- dtSeries = Series(
- [str(d) for d in self.objSeries],
- index=self.objSeries.index,
- name=self.objSeries.name,
- )
- data = dtSeries.to_json(orient=orient)
+ data = self.objSeries.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
- if dtype is False:
- expected = dtSeries.copy()
- else:
- expected = self.objSeries.copy()
+ expected = self.objSeries.copy()
if not numpy and PY35 and orient in ("index", "columns"):
expected = expected.sort_index()
@@ -897,6 +888,19 @@ def test_series_with_dtype(self):
expected = Series([4] * 3)
assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "dtype,expected",
+ [
+ (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
+ (False, Series([946684800000])),
+ ],
+ )
+ def test_series_with_dtype_datetime(self, dtype, expected):
+ s = Series(["2000-01-01"], dtype="datetime64[ns]")
+ data = s.to_json()
+ result = pd.read_json(data, typ="series", dtype=dtype)
+ assert_series_equal(result, expected)
+
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
| Follow up to #27838 enabled by #28444
I created a new test specific to the handling of DTA with `dtype=X` to provide a more logical separation of concerns | https://api.github.com/repos/pandas-dev/pandas/pulls/28502 | 2019-09-18T15:38:03Z | 2019-09-18T16:27:03Z | 2019-09-18T16:27:03Z | 2019-09-18T16:27:14Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.