title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: dropna incorrect with categoricals in pivot_table | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 2b64ef32c1eb6..97a5975dad9a6 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -29,6 +29,8 @@ Fixed Regressions
- Bug in :meth:`~DataFrame.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`)
- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`)
- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
+- Regression in :func:`pivot_table` where an ordered ``Categorical`` with missing
+ values for the pivot's ``index`` would give a mis-aligned result (:issue:`21133`)
.. _whatsnew_0231.performance:
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index e02420323704e..9a2ad5d13d77a 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -1,8 +1,10 @@
# pylint: disable=E1103
-from pandas.core.dtypes.common import is_list_like, is_scalar
+from pandas.core.dtypes.common import (
+ is_list_like, is_scalar, is_integer_dtype)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.reshape.concat import concat
from pandas.core.series import Series
@@ -79,8 +81,22 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
pass
values = list(values)
- grouped = data.groupby(keys, observed=dropna)
+ # group by the cartesian product of the grouper
+ # if we have a categorical
+ grouped = data.groupby(keys, observed=False)
agged = grouped.agg(aggfunc)
+ if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
+ agged = agged.dropna(how='all')
+
+ # gh-21133
+ # we want to down cast if
+ # the original values are ints
+ # as we grouped with a NaN value
+ # and then dropped, coercing to floats
+ for v in [v for v in values if v in data and v in agged]:
+ if (is_integer_dtype(data[v]) and
+ not is_integer_dtype(agged[v])):
+ agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
if table.index.nlevels > 1:
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index d2cf3fc11e165..3ec60d50f2792 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
@@ -16,6 +17,11 @@
from pandas.api.types import CategoricalDtype as CDT
+@pytest.fixture(params=[True, False])
+def dropna(request):
+ return request.param
+
+
class TestPivotTable(object):
def setup_method(self, method):
@@ -109,7 +115,6 @@ def test_pivot_table_categorical(self):
index=exp_index)
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize('dropna', [True, False])
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
@@ -137,6 +142,25 @@ def test_pivot_table_dropna_categoricals(self, dropna):
tm.assert_frame_equal(result, expected)
+ def test_pivot_with_non_observable_dropna(self, dropna):
+ # gh-21133
+ df = pd.DataFrame(
+ {'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
+ categories=['low', 'high'],
+ ordered=True),
+ 'B': range(5)})
+
+ result = df.pivot_table(index='A', values='B', dropna=dropna)
+ expected = pd.DataFrame(
+ {'B': [2, 3]},
+ index=pd.Index(
+ pd.Categorical.from_codes([0, 1],
+ categories=['low', 'high'],
+ ordered=True),
+ name='A'))
+
+ tm.assert_frame_equal(result, expected)
+
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
| closes #21133
| https://api.github.com/repos/pandas-dev/pandas/pulls/21252 | 2018-05-29T23:50:34Z | 2018-06-07T22:05:58Z | 2018-06-07T22:05:58Z | 2018-06-12T16:30:35Z |
Append Mode for ExcelWriter with openpyxl | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..6997ea84e5b83 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -8,6 +8,8 @@ v0.24.0
New features
~~~~~~~~~~~~
+- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
+
.. _whatsnew_0240.enhancements.other:
Other Enhancements
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 5608c29637447..e86d33742b266 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -804,6 +804,10 @@ class ExcelWriter(object):
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
+ mode : {'w' or 'a'}, default 'w'
+ File mode to use (write or append).
+
+ .. versionadded:: 0.24.0
Notes
-----
@@ -897,7 +901,8 @@ def save(self):
pass
def __init__(self, path, engine=None,
- date_format=None, datetime_format=None, **engine_kwargs):
+ date_format=None, datetime_format=None, mode='w',
+ **engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
@@ -919,6 +924,8 @@ def __init__(self, path, engine=None,
else:
self.datetime_format = datetime_format
+ self.mode = mode
+
def __fspath__(self):
return _stringify_path(self.path)
@@ -993,23 +1000,27 @@ class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
- def __init__(self, path, engine=None, **engine_kwargs):
+ def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
- super(_OpenpyxlWriter, self).__init__(path, **engine_kwargs)
+ super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
- # Create workbook object with default optimized_write=True.
- self.book = Workbook()
+ if self.mode == 'a': # Load from existing workbook
+ from openpyxl import load_workbook
+ book = load_workbook(self.path)
+ self.book = book
+ else:
+ # Create workbook object with default optimized_write=True.
+ self.book = Workbook()
- # Openpyxl 1.6.1 adds a dummy sheet. We remove it.
- if self.book.worksheets:
- try:
- self.book.remove(self.book.worksheets[0])
- except AttributeError:
+ if self.book.worksheets:
+ try:
+ self.book.remove(self.book.worksheets[0])
+ except AttributeError:
- # compat
- self.book.remove_sheet(self.book.worksheets[0])
+ # compat - for openpyxl <= 2.4
+ self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
@@ -1443,11 +1454,16 @@ class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
- def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
+ def __init__(self, path, engine=None, encoding=None, mode='w',
+ **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
- super(_XlwtWriter, self).__init__(path, **engine_kwargs)
+
+ if mode == 'a':
+ raise ValueError('Append mode is not supported with xlwt!')
+
+ super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
@@ -1713,13 +1729,18 @@ class _XlsxWriter(ExcelWriter):
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
- date_format=None, datetime_format=None, **engine_kwargs):
+ date_format=None, datetime_format=None, mode='w',
+ **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
+ if mode == 'a':
+ raise ValueError('Append mode is not supported with xlsxwriter!')
+
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
+ mode=mode,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 05423474f330a..2a225e6fe6a45 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -2006,6 +2006,31 @@ def test_write_cells_merge_styled(self, merge_cells, ext, engine):
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
+ @pytest.mark.parametrize("mode,expected", [
+ ('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
+ def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
+ import openpyxl
+ df = DataFrame([1], columns=['baz'])
+
+ with ensure_clean(ext) as f:
+ wb = openpyxl.Workbook()
+ wb.worksheets[0].title = 'foo'
+ wb.worksheets[0]['A1'].value = 'foo'
+ wb.create_sheet('bar')
+ wb.worksheets[1]['A1'].value = 'bar'
+ wb.save(f)
+
+ writer = ExcelWriter(f, engine=engine, mode=mode)
+ df.to_excel(writer, sheet_name='baz', index=False)
+ writer.save()
+
+ wb2 = openpyxl.load_workbook(f)
+ result = [sheet.title for sheet in wb2.worksheets]
+ assert result == expected
+
+ for index, cell_value in enumerate(expected):
+ assert wb2.worksheets[index]['A1'].value == cell_value
+
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
@@ -2060,6 +2085,13 @@ def test_to_excel_styleconverter(self, merge_cells, ext, engine):
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlwt!"
+
+ with ensure_clean(ext) as f:
+ with tm.assert_raises_regex(ValueError, msg):
+ ExcelWriter(f, engine=engine, mode='a')
+
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
@@ -2111,6 +2143,13 @@ def test_column_format(self, merge_cells, ext, engine):
assert read_num_format == num_format
+ def test_write_append_mode_raises(self, merge_cells, ext, engine):
+ msg = "Append mode is not supported with xlsxwriter!"
+
+ with ensure_clean(ext) as f:
+ with tm.assert_raises_regex(ValueError, msg):
+ ExcelWriter(f, engine=engine, mode='a')
+
class TestExcelWriterEngineTests(object):
| - [X] closes #3441
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21251 | 2018-05-29T23:40:00Z | 2018-06-19T01:03:15Z | 2018-06-19T01:03:15Z | 2019-05-02T21:13:37Z |
EHN: to_csv compression accepts file-like object | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index e931450cb5c01..55e76512b2440 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -13,7 +13,7 @@ New features
Other Enhancements
^^^^^^^^^^^^^^^^^^
- :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`)
--
+- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
-
.. _whatsnew_0240.api_breaking:
@@ -184,4 +184,3 @@ Other
-
-
-
-
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b09cb872a12fb..a463f573c82e0 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -105,6 +105,16 @@ def compression(request):
return request.param
+@pytest.fixture(params=['gzip', 'bz2', 'zip',
+ pytest.param('xz', marks=td.skip_if_no_lzma)])
+def compression_only(request):
+ """
+ Fixture for trying common compression types in compression tests excluding
+ uncompressed case
+ """
+ return request.param
+
+
@pytest.fixture(scope='module')
def datetime_tz_utc():
from datetime import timezone
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 22677b19192e1..0899e9cd87aba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1689,8 +1689,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
A string representing the compression to use in the output file.
- Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only
- used when the first argument is a filename.
+ Allowed values are 'gzip', 'bz2', 'zip', 'xz'.
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c9329e8b9e572..f25f73513df30 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3761,8 +3761,7 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='',
non-ascii, for python versions prior to 3
compression : string, optional
A string representing the compression to use in the output file.
- Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only
- used when the first argument is a filename.
+ Allowed values are 'gzip', 'bz2', 'zip', 'xz'.
date_format: string, default None
Format string for datetime objects.
decimal: string, default '.'
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 29b8d29af0808..0be2a180fbfa2 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -154,9 +154,9 @@ def save(self):
# GH 17778 handles compression for byte strings.
if not close and self.compression:
f.close()
- with open(self.path_or_buf, 'r') as f:
+ with open(f.name, 'r') as f:
data = f.read()
- f, handles = _get_handle(self.path_or_buf, self.mode,
+ f, handles = _get_handle(f.name, self.mode,
encoding=encoding,
compression=self.compression)
f.write(data)
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index bb7ee1b911fee..88e469731060d 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -231,13 +231,33 @@ def test_standardize_mapping():
columns=['X', 'Y', 'Z']),
Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
-def test_compression_size(obj, method, compression):
- if not compression:
- pytest.skip("only test compression case.")
+def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as filename:
- getattr(obj, method)(filename, compression=compression)
+ getattr(obj, method)(filename, compression=compression_only)
compressed = os.path.getsize(filename)
getattr(obj, method)(filename, compression=None)
uncompressed = os.path.getsize(filename)
assert uncompressed > compressed
+
+
+@pytest.mark.parametrize('obj', [
+ DataFrame(100 * [[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ columns=['X', 'Y', 'Z']),
+ Series(100 * [0.123456, 0.234567, 0.567567], name='X')])
+@pytest.mark.parametrize('method', ['to_csv'])
+def test_compression_size_fh(obj, method, compression_only):
+
+ with tm.ensure_clean() as filename:
+ with open(filename, 'w') as fh:
+ getattr(obj, method)(fh, compression=compression_only)
+ # GH 17778
+ assert fh.closed
+ compressed = os.path.getsize(filename)
+ with tm.ensure_clean() as filename:
+ with open(filename, 'w') as fh:
+ getattr(obj, method)(fh, compression=None)
+ assert not fh.closed
+ uncompressed = os.path.getsize(filename)
+ assert uncompressed > compressed
| - [x] closes #21227
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Handle an unsupported case when a file-like object instead of path passed into to_csv with compression. According to documentation, compression keyword requires it to be a filename.
At the moment, when a handle is passed, it appears to be uncompressed.
Tentative enhancement. | https://api.github.com/repos/pandas-dev/pandas/pulls/21249 | 2018-05-29T17:03:13Z | 2018-05-30T23:27:47Z | 2018-05-30T23:27:47Z | 2018-05-31T20:06:22Z |
BUG: Support to create DataFrame from list subclasses | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 60f6a66e07a7b..dfb6ab5b189b2 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -91,4 +91,17 @@ def time_frame_from_ndarray(self):
self.df = DataFrame(self.data)
+class FromLists(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 1000
+ M = 100
+ self.data = [[j for j in range(M)] for i in range(N)]
+
+ def time_frame_from_lists(self):
+ self.df = DataFrame(self.data)
+
+
from .pandas_vb_common import setup # noqa: F401
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index cc40e6d42a70b..54faa9ba75d9f 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1606,6 +1606,7 @@ Other
- Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`)
- Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`)
- Constructing a DataFrame with an index argument that wasn't already an instance of :class:`~pandas.core.Index` was broken in `4efb39f <https://github.com/pandas-dev/pandas/commit/4efb39f01f5880122fa38d91e12d217ef70fad9e>`_ (:issue:`22227`).
+- Bug in :func:`to_object_array` prevented list subclasses to be used to create :class:`DataFrame` (:issue:`21226`)
.. _whatsnew_0.24.0.contributors:
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 0c081986d83c5..2736133a79d8e 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2208,7 +2208,7 @@ def map_infer(ndarray arr, object f, bint convert=1):
return result
-def to_object_array(rows: list, min_width: int=0):
+def to_object_array(rows: object, int min_width=0):
"""
Convert a list of lists into an object array.
@@ -2229,20 +2229,22 @@ def to_object_array(rows: list, min_width: int=0):
cdef:
Py_ssize_t i, j, n, k, tmp
ndarray[object, ndim=2] result
+ list input_rows
list row
- n = len(rows)
+ input_rows = <list>rows
+ n = len(input_rows)
k = min_width
for i in range(n):
- tmp = len(rows[i])
+ tmp = len(input_rows[i])
if tmp > k:
k = tmp
result = np.empty((n, k), dtype=object)
for i in range(n):
- row = rows[i]
+ row = <list>input_rows[i]
for j in range(len(row)):
result[i, j] = row[j]
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 76e92042cbe6a..fa1117a647850 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2165,6 +2165,15 @@ def test_constructor_range_dtype(self, dtype):
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
+ def test_frame_from_list_subclass(self):
+ # GH21226
+ class List(list):
+ pass
+
+ expected = DataFrame([[1, 2, 3], [4, 5, 6]])
+ result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameConstructorWithDatetimeTZ(TestData):
| - [x] closes #21226
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Alternative fix could be that instead of doing `isinstance` check in frame's `_to_arrays` method:
```
if isinstance(data[0], (list, tuple)):
```
We would do:
```
if type(data[0]) in (list, tuple):
```
This would then assure that `to_object_array` is called really just with exactly lists. But currently there is a mismatch, `if` checks with subtypes, while `to_object_array` does not support them. I think it is better to support them so this merge request adds support for subtypes/subclasses of list. | https://api.github.com/repos/pandas-dev/pandas/pulls/21238 | 2018-05-29T04:16:58Z | 2018-12-15T21:21:24Z | 2018-12-15T21:21:24Z | 2018-12-15T23:23:09Z |
pct change bug issue 21200 | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 4d0373e4571da..80317d6806346 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -382,6 +382,33 @@ Backwards incompatible API changes
- :meth:`read_csv` will now raise a ``ValueError`` if a column with missing values is declared as having dtype ``bool`` (:issue:`20591`)
- The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. (:issue:`22420`)
+Percentage change on groupby changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fixed a bug where calling :func:`SeriesGroupBy.pct_change` or :func:`DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`).
+
+.. ipython:: python
+
+ df = pd.DataFrame({'grp': ['a', 'a', 'b'], 'foo': [1.0, 1.1, 2.2]})
+ df
+
+Previous behavior:
+
+.. code-block:: ipython
+
+ In [1]: df.groupby('grp').pct_change()
+ Out[1]:
+ foo
+ 0 NaN
+ 1 0.1
+ 2 1.0
+
+New behavior:
+
+.. ipython:: python
+
+ df.groupby('grp').pct_change()
+
.. _whatsnew_0240.api_breaking.deps:
Dependencies have increased minimum versions
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2f54f61818aa6..47ac1260d5179 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1221,9 +1221,15 @@ def _apply_to_column_groupbys(self, func):
return func(self)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
- """Calculate percent change of each value to previous entry in group"""
+ """Calcuate pct_change of each value to previous entry in group"""
+ # TODO: Remove this conditional when #23918 is fixed
+ if freq:
+ return self.apply(lambda x: x.pct_change(periods=periods,
+ fill_method=fill_method,
+ limit=limit, freq=freq))
filled = getattr(self, fill_method)(limit=limit)
- shifted = filled.shift(periods=periods, freq=freq)
+ fill_grp = filled.groupby(self.grouper.labels)
+ shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 45eaa3efa948a..4b915922cef93 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2025,11 +2025,10 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
fill_method=fill_method,
limit=limit, freq=freq,
axis=axis))
-
- filled = getattr(self, fill_method)(limit=limit).drop(
- self.grouper.names, axis=1)
- shifted = filled.shift(periods=periods, freq=freq)
-
+ filled = getattr(self, fill_method)(limit=limit)
+ filled = filled.drop(self.grouper.names, axis=1)
+ fill_grp = filled.groupby(self.grouper.labels)
+ shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
@Substitution(name='groupby')
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index dbbf6e583796f..b6361b4ad76a0 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -765,36 +765,36 @@ def test_pad_stable_sorting(fill_method):
@pytest.mark.parametrize("test_series", [True, False])
+@pytest.mark.parametrize("freq", [
+ None,
+ pytest.param('D', marks=pytest.mark.xfail(
+ reason='GH#23918 before method uses freq in vectorized approach'))])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
- (-1, 'bfill', None), (-1, 'bfill', 1)])
-def test_pct_change(test_series, periods, fill_method, limit):
- vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
- exp_vals = Series(vals).pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit).tolist()
-
- df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
- 'vals': vals * 2})
- grp = df.groupby('key')
-
- def get_result(grp_obj):
- return grp_obj.pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit)
+ (-1, 'bfill', None), (-1, 'bfill', 1),
+])
+def test_pct_change(test_series, freq, periods, fill_method, limit):
+ # GH 21200, 21621
+ vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
+ keys = ['a', 'b']
+ key_v = np.repeat(keys, len(vals))
+ df = DataFrame({'key': key_v, 'vals': vals * 2})
+
+ df_g = getattr(df.groupby('key'), fill_method)(limit=limit)
+ grp = df_g.groupby('key')
+
+ expected = grp['vals'].obj / grp['vals'].shift(periods) - 1
if test_series:
- exp = pd.Series(exp_vals * 2)
- exp.name = 'vals'
- grp = grp['vals']
- result = get_result(grp)
- tm.assert_series_equal(result, exp)
+ result = df.groupby('key')['vals'].pct_change(
+ periods=periods, fill_method=fill_method, limit=limit, freq=freq)
+ tm.assert_series_equal(result, expected)
else:
- exp = DataFrame({'vals': exp_vals * 2})
- result = get_result(grp)
- tm.assert_frame_equal(result, exp)
+ result = df.groupby('key').pct_change(
+ periods=periods, fill_method=fill_method, limit=limit, freq=freq)
+ tm.assert_frame_equal(result, expected.to_frame('vals'))
@pytest.mark.parametrize("func", [np.any, np.all])
| closes #21200
closes #21621
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This addresses [#21200](https://github.com/pandas-dev/pandas/issues/21200?_pjax=%23js-repo-pjax-container) . When there are different groups in a dataframe, by using groupby it is expected that the pct_change function be applied on each group. However, combining groupby with pct_change does not produce the correct result.
**Explanation:**
Currently the groupby method in the pandas series and pandas dataframe pct change method can implement a vectorized solution, rather than calling apply, if certain conditions are met. For the pandas series method, the vectorized solution is the only option.
This is certainly inappropriate in cases where the groupby object is non-monotonic in its group order. To solve this I've added a check for monotonicity in both the series and dataframe implementation, as well as adding the opportunity to call apply for the series method.
In addition, I have augmented the UT to accept a parameter that can shuffle the dataframe, in order to ensure that the correct calculation occurs.
**Concern**
One concern I have is that depending on whether the apply or vectorized solution is used within the pct change method (e.g. depending on whether the groupby object is monotonic or not), the result returned to the user may have a different index structure. While this was the case prior to the PR, It's not clear to me if (1) this is an acceptable design within the pandas infrastructure, and (2) whether or not this is within the scope of a single PR that was originally opened to address a very specific bug.
As this is my first pandas PR, I would certainly appreciate feedback, and will incorporate any constructive feedback into future issues. | https://api.github.com/repos/pandas-dev/pandas/pulls/21235 | 2018-05-28T23:10:49Z | 2018-12-12T12:41:38Z | 2018-12-12T12:41:37Z | 2023-02-12T20:13:28Z |
CLN: move common printing utilties to pandas.io.formats.printing | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f79288c167356..145d116261a82 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -57,17 +57,11 @@
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
-from pandas.io.formats.printing import pprint_thing
+from pandas.io.formats.printing import (
+ pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
-from pandas.core.config import get_option
from pandas.core.strings import StringMethods
-
-# simplify
-default_pprint = lambda x, max_seq_items=None: \
- pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
- max_seq_items=max_seq_items)
-
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
@@ -1034,7 +1028,7 @@ def _format_space(self):
@property
def _formatter_func(self):
"""
- Return the formatted data as a unicode string
+ Return the formatter function
"""
return default_pprint
@@ -1042,125 +1036,20 @@ def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
- from pandas.io.formats.console import get_console_size
- from pandas.io.formats.format import _get_adjustment
- display_width, _ = get_console_size()
- if display_width is None:
- display_width = get_option('display.width') or 80
- if name is None:
- name = self.__class__.__name__
-
- space1 = "\n%s" % (' ' * (len(name) + 1))
- space2 = "\n%s" % (' ' * (len(name) + 2))
-
- n = len(self)
- sep = ','
- max_seq_items = get_option('display.max_seq_items') or n
- formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
- # are we a truncated display
- is_truncated = n > max_seq_items
-
- # adj can optionally handle unicode eastern asian width
- adj = _get_adjustment()
-
- def _extend_line(s, line, value, display_width, next_line_prefix):
-
- if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
- display_width):
- s += line.rstrip()
- line = next_line_prefix
- line += value
- return s, line
-
- def best_len(values):
- if values:
- return max(adj.len(x) for x in values)
- else:
- return 0
-
- if n == 0:
- summary = '[], '
- elif n == 1:
- first = formatter(self[0])
- summary = '[%s], ' % first
- elif n == 2:
- first = formatter(self[0])
- last = formatter(self[-1])
- summary = '[%s, %s], ' % (first, last)
- else:
-
- if n > max_seq_items:
- n = min(max_seq_items // 2, 10)
- head = [formatter(x) for x in self[:n]]
- tail = [formatter(x) for x in self[-n:]]
- else:
- head = []
- tail = [formatter(x) for x in self]
-
- # adjust all values to max length if needed
- if is_justify:
-
- # however, if we are not truncated and we are only a single
- # line, then don't justify
- if (is_truncated or
- not (len(', '.join(head)) < display_width and
- len(', '.join(tail)) < display_width)):
- max_len = max(best_len(head), best_len(tail))
- head = [x.rjust(max_len) for x in head]
- tail = [x.rjust(max_len) for x in tail]
-
- summary = ""
- line = space2
-
- for i in range(len(head)):
- word = head[i] + sep + ' '
- summary, line = _extend_line(summary, line, word,
- display_width, space2)
-
- if is_truncated:
- # remove trailing space of last line
- summary += line.rstrip() + space2 + '...'
- line = space2
-
- for i in range(len(tail) - 1):
- word = tail[i] + sep + ' '
- summary, line = _extend_line(summary, line, word,
- display_width, space2)
-
- # last value: no sep added + 1 space of width used for trailing ','
- summary, line = _extend_line(summary, line, tail[-1],
- display_width - 2, space2)
- summary += line
- summary += '],'
-
- if len(summary) > (display_width):
- summary += space1
- else: # one row
- summary += ' '
-
- # remove initial space
- summary = '[' + summary[len(space2):]
-
- return summary
+ return format_object_summary(self, self._formatter_func,
+ is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
- attrs = []
- attrs.append(('dtype', "'%s'" % self.dtype))
- if self.name is not None:
- attrs.append(('name', default_pprint(self.name)))
- max_seq_items = get_option('display.max_seq_items') or len(self)
- if len(self) > max_seq_items:
- attrs.append(('length', len(self)))
- return attrs
+ return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index a101113da23ba..e22d7bce42841 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -261,3 +261,157 @@ class TableSchemaFormatter(BaseFormatter):
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
+
+
+default_pprint = lambda x, max_seq_items=None: \
+ pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
+ max_seq_items=max_seq_items)
+
+
+def format_object_summary(obj, formatter, is_justify=True, name=None):
+ """
+ Return the formatted obj as a unicode string
+
+ Parameters
+ ----------
+ obj : object
+ must be iterable and support __getitem__
+ formatter : callable
+ string formatter for an element
+ is_justify : boolean
+ should justify the display
+ name : name, optiona
+ defaults to the class name of the obj
+
+ Returns
+ -------
+ summary string
+
+ """
+ from pandas.io.formats.console import get_console_size
+ from pandas.io.formats.format import _get_adjustment
+
+ display_width, _ = get_console_size()
+ if display_width is None:
+ display_width = get_option('display.width') or 80
+ if name is None:
+ name = obj.__class__.__name__
+
+ space1 = "\n%s" % (' ' * (len(name) + 1))
+ space2 = "\n%s" % (' ' * (len(name) + 2))
+
+ n = len(obj)
+ sep = ','
+ max_seq_items = get_option('display.max_seq_items') or n
+
+ # are we a truncated display
+ is_truncated = n > max_seq_items
+
+ # adj can optionally handle unicode eastern asian width
+ adj = _get_adjustment()
+
+ def _extend_line(s, line, value, display_width, next_line_prefix):
+
+ if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
+ display_width):
+ s += line.rstrip()
+ line = next_line_prefix
+ line += value
+ return s, line
+
+ def best_len(values):
+ if values:
+ return max(adj.len(x) for x in values)
+ else:
+ return 0
+
+ if n == 0:
+ summary = '[], '
+ elif n == 1:
+ first = formatter(obj[0])
+ summary = '[%s], ' % first
+ elif n == 2:
+ first = formatter(obj[0])
+ last = formatter(obj[-1])
+ summary = '[%s, %s], ' % (first, last)
+ else:
+
+ if n > max_seq_items:
+ n = min(max_seq_items // 2, 10)
+ head = [formatter(x) for x in obj[:n]]
+ tail = [formatter(x) for x in obj[-n:]]
+ else:
+ head = []
+ tail = [formatter(x) for x in obj]
+
+ # adjust all values to max length if needed
+ if is_justify:
+
+ # however, if we are not truncated and we are only a single
+ # line, then don't justify
+ if (is_truncated or
+ not (len(', '.join(head)) < display_width and
+ len(', '.join(tail)) < display_width)):
+ max_len = max(best_len(head), best_len(tail))
+ head = [x.rjust(max_len) for x in head]
+ tail = [x.rjust(max_len) for x in tail]
+
+ summary = ""
+ line = space2
+
+ for i in range(len(head)):
+ word = head[i] + sep + ' '
+ summary, line = _extend_line(summary, line, word,
+ display_width, space2)
+
+ if is_truncated:
+ # remove trailing space of last line
+ summary += line.rstrip() + space2 + '...'
+ line = space2
+
+ for i in range(len(tail) - 1):
+ word = tail[i] + sep + ' '
+ summary, line = _extend_line(summary, line, word,
+ display_width, space2)
+
+ # last value: no sep added + 1 space of width used for trailing ','
+ summary, line = _extend_line(summary, line, tail[-1],
+ display_width - 2, space2)
+ summary += line
+ summary += '],'
+
+ if len(summary) > (display_width):
+ summary += space1
+ else: # one row
+ summary += ' '
+
+ # remove initial space
+ summary = '[' + summary[len(space2):]
+
+ return summary
+
+
+def format_object_attrs(obj):
+ """
+ Return a list of tuples of the (attr, formatted_value)
+ for common attrs, including dtype, name, length
+
+ Parameters
+ ----------
+ obj : object
+ must be iterable
+
+ Returns
+ -------
+ list
+
+ """
+ attrs = []
+ if hasattr(obj, 'dtype'):
+ attrs.append(('dtype', "'{}'".format(obj.dtype)))
+ if getattr(obj, 'name', None) is not None:
+ attrs.append(('name', default_pprint(obj.name)))
+ max_seq_items = get_option('display.max_seq_items') or len(obj)
+ if len(obj) > max_seq_items:
+ attrs.append(('length', len(obj)))
+ return attrs
| https://api.github.com/repos/pandas-dev/pandas/pulls/21234 | 2018-05-28T23:00:27Z | 2018-05-29T01:46:20Z | 2018-05-29T01:46:20Z | 2018-05-29T01:46:20Z | |
BUG: df.agg, df.transform and df.apply use different methods when axis=1 than when axis=0 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 3b04d9937d7f2..04c2e253cfa5d 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -475,7 +475,9 @@ Numeric
- Bug in :class:`Series` ``__rmatmul__`` doesn't support matrix vector multiplication (:issue:`21530`)
- Bug in :func:`factorize` fails with read-only array (:issue:`12813`)
- Fixed bug in :func:`unique` handled signed zeros inconsistently: for some inputs 0.0 and -0.0 were treated as equal and for some inputs as different. Now they are treated as equal for all inputs (:issue:`21866`)
--
+- Bug in :meth:`DataFrame.agg`, :meth:`DataFrame.transform` and :meth:`DataFrame.apply` where,
+ when supplied with a list of functions and ``axis=1`` (e.g. ``df.apply(['sum', 'mean'], axis=1)``),
+ a ``TypeError`` was wrongly raised. For all three methods such calculation are now done correctly. (:issue:`16679`).
-
Strings
diff --git a/pandas/conftest.py b/pandas/conftest.py
index a979c3fc3bfac..e878b32fcad7b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -60,6 +60,26 @@ def spmatrix(request):
return getattr(sparse, request.param + '_matrix')
+@pytest.fixture(params=[0, 1, 'index', 'columns'],
+ ids=lambda x: "axis {!r}".format(x))
+def axis(request):
+ """
+ Fixture for returning the axis numbers of a DataFrame.
+ """
+ return request.param
+
+
+axis_frame = axis
+
+
+@pytest.fixture(params=[0, 'index'], ids=lambda x: "axis {!r}".format(x))
+def axis_series(request):
+ """
+ Fixture for returning the axis numbers of a Series.
+ """
+ return request.param
+
+
@pytest.fixture
def ip():
"""
@@ -103,6 +123,41 @@ def all_arithmetic_operators(request):
return request.param
+# use sorted as dicts in py<3.6 have random order, which xdist doesn't like
+_cython_table = sorted(((key, value) for key, value in
+ pd.core.base.SelectionMixin._cython_table.items()),
+ key=lambda x: x[0].__class__.__name__)
+
+
+@pytest.fixture(params=_cython_table)
+def cython_table_items(request):
+ return request.param
+
+
+def _get_cython_table_params(ndframe, func_names_and_expected):
+ """combine frame, functions from SelectionMixin._cython_table
+ keys and expected result.
+
+ Parameters
+ ----------
+ ndframe : DataFrame or Series
+ func_names_and_expected : Sequence of two items
+ The first item is a name of a NDFrame method ('sum', 'prod') etc.
+ The second item is the expected return value
+
+ Returns
+ -------
+ results : list
+ List of three items (DataFrame, function, expected result)
+ """
+ results = []
+ for func_name, expected in func_names_and_expected:
+ results.append((ndframe, func_name, expected))
+ results += [(ndframe, func, expected) for func, name in _cython_table
+ if name == func_name]
+ return results
+
+
@pytest.fixture(params=['__eq__', '__ne__', '__le__',
'__lt__', '__ge__', '__gt__'])
def all_compare_operators(request):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 27ac5038276d6..989becbf133ca 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -5,6 +5,8 @@
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
+ is_dict_like,
+ is_list_like,
is_sequence)
from pandas.util._decorators import cache_readonly
@@ -105,6 +107,11 @@ def agg_axis(self):
def get_result(self):
""" compute the results """
+ # dispatch to agg
+ if is_list_like(self.f) or is_dict_like(self.f):
+ return self.obj.aggregate(self.f, axis=self.axis,
+ *self.args, **self.kwds)
+
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
@@ -308,15 +315,6 @@ def wrap_results(self):
class FrameRowApply(FrameApply):
axis = 0
- def get_result(self):
-
- # dispatch to agg
- if isinstance(self.f, (list, dict)):
- return self.obj.aggregate(self.f, axis=self.axis,
- *self.args, **self.kwds)
-
- return super(FrameRowApply, self).get_result()
-
def apply_broadcast(self):
return super(FrameRowApply, self).apply_broadcast(self.obj)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 16332738ce610..a66b9a7e92e85 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6070,19 +6070,34 @@ def _gotitem(self,
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
- # TODO: flipped axis
result = None
- if axis == 0:
- try:
- result, how = self._aggregate(func, axis=0, *args, **kwargs)
- except TypeError:
- pass
+ try:
+ result, how = self._aggregate(func, axis=axis, *args, **kwargs)
+ except TypeError:
+ pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
+ def _aggregate(self, arg, axis=0, *args, **kwargs):
+ if axis == 1:
+ # NDFrame.aggregate returns a tuple, and we need to transpose
+ # only result
+ result, how = (super(DataFrame, self.T)
+ ._aggregate(arg, *args, **kwargs))
+ result = result.T if result is not None else result
+ return result, how
+ return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
+
agg = aggregate
+ @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
+ def transform(self, func, axis=0, *args, **kwargs):
+ axis = self._get_axis_number(axis)
+ if axis == 1:
+ return super(DataFrame, self.T).transform(func, *args, **kwargs).T
+ return super(DataFrame, self).transform(func, *args, **kwargs)
+
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 16105014bf74e..1126500fa55b2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9193,16 +9193,14 @@ def ewm(self, com=None, span=None, halflife=None, alpha=None,
cls.ewm = ewm
- @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
- def transform(self, func, *args, **kwargs):
- result = self.agg(func, *args, **kwargs)
- if is_scalar(result) or len(result) != len(self):
- raise ValueError("transforms cannot produce "
- "aggregated results")
+ @Appender(_shared_docs['transform'] % _shared_doc_kwargs)
+ def transform(self, func, *args, **kwargs):
+ result = self.agg(func, *args, **kwargs)
+ if is_scalar(result) or len(result) != len(self):
+ raise ValueError("transforms cannot produce "
+ "aggregated results")
- return result
-
- cls.transform = transform
+ return result
# ----------------------------------------------------------------------
# Misc methods
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index dfb2961befe35..e038588b76ffd 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -5,7 +5,9 @@
import pytest
import operator
+from collections import OrderedDict
from datetime import datetime
+from itertools import chain
import warnings
import numpy as np
@@ -18,6 +20,7 @@
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
+from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
@@ -813,81 +816,97 @@ def test_consistency_for_boxed(self, box):
assert_frame_equal(result, expected)
-def zip_frames(*frames):
+def zip_frames(frames, axis=1):
"""
- take a list of frames, zip the columns together for each
- assume that these all have the first frame columns
+ take a list of frames, zip them together under the
+ assumption that these all have the first frames' index/columns.
- return a new frame
+ Returns
+ -------
+ new_frame : DataFrame
"""
- columns = frames[0].columns
- zipped = [f[c] for c in columns for f in frames]
- return pd.concat(zipped, axis=1)
+ if axis == 1:
+ columns = frames[0].columns
+ zipped = [f.loc[:, c] for c in columns for f in frames]
+ return pd.concat(zipped, axis=1)
+ else:
+ index = frames[0].index
+ zipped = [f.loc[i, :] for i in index for f in frames]
+ return pd.DataFrame(zipped)
class TestDataFrameAggregate(TestData):
- def test_agg_transform(self):
+ def test_agg_transform(self, axis):
+ other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
- f_sqrt = np.sqrt(self.frame)
f_abs = np.abs(self.frame)
+ f_sqrt = np.sqrt(self.frame)
# ufunc
- result = self.frame.transform(np.sqrt)
+ result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
- result = self.frame.apply(np.sqrt)
+ result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
- result = self.frame.transform(np.sqrt)
+ result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
- result = self.frame.apply([np.sqrt])
+ result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
- expected.columns = pd.MultiIndex.from_product(
- [self.frame.columns, ['sqrt']])
+ if axis in {0, 'index'}:
+ expected.columns = pd.MultiIndex.from_product(
+ [self.frame.columns, ['sqrt']])
+ else:
+ expected.index = pd.MultiIndex.from_product(
+ [self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
- result = self.frame.transform([np.sqrt])
+ result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
- expected = zip_frames(f_sqrt, f_abs)
- expected.columns = pd.MultiIndex.from_product(
- [self.frame.columns, ['sqrt', 'absolute']])
- result = self.frame.apply([np.sqrt, np.abs])
+ result = self.frame.apply([np.abs, np.sqrt], axis=axis)
+ expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
+ if axis in {0, 'index'}:
+ expected.columns = pd.MultiIndex.from_product(
+ [self.frame.columns, ['absolute', 'sqrt']])
+ else:
+ expected.index = pd.MultiIndex.from_product(
+ [self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
- result = self.frame.transform(['sqrt', np.abs])
+ result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
- def test_transform_and_agg_err(self):
+ def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
- self.frame.transform(['max', 'min'])
+ self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
- self.frame.agg(['max', 'sqrt'])
+ self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
- self.frame.transform(['max', 'sqrt'])
+ self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
- df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
+ df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
@@ -946,43 +965,57 @@ def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
- def test_agg_reduce(self):
+ def test_agg_reduce(self, axis):
+ other_axis = 1 if axis in {0, 'index'} else 0
+ name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
+
# all reducers
- expected = zip_frames(self.frame.mean().to_frame(),
- self.frame.max().to_frame(),
- self.frame.sum().to_frame()).T
- expected.index = ['mean', 'max', 'sum']
- result = self.frame.agg(['mean', 'max', 'sum'])
+ expected = pd.concat([self.frame.mean(axis=axis),
+ self.frame.max(axis=axis),
+ self.frame.sum(axis=axis),
+ ], axis=1)
+ expected.columns = ['mean', 'max', 'sum']
+ expected = expected.T if axis in {0, 'index'} else expected
+
+ result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
- result = self.frame.agg({'A': 'mean', 'B': 'sum'})
- expected = Series([self.frame.A.mean(), self.frame.B.sum()],
- index=['A', 'B'])
- assert_series_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
+ result = self.frame.agg(func, axis=axis)
+ expected = Series([self.frame.loc(other_axis)[name1].mean(),
+ self.frame.loc(other_axis)[name2].sum()],
+ index=[name1, name2])
+ assert_series_equal(result, expected)
# dict input with lists
- result = self.frame.agg({'A': ['mean'], 'B': ['sum']})
- expected = DataFrame({'A': Series([self.frame.A.mean()],
- index=['mean']),
- 'B': Series([self.frame.B.sum()],
- index=['sum'])})
- assert_frame_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, ['mean']), (name2, ['sum'])])
+ result = self.frame.agg(func, axis=axis)
+ expected = DataFrame({
+ name1: Series([self.frame.loc(other_axis)[name1].mean()],
+ index=['mean']),
+ name2: Series([self.frame.loc(other_axis)[name2].sum()],
+ index=['sum'])})
+ expected = expected.T if axis in {1, 'columns'} else expected
+ assert_frame_equal(result, expected)
# dict input with lists with multiple
- result = self.frame.agg({'A': ['mean', 'sum'],
- 'B': ['sum', 'max']})
- expected = DataFrame({'A': Series([self.frame.A.mean(),
- self.frame.A.sum()],
- index=['mean', 'sum']),
- 'B': Series([self.frame.B.sum(),
- self.frame.B.max()],
- index=['sum', 'max'])})
- assert_frame_equal(result.reindex_like(expected), expected)
+ func = OrderedDict([(name1, ['mean', 'sum']), (name2, ['sum', 'max'])])
+ result = self.frame.agg(func, axis=axis)
+ expected = DataFrame(OrderedDict([
+ (name1, Series([self.frame.loc(other_axis)[name1].mean(),
+ self.frame.loc(other_axis)[name1].sum()],
+ index=['mean', 'sum'])),
+ (name2, Series([self.frame.loc(other_axis)[name2].sum(),
+ self.frame.loc(other_axis)[name2].max()],
+ index=['sum', 'max'])),
+ ]))
+ expected = expected.T if axis in {1, 'columns'} else expected
+ assert_frame_equal(result, expected)
def test_nuiscance_columns(self):
@@ -1056,3 +1089,67 @@ def test_non_callable_aggregates(self):
expected = df.size
assert result == expected
+
+ @pytest.mark.parametrize("df, func, expected", chain(
+ _get_cython_table_params(
+ DataFrame(), [
+ ('sum', Series()),
+ ('max', Series()),
+ ('min', Series()),
+ ('all', Series(dtype=bool)),
+ ('any', Series(dtype=bool)),
+ ('mean', Series()),
+ ('prod', Series()),
+ ('std', Series()),
+ ('var', Series()),
+ ('median', Series()),
+ ]),
+ _get_cython_table_params(
+ DataFrame([[np.nan, 1], [1, 2]]), [
+ ('sum', Series([1., 3])),
+ ('max', Series([1., 2])),
+ ('min', Series([1., 1])),
+ ('all', Series([True, True])),
+ ('any', Series([True, True])),
+ ('mean', Series([1, 1.5])),
+ ('prod', Series([1., 2])),
+ ('std', Series([np.nan, 0.707107])),
+ ('var', Series([np.nan, 0.5])),
+ ('median', Series([1, 1.5])),
+ ]),
+ ))
+ def test_agg_cython_table(self, df, func, expected, axis):
+ # GH21224
+ # test reducing functions in
+ # pandas.core.base.SelectionMixin._cython_table
+ result = df.agg(func, axis=axis)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("df, func, expected", chain(
+ _get_cython_table_params(
+ DataFrame(), [
+ ('cumprod', DataFrame()),
+ ('cumsum', DataFrame()),
+ ]),
+ _get_cython_table_params(
+ DataFrame([[np.nan, 1], [1, 2]]), [
+ ('cumprod', DataFrame([[np.nan, 1], [1., 2.]])),
+ ('cumsum', DataFrame([[np.nan, 1], [1., 3.]])),
+ ]),
+ ))
+ def test_agg_cython_table_transform(self, df, func, expected, axis):
+ # GH21224
+ # test transforming functions in
+ # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
+ result = df.agg(func, axis=axis)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("df, func, expected", _get_cython_table_params(
+ DataFrame([['a', 'b'], ['b', 'a']]), [
+ ['cumprod', TypeError],
+ ]),
+ )
+ def test_agg_cython_table_raises(self, df, func, expected, axis):
+ # GH21224
+ with pytest.raises(expected):
+ df.agg(func, axis=axis)
diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py
index 8b133e654a869..8e4d28fc796df 100644
--- a/pandas/tests/generic/test_label_or_level_utils.py
+++ b/pandas/tests/generic/test_label_or_level_utils.py
@@ -76,14 +76,13 @@ def assert_level_reference(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_or_label_reference_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
@@ -91,11 +90,10 @@ def test_is_level_or_label_reference_df_simple(df_levels, axis):
assert_label_reference(df_levels, expected_labels, axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_reference_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
@@ -165,11 +163,10 @@ def test_is_label_or_level_reference_panel_error(panel):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df_ambig has both an on-axis level and off-axis label named L1
@@ -179,7 +176,7 @@ def test_check_label_or_level_ambiguity_df(df_ambig, axis):
assert df_ambig._check_label_or_level_ambiguity('L1', axis=axis)
warning_msg = w[0].message.args[0]
- if axis == 0:
+ if axis in {0, 'index'}:
assert warning_msg.startswith("'L1' is both an index level "
"and a column label")
else:
@@ -239,7 +236,7 @@ def test_check_label_or_level_ambiguity_panel_error(panel):
# ===============================
def assert_label_values(frame, labels, axis):
for label in labels:
- if axis == 0:
+ if axis in {0, 'index'}:
expected = frame[label]._values
else:
expected = frame.loc[label]._values
@@ -251,7 +248,7 @@ def assert_label_values(frame, labels, axis):
def assert_level_values(frame, levels, axis):
for level in levels:
- if axis == 0:
+ if axis in {0, 'index'}:
expected = frame.index.get_level_values(level=level)._values
else:
expected = (frame.columns
@@ -264,14 +261,13 @@ def assert_level_values(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
@@ -279,11 +275,10 @@ def test_get_label_or_level_values_df_simple(df_levels, axis):
assert_level_values(df_levels, expected_levels, axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
@@ -300,11 +295,10 @@ def test_get_label_or_level_values_df_ambig(df_ambig, axis):
assert_label_values(df_ambig, ['L3'], axis=axis)
-@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_duplabels = df_duplabels.T
# df has unambiguous level 'L1'
@@ -314,7 +308,7 @@ def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
assert_label_values(df_duplabels, ['L3'], axis=axis)
# df has duplicate labels 'L2'
- if axis == 0:
+ if axis in {0, 'index'}:
expected_msg = "The column label 'L2' is not unique"
else:
expected_msg = "The index label 'L2' is not unique"
@@ -361,7 +355,7 @@ def assert_labels_dropped(frame, labels, axis):
for label in labels:
df_dropped = frame._drop_labels_or_levels(label, axis=axis)
- if axis == 0:
+ if axis in {0, 'index'}:
assert label in frame.columns
assert label not in df_dropped.columns
else:
@@ -373,7 +367,7 @@ def assert_levels_dropped(frame, levels, axis):
for level in levels:
df_dropped = frame._drop_labels_or_levels(level, axis=axis)
- if axis == 0:
+ if axis in {0, 'index'}:
assert level in frame.index.names
assert level not in df_dropped.index.names
else:
@@ -383,14 +377,13 @@ def assert_levels_dropped(frame, levels, axis):
# DataFrame
# ---------
-@pytest.mark.parametrize('axis', [0, 1])
def test_drop_labels_or_levels_df(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
- if axis == 1:
+ if axis in {1, 'columns'}:
df_levels = df_levels.T
# Perform checks
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index b28b9f342695f..b717d75d835d0 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -4,6 +4,7 @@
import pytest
from collections import Counter, defaultdict, OrderedDict
+from itertools import chain
import numpy as np
import pandas as pd
@@ -11,8 +12,10 @@
from pandas import (Index, Series, DataFrame, isna)
from pandas.compat import lrange
from pandas import compat
-from pandas.util.testing import assert_series_equal, assert_frame_equal
+from pandas.util.testing import (assert_series_equal,
+ assert_frame_equal)
import pandas.util.testing as tm
+from pandas.conftest import _get_cython_table_params
from .common import TestData
@@ -331,6 +334,85 @@ def test_non_callable_aggregates(self):
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series(), [
+ ('sum', 0),
+ ('max', np.nan),
+ ('min', np.nan),
+ ('all', True),
+ ('any', False),
+ ('mean', np.nan),
+ ('prod', 1),
+ ('std', np.nan),
+ ('var', np.nan),
+ ('median', np.nan),
+ ]),
+ _get_cython_table_params(Series([np.nan, 1, 2, 3]), [
+ ('sum', 6),
+ ('max', 3),
+ ('min', 1),
+ ('all', True),
+ ('any', True),
+ ('mean', 2),
+ ('prod', 6),
+ ('std', 1),
+ ('var', 1),
+ ('median', 2),
+ ]),
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('sum', 'abc'),
+ ('max', 'c'),
+ ('min', 'a'),
+ ('all', 'c'), # see GH12863
+ ('any', 'a'),
+ ]),
+ ))
+ def test_agg_cython_table(self, series, func, expected):
+ # GH21224
+ # test reducing functions in
+ # pandas.core.base.SelectionMixin._cython_table
+ result = series.agg(func)
+ if tm.is_number(expected):
+ assert np.isclose(result, expected, equal_nan=True)
+ else:
+ assert result == expected
+
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series(), [
+ ('cumprod', Series([], Index([]))),
+ ('cumsum', Series([], Index([]))),
+ ]),
+ _get_cython_table_params(Series([np.nan, 1, 2, 3]), [
+ ('cumprod', Series([np.nan, 1, 2, 6])),
+ ('cumsum', Series([np.nan, 1, 3, 6])),
+ ]),
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('cumsum', Series(['a', 'ab', 'abc'])),
+ ]),
+ ))
+ def test_agg_cython_table_transform(self, series, func, expected):
+ # GH21224
+ # test transforming functions in
+ # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
+ result = series.agg(func)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("series, func, expected", chain(
+ _get_cython_table_params(Series('a b c'.split()), [
+ ('mean', TypeError), # mean raises TypeError
+ ('prod', TypeError),
+ ('std', TypeError),
+ ('var', TypeError),
+ ('median', TypeError),
+ ('cumprod', TypeError),
+ ])
+ ))
+ def test_agg_cython_table_raises(self, series, func, expected):
+ # GH21224
+ with pytest.raises(expected):
+ # e.g. Series('a b'.split()).cumprod() will raise
+ series.agg(func)
+
class TestSeriesMap(TestData):
| - [x] closes #16679
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is a splitoff from #21123, to only fix #16679. #19629 will be fixed in a separate PR afterwards.
Passing functions to ``df.agg``, ``df.transform`` and ``df.apply`` may use different methods when ``axis=1``, than when,``axis=0``, and give different results when NaNs are supplied.
Explanation
-------------
Passing the functions in ``SelectionMixin._cython_table`` to ``df.agg`` should defer to use the relevant cython functions. This currently works as expected when ``axis=0``, but not when ``axis=1``.
The reason for this difference is that ``df.aggregate`` currently defers to ``df._aggregate`` when ``axis=0``, but defers to ``df.apply``, when ``axis=1``, and these may give different result when passed functions and the series/frame contains Nan values. I've solved this by transposing df in ``DataFrame._aggragate`` when ``axis=1``, and passing the possibly transposed on to the super method.
Also, ``df.apply`` delegates back to ``df.agg``, when given lists or dicts as inputs, but only works when axis=0. This PR fixes this, so axis=1 works the as axis=0.
The tests have been heavily parametrized, helping ensure that various ways to call the methods now give correct results for both axes.
@WillAyd @jreback (reviewers of #21123) | https://api.github.com/repos/pandas-dev/pandas/pulls/21224 | 2018-05-27T21:40:04Z | 2018-07-28T14:24:49Z | 2018-07-28T14:24:49Z | 2018-09-16T23:20:10Z |
ENH: Merge DataFrame and Series using `on` (GH21220) | diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index d78e476dd7837..98914c13d4d31 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -506,8 +506,8 @@ You can also pass a list of dicts or Series:
.. _merging.join:
-Database-style DataFrame joining/merging
-----------------------------------------
+Database-style DataFrame or named Series joining/merging
+--------------------------------------------------------
pandas has full-featured, **high performance** in-memory join operations
idiomatically very similar to relational databases like SQL. These methods
@@ -522,7 +522,7 @@ Users who are familiar with SQL but new to pandas might be interested in a
:ref:`comparison with SQL<compare_with_sql.join>`.
pandas provides a single function, :func:`~pandas.merge`, as the entry point for
-all standard database join operations between ``DataFrame`` objects:
+all standard database join operations between ``DataFrame`` or named ``Series`` objects:
::
@@ -531,23 +531,23 @@ all standard database join operations between ``DataFrame`` objects:
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None)
-* ``left``: A DataFrame object.
-* ``right``: Another DataFrame object.
+* ``left``: A DataFrame or named Series object.
+* ``right``: Another DataFrame or named Series object.
* ``on``: Column or index level names to join on. Must be found in both the left
- and right DataFrame objects. If not passed and ``left_index`` and
+ and right DataFrame and/or Series objects. If not passed and ``left_index`` and
``right_index`` are ``False``, the intersection of the columns in the
- DataFrames will be inferred to be the join keys.
-* ``left_on``: Columns or index levels from the left DataFrame to use as
+ DataFrames and/or Series will be inferred to be the join keys.
+* ``left_on``: Columns or index levels from the left DataFrame or Series to use as
keys. Can either be column names, index level names, or arrays with length
- equal to the length of the DataFrame.
-* ``right_on``: Columns or index levels from the right DataFrame to use as
+ equal to the length of the DataFrame or Series.
+* ``right_on``: Columns or index levels from the right DataFrame or Series to use as
keys. Can either be column names, index level names, or arrays with length
- equal to the length of the DataFrame.
+ equal to the length of the DataFrame or Series.
* ``left_index``: If ``True``, use the index (row labels) from the left
- DataFrame as its join key(s). In the case of a DataFrame with a MultiIndex
+ DataFrame or Series as its join key(s). In the case of a DataFrame or Series with a MultiIndex
(hierarchical), the number of levels must match the number of join keys
- from the right DataFrame.
-* ``right_index``: Same usage as ``left_index`` for the right DataFrame
+ from the right DataFrame or Series.
+* ``right_index``: Same usage as ``left_index`` for the right DataFrame or Series
* ``how``: One of ``'left'``, ``'right'``, ``'outer'``, ``'inner'``. Defaults
to ``inner``. See below for more detailed description of each method.
* ``sort``: Sort the result DataFrame by the join keys in lexicographical
@@ -555,7 +555,7 @@ all standard database join operations between ``DataFrame`` objects:
substantially in many cases.
* ``suffixes``: A tuple of string suffixes to apply to overlapping
columns. Defaults to ``('_x', '_y')``.
-* ``copy``: Always copy data (default ``True``) from the passed DataFrame
+* ``copy``: Always copy data (default ``True``) from the passed DataFrame or named Series
objects, even when reindexing is not necessary. Cannot be avoided in many
cases but may improve performance / memory usage. The cases where copying
can be avoided are somewhat pathological but this option is provided
@@ -563,8 +563,8 @@ all standard database join operations between ``DataFrame`` objects:
* ``indicator``: Add a column to the output DataFrame called ``_merge``
with information on the source of each row. ``_merge`` is Categorical-type
and takes on a value of ``left_only`` for observations whose merge key
- only appears in ``'left'`` DataFrame, ``right_only`` for observations whose
- merge key only appears in ``'right'`` DataFrame, and ``both`` if the
+ only appears in ``'left'`` DataFrame or Series, ``right_only`` for observations whose
+ merge key only appears in ``'right'`` DataFrame or Series, and ``both`` if the
observation's merge key is found in both.
* ``validate`` : string, default None.
@@ -584,10 +584,10 @@ all standard database join operations between ``DataFrame`` objects:
Support for specifying index levels as the ``on``, ``left_on``, and
``right_on`` parameters was added in version 0.23.0.
+ Support for merging named ``Series`` objects was added in version 0.24.0.
-The return type will be the same as ``left``. If ``left`` is a ``DataFrame``
-and ``right`` is a subclass of DataFrame, the return type will still be
-``DataFrame``.
+The return type will be the same as ``left``. If ``left`` is a ``DataFrame`` or named ``Series``
+and ``right`` is a subclass of ``DataFrame``, the return type will still be ``DataFrame``.
``merge`` is a function in the pandas namespace, and it is also available as a
``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b015495b095b6..769bda992956b 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -12,6 +12,7 @@ v0.24.0 (Month XX, 2018)
New features
~~~~~~~~~~~~
+- :func:`merge` now directly allows merge between objects of type ``DataFrame`` and named ``Series``, without the need to convert the ``Series`` object into a ``DataFrame`` beforehand (:issue:`21220`)
- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4578d2ac08199..873170eb9813b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -137,8 +137,8 @@
"""
_merge_doc = """
-Merge DataFrame objects by performing a database-style join operation by
-columns or indexes.
+Merge DataFrame or named Series objects by performing a database-style join
+operation by columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
@@ -146,7 +146,7 @@
Parameters
----------%s
-right : DataFrame, Series or dict
+right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
@@ -217,6 +217,7 @@
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
+Support for merging named Series objects was added in version 0.24.0
See Also
--------
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 25d8cb4e804a2..caaeb1bad2358 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -11,7 +11,7 @@
import pandas.compat as compat
from pandas import (Categorical, DataFrame,
- Index, MultiIndex, Timedelta)
+ Index, MultiIndex, Timedelta, Series)
from pandas.core.arrays.categorical import _recode_for_categories
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
@@ -493,6 +493,8 @@ def __init__(self, left, right, how='inner', on=None,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
+ left = validate_operand(left)
+ right = validate_operand(right)
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
@@ -519,13 +521,6 @@ def __init__(self, left, right, how='inner', on=None,
raise ValueError(
'indicator option can only accept boolean or string arguments')
- if not isinstance(left, DataFrame):
- raise ValueError('can not merge DataFrame with instance of '
- 'type {left}'.format(left=type(left)))
- if not isinstance(right, DataFrame):
- raise ValueError('can not merge DataFrame with instance of '
- 'type {right}'.format(right=type(right)))
-
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
@@ -1645,3 +1640,16 @@ def _should_fill(lname, rname):
def _any(x):
return x is not None and com._any_not_none(*x)
+
+
+def validate_operand(obj):
+ if isinstance(obj, DataFrame):
+ return obj
+ elif isinstance(obj, Series):
+ if obj.name is None:
+ raise ValueError('Cannot merge a Series without a name')
+ else:
+ return obj.to_frame()
+ else:
+ raise TypeError('Can only merge Series or DataFrame objects, '
+ 'a {obj} was passed'.format(obj=type(obj)))
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 1b8f3632d381c..09f511886583c 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -228,16 +228,18 @@ def test_join_on_fails_with_different_column_counts(self):
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
- def test_join_on_fails_with_wrong_object_type(self):
- # GH12081
- wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
- df = DataFrame({'a': [1, 1]})
+ @pytest.mark.parametrize("wrong_type", [2, 'str', None, np.array([0, 1])])
+ def test_join_on_fails_with_wrong_object_type(self, wrong_type):
+ # GH12081 - original issue
+
+ # GH21220 - merging of Series and DataFrame is now allowed
+ # Edited test to remove the Series object from test parameters
- for obj in wrongly_typed:
- with tm.assert_raises_regex(ValueError, str(type(obj))):
- merge(obj, df, left_on='a', right_on='a')
- with tm.assert_raises_regex(ValueError, str(type(obj))):
- merge(df, obj, left_on='a', right_on='a')
+ df = DataFrame({'a': [1, 1]})
+ with tm.assert_raises_regex(TypeError, str(type(wrong_type))):
+ merge(wrong_type, df, left_on='a', right_on='a')
+ with tm.assert_raises_regex(TypeError, str(type(wrong_type))):
+ merge(df, wrong_type, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 037bd9cc7cd18..42df4511578f1 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1887,3 +1887,33 @@ def test_merge_index_types(index):
OrderedDict([('left_data', [1, 2]), ('right_data', [1.0, 2.0])]),
index=index)
assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("on,left_on,right_on,left_index,right_index,nms,nm", [
+ (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], 'B'),
+ (None, None, None, True, True, ['outer', 'inner'], 'B'),
+ (None, ['outer', 'inner'], None, False, True, None, 'B'),
+ (None, None, ['outer', 'inner'], True, False, None, 'B'),
+ (['outer', 'inner'], None, None, False, False, ['outer', 'inner'], None),
+ (None, None, None, True, True, ['outer', 'inner'], None),
+ (None, ['outer', 'inner'], None, False, True, None, None),
+ (None, None, ['outer', 'inner'], True, False, None, None)])
+def test_merge_series(on, left_on, right_on, left_index, right_index, nms, nm):
+ # GH 21220
+ a = pd.DataFrame({"A": [1, 2, 3, 4]},
+ index=pd.MultiIndex.from_product([['a', 'b'], [0, 1]],
+ names=['outer', 'inner']))
+ b = pd.Series([1, 2, 3, 4],
+ index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
+ names=['outer', 'inner']), name=nm)
+ expected = pd.DataFrame({"A": [2, 4], "B": [1, 3]},
+ index=pd.MultiIndex.from_product([['a', 'b'], [1]],
+ names=nms))
+ if nm is not None:
+ result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
+ left_index=left_index, right_index=right_index)
+ tm.assert_frame_equal(result, expected)
+ else:
+ with tm.assert_raises_regex(ValueError, 'a Series without a name'):
+ result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
+ left_index=left_index, right_index=right_index)
| - [x] closes #21220
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21223 | 2018-05-27T15:08:47Z | 2018-07-23T17:02:14Z | 2018-07-23T17:02:14Z | 2018-07-23T17:03:41Z |
DOC: correct header line in v0.24.0 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b94377af770f4..43e513c9d03f5 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1,7 +1,7 @@
.. _whatsnew_0240:
v0.24.0
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------
.. _whatsnew_0240.enhancements:
| Small change, but I *think* that this was the cause of a bunch of warnings in the doc build | https://api.github.com/repos/pandas-dev/pandas/pulls/21218 | 2018-05-26T10:43:24Z | 2018-05-26T13:03:14Z | 2018-05-26T13:03:14Z | 2018-05-26T13:11:17Z |
CI: revert skip of geopandas downstream test | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index c28e2052bd93e..9e46084898b57 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -96,7 +96,6 @@ def test_pandas_datareader():
'F', 'quandl', '2017-01-01', '2017-02-01')
-@pytest.mark.xfail(reaason="downstream install issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
| xref https://github.com/pandas-dev/pandas/pull/21204 | https://api.github.com/repos/pandas-dev/pandas/pulls/21217 | 2018-05-26T09:47:46Z | 2018-06-04T09:53:07Z | 2018-06-04T09:53:07Z | 2018-06-12T16:30:34Z |
API/BUG: DatetimeIndex correctly localizes integer data | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 6d5e40d37c8df..c29197725a2b6 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -36,7 +36,7 @@ Datetimelike API Changes
Other API Changes
^^^^^^^^^^^^^^^^^
--
+- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`)
-
-
@@ -92,7 +92,7 @@ Datetimelike
^^^^^^^^^^^^
- Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`)
--
+- Bug in :class:`Index` with ``datetime64[ns, tz]`` dtype that did not localize integer data correctly (:issue:`20964`)
-
Timedelta
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index d9e4ef7db1158..36345a32a3bf7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1175,6 +1175,10 @@ def astype(self, dtype, copy=True):
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
+ if is_datetime64tz_dtype(dtype):
+ from pandas.core.indexes.datetimes import DatetimeIndex
+ return DatetimeIndex(self.values, name=self.name, dtype=dtype,
+ copy=copy)
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 66622814f172d..e944df7aa83c6 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -395,57 +395,43 @@ def __new__(cls, data=None,
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
- is_integer_dtype(data)):
+ is_integer_dtype(data) or lib.infer_dtype(data) == 'integer'):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
- if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
-
- if isinstance(data, DatetimeIndex):
- if tz is None:
- tz = data.tz
- elif data.tz is None:
- data = data.tz_localize(tz, ambiguous=ambiguous)
- else:
- # the tz's must match
- if str(tz) != str(data.tz):
- msg = ('data is already tz-aware {0}, unable to '
- 'set specified tz: {1}')
- raise TypeError(msg.format(data.tz, tz))
+ if isinstance(data, DatetimeIndex):
+ if tz is None:
+ tz = data.tz
+ elif data.tz is None:
+ data = data.tz_localize(tz, ambiguous=ambiguous)
+ else:
+ # the tz's must match
+ if str(tz) != str(data.tz):
+ msg = ('data is already tz-aware {0}, unable to '
+ 'set specified tz: {1}')
+ raise TypeError(msg.format(data.tz, tz))
- subarr = data.values
+ subarr = data.values
- if freq is None:
- freq = data.freq
- verify_integrity = False
- else:
- if data.dtype != _NS_DTYPE:
- subarr = conversion.ensure_datetime64ns(data)
- else:
- subarr = data
+ if freq is None:
+ freq = data.freq
+ verify_integrity = False
+ elif issubclass(data.dtype.type, np.datetime64):
+ if data.dtype != _NS_DTYPE:
+ data = conversion.ensure_datetime64ns(data)
+ if tz is not None:
+ # Convert tz-naive to UTC
+ tz = timezones.maybe_get_tz(tz)
+ data = conversion.tz_localize_to_utc(data.view('i8'), tz,
+ ambiguous=ambiguous)
+ subarr = data.view(_NS_DTYPE)
else:
# must be integer dtype otherwise
- if isinstance(data, Int64Index):
- raise TypeError('cannot convert Int64Index->DatetimeIndex')
+ # assume this data are epoch timestamps
if data.dtype != _INT64_DTYPE:
- data = data.astype(np.int64)
+ data = data.astype(np.int64, copy=False)
subarr = data.view(_NS_DTYPE)
- if isinstance(subarr, DatetimeIndex):
- if tz is None:
- tz = subarr.tz
- else:
- if tz is not None:
- tz = timezones.maybe_get_tz(tz)
-
- if (not isinstance(data, DatetimeIndex) or
- getattr(data, 'tz', None) is None):
- # Convert tz-naive to UTC
- ints = subarr.view('i8')
- subarr = conversion.tz_localize_to_utc(ints, tz,
- ambiguous=ambiguous)
- subarr = subarr.view(_NS_DTYPE)
-
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
@@ -807,8 +793,9 @@ def _mpl_repr(self):
@cache_readonly
def _is_dates_only(self):
+ """Return a boolean if we are only dates (and don't have a timezone)"""
from pandas.io.formats.format import _is_dates_only
- return _is_dates_only(self.values)
+ return _is_dates_only(self.values) and self.tz is None
@property
def _formatter_func(self):
@@ -1244,7 +1231,7 @@ def join(self, other, how='left', level=None, return_indexers=False,
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
- other.inferred_type not in ('floating', 'mixed-integer',
+ other.inferred_type not in ('floating', 'integer', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
@@ -2100,8 +2087,9 @@ def normalize(self):
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = conversion.date_normalize(self.asi8, self.tz)
- return DatetimeIndex(new_values, freq='infer', name=self.name,
- tz=self.tz)
+ return DatetimeIndex(new_values,
+ freq='infer',
+ name=self.name).tz_localize(self.tz)
@Substitution(klass='DatetimeIndex')
@Appender(_shared_docs['searchsorted'])
@@ -2182,8 +2170,6 @@ def insert(self, loc, item):
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
- if self.tz is not None:
- new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (AttributeError, TypeError):
@@ -2221,8 +2207,6 @@ def delete(self, loc):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
- if self.tz is not None:
- new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 8acdd301f241a..64b8f48f6a4e1 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -225,6 +225,16 @@ def _check_rng(rng):
_check_rng(rng_eastern)
_check_rng(rng_utc)
+ @pytest.mark.parametrize('tz, dtype', [
+ ['US/Pacific', 'datetime64[ns, US/Pacific]'],
+ [None, 'datetime64[ns]']])
+ def test_integer_index_astype_datetime(self, tz, dtype):
+ # GH 20997, 20964
+ val = [pd.Timestamp('2018-01-01', tz=tz).value]
+ result = pd.Index(val).astype(dtype)
+ expected = pd.DatetimeIndex(['2018-01-01'], tz=tz)
+ tm.assert_index_equal(result, expected)
+
class TestToPeriod(object):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index b138b79caac76..f7682a965c038 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -1,8 +1,10 @@
-import pytest
+from datetime import timedelta
+from operator import attrgetter
+from functools import partial
+import pytest
import pytz
import numpy as np
-from datetime import timedelta
import pandas as pd
from pandas import offsets
@@ -26,25 +28,28 @@ def test_construction_caching(self):
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
- def test_construction_with_alt(self):
-
- i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
- i2 = DatetimeIndex(i, dtype=i.dtype)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
-
- i2 = DatetimeIndex(
- i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
- tm.assert_index_equal(i, i2)
- assert i.tz.zone == 'US/Eastern'
+ @pytest.mark.parametrize('kwargs', [
+ {'tz': 'dtype.tz'},
+ {'dtype': 'dtype'},
+ {'dtype': 'dtype', 'tz': 'dtype.tz'}])
+ def test_construction_with_alt(self, kwargs, tz_aware_fixture):
+ tz = tz_aware_fixture
+ i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
+ kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
+ result = DatetimeIndex(i, **kwargs)
+ tm.assert_index_equal(i, result)
+
+ @pytest.mark.parametrize('kwargs', [
+ {'tz': 'dtype.tz'},
+ {'dtype': 'dtype'},
+ {'dtype': 'dtype', 'tz': 'dtype.tz'}])
+ def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
+ tz = tz_aware_fixture
+ i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
+ kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
+ result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
+ expected = i.tz_localize(None).tz_localize('UTC').tz_convert(tz)
+ tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
@@ -478,6 +483,19 @@ def test_constructor_timestamp_near_dst(self):
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize('klass', [Index, DatetimeIndex])
+ @pytest.mark.parametrize('box', [
+ np.array, partial(np.array, dtype=object), list])
+ @pytest.mark.parametrize('tz, dtype', [
+ ['US/Pacific', 'datetime64[ns, US/Pacific]'],
+ [None, 'datetime64[ns]']])
+ def test_constructor_with_int_tz(self, klass, box, tz, dtype):
+ # GH 20997, 20964
+ ts = Timestamp('2018-01-01', tz=tz)
+ result = klass(box([ts.value]), dtype=dtype)
+ expected = klass([ts])
+ assert result == expected
+
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index c264f5f79e47e..b8bd218ec25ab 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -402,26 +402,33 @@ def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
- @pytest.mark.parametrize("values", [
- # pass values without timezone, as DatetimeIndex localizes it
- pd.date_range('2011-01-01', periods=5).values,
- pd.date_range('2011-01-01', periods=5).asi8])
+ @pytest.mark.parametrize("attr, utc", [
+ ['values', False],
+ ['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
- def test_constructor_dtypes_datetime(self, tz_naive_fixture, values,
+ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
- index = pd.date_range('2011-01-01', periods=5, tz=tz_naive_fixture)
+ # Test constructing with a datetimetz dtype
+ # .values produces numpy datetimes, so these are considered naive
+ # .asi8 produces integers, so these are considered epoch timestamps
+ index = pd.date_range('2011-01-01', periods=5)
+ arg = getattr(index, attr)
+ if utc:
+ index = index.tz_localize('UTC').tz_convert(tz_naive_fixture)
+ else:
+ index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
- result = klass(values, tz=tz_naive_fixture)
+ result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
- result = klass(values, dtype=dtype)
+ result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
- result = klass(list(values), tz=tz_naive_fixture)
+ result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
- result = klass(list(values), dtype=dtype)
+ result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
| closes #20997
closes #20964
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I did a little refactoring of `Datetime.__new__` that converts passed data but made it easier to fix this issue. I had to modify some methods (join, normalize, delete, insert) and tests that weren't assuming integer data weren't epoch timestamps. | https://api.github.com/repos/pandas-dev/pandas/pulls/21216 | 2018-05-26T05:39:36Z | 2018-06-14T10:05:29Z | 2018-06-14T10:05:28Z | 2018-06-14T17:28:21Z |
BUG: Categorical.fillna with iterables | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f33f03a3eb500 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -66,6 +66,7 @@ Categorical
^^^^^^^^^^^
- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+- Bug in :meth:`Categorical.fillna` incorrectly raising a ``TypeError`` when `value` the individual categories are iterable and `value` is an iterable (:issue:`21097`, :issue:`19788`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index abcb9ae3494b5..a1a8f098b582e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -12,6 +12,7 @@
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
@@ -1751,7 +1752,7 @@ def fillna(self, value=None, method=None, limit=None):
values[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
- elif is_scalar(value):
+ elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py
index 5133c97d8b590..c78f02245a5b4 100644
--- a/pandas/tests/categorical/test_missing.py
+++ b/pandas/tests/categorical/test_missing.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import collections
+
import numpy as np
import pytest
@@ -68,3 +70,16 @@ def test_fillna_raises(self, fillna_kwargs, msg):
with tm.assert_raises_regex(ValueError, msg):
cat.fillna(**fillna_kwargs)
+
+ @pytest.mark.parametrize("named", [True, False])
+ def test_fillna_iterable_category(self, named):
+ # https://github.com/pandas-dev/pandas/issues/21097
+ if named:
+ Point = collections.namedtuple("Point", "x y")
+ else:
+ Point = lambda *args: args # tuple
+ cat = Categorical([Point(0, 0), Point(0, 1), None])
+ result = cat.fillna(Point(0, 0))
+ expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
+
+ tm.assert_categorical_equal(result, expected)
| Closes https://github.com/pandas-dev/pandas/issues/19788
Closes https://github.com/pandas-dev/pandas/issues/21097
Note that `Series.fillna` still doesn't allow iterables. I'm not sure that we should allow that, as it's potentially confusing what that would do. | https://api.github.com/repos/pandas-dev/pandas/pulls/21215 | 2018-05-26T03:06:27Z | 2018-05-28T21:57:57Z | 2018-05-28T21:57:57Z | 2018-06-12T14:27:45Z |
DOC: Add numeric_only to DataFrame.quantile | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d4ce8dc166b09..22677b19192e1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7089,6 +7089,9 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
+ numeric_only : boolean, default True
+ If False, the quantile of datetime and timedelta data will be
+ computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
@@ -7116,7 +7119,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
- columns=['a', 'b'])
+ columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
@@ -7126,6 +7129,20 @@ def quantile(self, q=0.5, axis=0, numeric_only=True,
0.1 1.3 3.7
0.5 2.5 55.0
+ Specifying `numeric_only=False` will also compute the quantile of
+ datetime and timedelta data.
+
+ >>> df = pd.DataFrame({'A': [1, 2],
+ 'B': [pd.Timestamp('2010'),
+ pd.Timestamp('2011')],
+ 'C': [pd.Timedelta('1 days'),
+ pd.Timedelta('2 days')]})
+ >>> df.quantile(0.5, numeric_only=False)
+ A 1.5
+ B 2010-07-02 12:00:00
+ C 1 days 12:00:00
+ Name: 0.5, dtype: object
+
See Also
--------
pandas.core.window.Rolling.quantile
| - [x] closes #18608
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
My confusion with #18608 was primarily due to the `numeric_only` not being specified in the docs. Additional we do have tests covering this case in `pandas/tests/frame/test_quantile.py` | https://api.github.com/repos/pandas-dev/pandas/pulls/21214 | 2018-05-26T02:36:46Z | 2018-05-29T01:07:45Z | 2018-05-29T01:07:45Z | 2018-06-08T17:17:41Z |
Stable Sorting Algorithm for Fillna Indexer | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f400de5f1ad02 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -52,6 +52,7 @@ Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
+- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
Strings
^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 43afd1e0f5969..a6dbaff17e543 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -297,7 +297,8 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
# Make sure all arrays are the same size
assert N == len(labels) == len(mask)
- sorted_labels = np.argsort(labels).astype(np.int64, copy=False)
+ sorted_labels = np.argsort(labels, kind='mergesort').astype(
+ np.int64, copy=False)
if direction == 'bfill':
sorted_labels = sorted_labels[::-1]
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 626057c1ea760..7fccf1f57a886 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -721,6 +721,23 @@ def interweave(list_obj):
assert_frame_equal(result, exp)
+@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
+def test_pad_stable_sorting(fill_method):
+ # GH 21207
+ x = [0] * 20
+ y = [np.nan] * 10 + [1] * 10
+
+ if fill_method == 'bfill':
+ y = y[::-1]
+
+ df = pd.DataFrame({'x': x, 'y': y})
+ expected = df.copy()
+
+ result = getattr(df.groupby('x'), fill_method)()
+
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
| - [X] closes #21207
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Haven't added a whatsnew yet if only because I need to think through the best way to phrase this / investigate where it doesn't work. I have a feeling the default `kind` argument has a bug in it back in NumPy, but `mergesort` is guaranteed as a stable sort so may be preferable anyway in spite of the performance hit.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.html#numpy.sort
Will post ASVs later as well - let me know of feedback in the interim | https://api.github.com/repos/pandas-dev/pandas/pulls/21212 | 2018-05-25T21:58:56Z | 2018-05-29T01:20:39Z | 2018-05-29T01:20:39Z | 2018-06-08T17:18:03Z |
DOC: minor documentation fix on frame.dropna | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1d8f225bd4342..d4ce8dc166b09 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4177,8 +4177,9 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
- .. deprecated:: 0.23.0: Pass tuple or list to drop on multiple
- axes.
+ .. deprecated:: 0.23.0
+ Pass tuple or list to drop on multiple axes.
+
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
| - [x] closes #21209
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
After this PR, it should look like this:
<img width="749" alt="pr" src="https://user-images.githubusercontent.com/14131823/40563992-e1536170-605e-11e8-8fd7-13bc1e0981d0.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/21210 | 2018-05-25T20:00:12Z | 2018-05-25T21:06:09Z | 2018-05-25T21:06:09Z | 2018-05-25T21:07:06Z |
CI: use latest deps for pandas-datareader, python-dateutil | diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
index fe057e714761e..006276ba1a65f 100644
--- a/ci/travis-36.yaml
+++ b/ci/travis-36.yaml
@@ -18,12 +18,10 @@ dependencies:
- numexpr
- numpy
- openpyxl
- - pandas-datareader
- psycopg2
- pyarrow
- pymysql
- pytables
- - python-dateutil
- python-snappy
- python=3.6*
- pytz
@@ -45,3 +43,5 @@ dependencies:
- pip:
- brotlipy
- coverage
+ - pandas-datareader
+ - python-dateutil
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index a595d9f18d6b8..c28e2052bd93e 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -87,6 +87,7 @@ def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
+@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
@@ -95,6 +96,7 @@ def test_pandas_datareader():
'F', 'quandl', '2017-01-01', '2017-02-01')
+@pytest.mark.xfail(reaason="downstream install issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
| xfail geopandas
| https://api.github.com/repos/pandas-dev/pandas/pulls/21204 | 2018-05-25T10:42:00Z | 2018-05-25T11:32:05Z | 2018-05-25T11:32:05Z | 2018-06-12T16:30:34Z |
BUG: make dense ranks results scale to 100 percent | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 726ab73e8f933..f2bc81eea186b 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -51,6 +51,7 @@ Groupby/Resample/Rolling
- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`)
- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`)
+- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True``
Strings
^^^^^^^
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 6a33e4a09476d..b3e9b7c9e69ee 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -418,7 +418,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
bint is_datetimelike, object ties_method,
bint ascending, bint pct, object na_option):
"""
- Provides the rank of values within each group.
+ Provides the rank of values within each group.
Parameters
----------
@@ -451,8 +451,8 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
"""
cdef:
TiebreakEnumType tiebreak
- Py_ssize_t i, j, N, K, val_start=0, grp_start=0, dups=0, sum_ranks=0
- Py_ssize_t grp_vals_seen=1, grp_na_count=0
+ Py_ssize_t i, j, N, K, grp_start=0, dups=0, sum_ranks=0
+ Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0
ndarray[int64_t] _as
ndarray[float64_t, ndim=2] grp_sizes
ndarray[{{c_type}}] masked_vals
@@ -563,6 +563,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
dups = sum_ranks = 0
val_start = i
grp_vals_seen += 1
+ grp_tie_count +=1
# Similar to the previous conditional, check now if we are moving
# to a new group. If so, keep track of the index where the new
@@ -571,11 +572,16 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# (used by pct calculations later). also be sure to reset any of
# the items helping to calculate dups
if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]:
- for j in range(grp_start, i + 1):
- grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ if tiebreak != TIEBREAK_DENSE:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count
+ else:
+ for j in range(grp_start, i + 1):
+ grp_sizes[_as[j], 0] = (grp_tie_count -
+ (grp_na_count > 0))
dups = sum_ranks = 0
grp_na_count = 0
- val_start = i + 1
+ grp_tie_count = 0
grp_start = i + 1
grp_vals_seen = 1
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 6ad8b4905abff..203c3c73bec94 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -59,9 +59,9 @@ def test_rank_apply():
('first', False, False, [3., 4., 1., 5., 2.]),
('first', False, True, [.6, .8, .2, 1., .4]),
('dense', True, False, [1., 1., 3., 1., 2.]),
- ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]),
+ ('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]),
('dense', False, False, [3., 3., 1., 3., 2.]),
- ('dense', False, True, [.6, .6, .2, .6, .4]),
+ ('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]),
])
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
@@ -126,7 +126,7 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
- [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats
+ [2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06'), np.nan, np.nan]
@@ -167,11 +167,11 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
('dense', True, 'keep', False,
[1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
('dense', True, 'keep', True,
- [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]),
+ [1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]),
('dense', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('dense', False, 'keep', True,
- [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
+ [3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. / 3., np.nan, np.nan]),
('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
('average', True, 'no_na', True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
@@ -198,10 +198,10 @@ def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
('dense', True, 'no_na', True,
- [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]),
+ [0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]),
('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
('dense', False, 'no_na', True,
- [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5])
+ [0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.])
])
def test_rank_args_missing(grps, vals, ties_method, ascending,
na_option, pct, exp):
| - [x] closes #20731
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21203 | 2018-05-25T08:12:23Z | 2018-05-31T20:44:53Z | 2018-05-31T20:44:53Z | 2018-06-08T17:20:38Z |
BUG: Fix inconsistency between the shape properties of SparseSeries and SparseArray (#21126) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index 4876678baaa6e..f5df37cd42e4f 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -67,6 +67,11 @@ Categorical
- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`)
+Sparse
+^^^^^^
+
+- Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`)
+
Conversion
^^^^^^^^^^
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 5532d7522cd2d..ff58f7d104ff9 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -290,6 +290,7 @@ def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
+ object_state[2] = self.sp_values.__reduce__()[2]
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
@@ -339,6 +340,10 @@ def values(self):
output.put(int_index.indices, self)
return output
+ @property
+ def shape(self):
+ return (len(self),)
+
@property
def sp_values(self):
# caching not an option, leaks memory
diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py
index 6c0c83cf65ff7..b3330f866ba1f 100644
--- a/pandas/tests/sparse/test_array.py
+++ b/pandas/tests/sparse/test_array.py
@@ -454,6 +454,17 @@ def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
+ @pytest.mark.parametrize('data,shape,dtype', [
+ ([0, 0, 0, 0, 0], (5,), None),
+ ([], (0,), None),
+ ([0], (1,), None),
+ (['A', 'A', np.nan, 'B'], (4,), np.object)
+ ])
+ def test_shape(self, data, shape, dtype):
+ # GH 21126
+ out = SparseArray(data, dtype=dtype)
+ assert out.shape == shape
+
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
| - [x] closes #21126
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21198 | 2018-05-25T02:48:29Z | 2018-05-31T10:27:33Z | 2018-05-31T10:27:32Z | 2018-06-12T16:30:34Z |
CLN: Remove duplicate Categorical subsection from 0.23.1 whatsnew | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index a7ba0dfbbd1c4..4876678baaa6e 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -97,8 +97,3 @@ Reshaping
- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`)
-
-
-Categorical
-^^^^^^^^^^^
-
--
| Unless I'm misreading, it looks like there are two Categorical subsections under the Bug Fixes section, with the first subsection starting on line 65. | https://api.github.com/repos/pandas-dev/pandas/pulls/21197 | 2018-05-25T00:31:54Z | 2018-05-25T07:09:54Z | 2018-05-25T07:09:54Z | 2018-06-12T14:33:18Z |
BUG: Should not raise errors in .set_names for MultiIndex with nlevels == 1 (GH21149) | diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt
index b0e08e8583cd1..fc6f3f3bfa614 100644
--- a/doc/source/whatsnew/v0.23.1.txt
+++ b/doc/source/whatsnew/v0.23.1.txt
@@ -80,6 +80,7 @@ Indexing
- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`)
- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`)
+- Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`)
-
I/O
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f79288c167356..25d4e1be983e7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1384,7 +1384,8 @@ def set_names(self, names, level=None, inplace=False):
names=[u'baz', u'bar'])
"""
- if level is not None and self.nlevels == 1:
+ from .multi import MultiIndex
+ if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index c9f6bc9151d00..0ab3447909d9b 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -165,6 +165,22 @@ def test_set_name_methods(self):
assert res is None
assert ind.names == new_names2
+ @pytest.mark.parametrize('inplace', [True, False])
+ def test_set_names_with_nlevel_1(self, inplace):
+ # GH 21149
+ # Ensure that .set_names for MultiIndex with
+ # nlevels == 1 does not raise any errors
+ expected = pd.MultiIndex(levels=[[0, 1]],
+ labels=[[0, 1]],
+ names=['first'])
+ m = pd.MultiIndex.from_product([[0, 1]])
+ result = m.set_names('first', level=0, inplace=inplace)
+
+ if inplace:
+ result = m
+
+ tm.assert_index_equal(result, expected)
+
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
| - [x] partially #21149
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Initial PR #21182 is closed - and now split in 2 different PRs | https://api.github.com/repos/pandas-dev/pandas/pulls/21196 | 2018-05-24T18:31:39Z | 2018-05-29T01:32:25Z | 2018-05-29T01:32:25Z | 2018-06-08T17:18:13Z |
CLN: Comparison methods for MultiIndex should have consistent behaviour for all nlevels (GH21149) | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index 3e4326dea2ecc..0d3f9cb8dd3b6 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -52,6 +52,7 @@ Bug Fixes
**Indexing**
- Bug in :meth:`Index.get_indexer_non_unique` with categorical key (:issue:`21448`)
+- Bug in comparison operations for :class:`MultiIndex` where error was raised on equality / inequality comparison involving a MultiIndex with ``nlevels == 1`` (:issue:`21149`)
-
**I/O**
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 36345a32a3bf7..4b32e5d4f5654 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -91,7 +91,8 @@ def cmp_method(self, other):
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
- if is_object_dtype(self) and self.nlevels == 1:
+ from .multi import MultiIndex
+ if is_object_dtype(self) and not isinstance(self, MultiIndex):
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 0ab3447909d9b..ab53002ee1587 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -3307,3 +3307,20 @@ def test_duplicate_multiindex_labels(self):
with pytest.raises(ValueError):
ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
+
+ def test_multiindex_compare(self):
+ # GH 21149
+ # Ensure comparison operations for MultiIndex with nlevels == 1
+ # behave consistently with those for MultiIndex with nlevels > 1
+
+ midx = pd.MultiIndex.from_product([[0, 1]])
+
+ # Equality self-test: MultiIndex object vs self
+ expected = pd.Series([True, True])
+ result = pd.Series(midx == midx)
+ tm.assert_series_equal(result, expected)
+
+ # Greater than comparison: MultiIndex object vs self
+ expected = pd.Series([False, False])
+ result = pd.Series(midx > midx)
+ tm.assert_series_equal(result, expected)
| - [x] closes #21149
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Initial PR #21182 is closed - and now split in 2 different PRs
`def cmp_method` raised ValueError for equality / inequality comparisons of MultiIndex with nlevels == 1, which was inconsistent with behaviour for MultiIndex with nlevels > 1 (details of issue below) - this has now been fixed
Currently (as of 0.23.0), comparing MultiIndex of nlevels==1 with another of same length raises a ValueError e.g.
```python
[In] midx=pd.MultiIndex.from_product([[0, 1]])
[In] midx
[Out] MultiIndex(levels=[[0, 1]],
labels=[[0, 1]])
[In] midx == midx
[Out] ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
whereas the behaviour should be consistent with that for MultiIndex with nlevels>1 as follows:
```python
[In] midx == midx
[Out] array([ True, True])
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/21195 | 2018-05-24T18:27:12Z | 2018-06-14T10:23:15Z | 2018-06-14T10:23:15Z | 2018-06-29T14:48:51Z |
BUG: DecimalArray and JSONArray that are empty return incorrect results for isna() | diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index 32cf29818e069..af26d83df3fe2 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -18,6 +18,11 @@ def test_isna(self, data_missing):
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
+ # GH 21189
+ result = pd.Series(data_missing).drop([0, 1]).isna()
+ expected = pd.Series([], dtype=bool)
+ self.assert_series_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index e9431bd0c233c..90f0181beab0d 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -90,7 +90,7 @@ def nbytes(self):
return 0
def isna(self):
- return np.array([x.is_nan() for x in self._data])
+ return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 88bb66f38b35c..10be7836cb8d7 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -108,7 +108,8 @@ def nbytes(self):
return sys.getsizeof(self.data)
def isna(self):
- return np.array([x == self.dtype.na_value for x in self.data])
+ return np.array([x == self.dtype.na_value for x in self.data],
+ dtype=bool)
def take(self, indexer, allow_fill=False, fill_value=None):
# re-implement here, since NumPy has trouble setting
|
- [x] closes #21189
- [x] tests added / passed
- Modified tests/extension/base/missing.py to test when arrays are empty
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- Did not put anything here because it's internal code
Probably for @TomAugspurger to review.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21190 | 2018-05-24T09:42:18Z | 2018-05-24T14:59:28Z | 2018-05-24T14:59:28Z | 2018-05-24T19:41:08Z |
fix hashing string-casting error | diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt
index 5b3e607956f7a..c908d29716a7d 100644
--- a/doc/source/whatsnew/v0.23.2.txt
+++ b/doc/source/whatsnew/v0.23.2.txt
@@ -81,6 +81,7 @@ Bug Fixes
**Categorical**
+- Bug in rendering :class:`Series` with ``Categorical`` dtype in rare conditions under Python 2.7 (:issue:`21002`)
-
**Timezones**
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index c6f182ac5003f..4489847518a1d 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -8,8 +8,7 @@ import numpy as np
from numpy cimport ndarray, uint8_t, uint32_t, uint64_t
from util cimport _checknull
-from cpython cimport (PyString_Check,
- PyBytes_Check,
+from cpython cimport (PyBytes_Check,
PyUnicode_Check)
from libc.stdlib cimport malloc, free
@@ -62,9 +61,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'):
cdef list datas = []
for i in range(n):
val = arr[i]
- if PyString_Check(val):
- data = <bytes>val.encode(encoding)
- elif PyBytes_Check(val):
+ if PyBytes_Check(val):
data = <bytes>val
elif PyUnicode_Check(val):
data = <bytes>val.encode(encoding)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 97236f028b1c4..730c2b7865f1f 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -11,6 +11,7 @@
from pandas import (Index, Series, DataFrame, date_range, option_context,
Categorical, period_range, timedelta_range)
from pandas.core.index import MultiIndex
+from pandas.core.base import StringMixin
from pandas.compat import lrange, range, u
from pandas import compat
@@ -202,6 +203,35 @@ def test_latex_repr(self):
class TestCategoricalRepr(object):
+ def test_categorical_repr_unicode(self):
+ # GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
+ # and we are working in PY2, then rendering a Categorical could raise
+ # UnicodeDecodeError by trying to decode when it shouldn't
+
+ class County(StringMixin):
+ name = u'San Sebastián'
+ state = u'PR'
+
+ def __unicode__(self):
+ return self.name + u', ' + self.state
+
+ cat = pd.Categorical([County() for n in range(61)])
+ idx = pd.Index(cat)
+ ser = idx.to_series()
+
+ if compat.PY3:
+ # no reloading of sys, just check that the default (utf8) works
+ # as expected
+ repr(ser)
+ str(ser)
+
+ else:
+ # set sys.defaultencoding to ascii, then change it back after
+ # the test
+ with tm.set_defaultencoding('ascii'):
+ repr(ser)
+ str(ser)
+
def test_categorical_repr(self):
a = Series(Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index d26a2116fb3ce..b9e53dfc80020 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -553,6 +553,28 @@ def _valid_locales(locales, normalize):
# Stdout / stderr decorators
+@contextmanager
+def set_defaultencoding(encoding):
+ """
+ Set default encoding (as given by sys.getdefaultencoding()) to the given
+ encoding; restore on exit.
+
+ Parameters
+ ----------
+ encoding : str
+ """
+ if not PY2:
+ raise ValueError("set_defaultencoding context is only available "
+ "in Python 2.")
+ orig = sys.getdefaultencoding()
+ reload(sys) # noqa:F821
+ sys.setdefaultencoding(encoding)
+ try:
+ yield
+ finally:
+ sys.setdefaultencoding(orig)
+
+
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
| - [x] closes #21002
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/21187 | 2018-05-24T02:25:08Z | 2018-06-21T10:18:54Z | 2018-06-21T10:18:54Z | 2018-06-29T15:03:04Z |
ENH: add in extension dtype registry | diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 38b3b19031a0e..dcabfed2b6021 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -91,8 +91,16 @@ extension array for IP Address data, this might be ``ipaddress.IPv4Address``.
See the `extension dtype source`_ for interface definition.
+.. versionadded:: 0.24.0
+
+:class:`pandas.api.extension.ExtensionDtype` can be registered to pandas to allow creation via a string dtype name.
+This allows one to instantiate ``Series`` and ``.astype()`` with a registered string name, for
+example ``'category'`` is a registered string accessor for the ``CategoricalDtype``.
+
+See the `extension dtype dtypes`_ for more on how to register dtypes.
+
:class:`~pandas.api.extensions.ExtensionArray`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This class provides all the array-like functionality. ExtensionArrays are
limited to 1 dimension. An ExtensionArray is linked to an ExtensionDtype via the
@@ -179,6 +187,7 @@ To use a test, subclass it:
See https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/base/__init__.py
for a list of all the tests available.
+.. _extension dtype dtypes: https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/dtypes.py
.. _extension dtype source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
.. _extension array source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index b275796237191..a08ac36ef4409 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -128,6 +128,23 @@ Previous Behavior:
In [3]: pi - pi[0]
Out[3]: Int64Index([0, 1, 2], dtype='int64')
+.. _whatsnew_0240.api.extension:
+
+ExtensionType Changes
+^^^^^^^^^^^^^^^^^^^^^
+
+- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore
+ the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`)
+- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`)
+- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`)
+- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
+- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
+-
+
+.. _whatsnew_0240.api.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
.. _whatsnew_0240.api.incompatibilities:
@@ -168,6 +185,7 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`)
+- Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`)
-
-
@@ -330,13 +348,6 @@ Reshaping
-
-
-ExtensionArray
-^^^^^^^^^^^^^^
-
-- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`)
-- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
-- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
--
-
Other
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index dc726a736d34f..c4e4f5471c4be 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -154,7 +154,7 @@ def _reconstruct_data(values, dtype, original):
"""
from pandas import Index
if is_extension_array_dtype(dtype):
- pass
+ values = dtype.construct_array_type()._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
@@ -705,7 +705,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
else:
- if is_categorical_dtype(values) or is_sparse(values):
+ if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index a572fff1c44d7..fe4e461b0bd4f 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -54,6 +54,7 @@ class ExtensionArray(object):
methods:
* fillna
+ * dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
@@ -87,7 +88,7 @@ class ExtensionArray(object):
# Constructors
# ------------------------------------------------------------------------
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
"""Construct a new ExtensionArray from a sequence of scalars.
Parameters
@@ -95,6 +96,8 @@ def _from_sequence(cls, scalars):
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
+ copy : boolean, default False
+ if True, copy the underlying data
Returns
-------
ExtensionArray
@@ -384,6 +387,16 @@ def fillna(self, value=None, method=None, limit=None):
new_values = self.copy()
return new_values
+ def dropna(self):
+ """ Return ExtensionArray without NA values
+
+ Returns
+ -------
+ valid : ExtensionArray
+ """
+
+ return self[~self.isna()]
+
def unique(self):
"""Compute the ExtensionArray of unique values.
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 49e98c16c716e..5f405e0d10657 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -109,6 +109,11 @@ class ExtensionDtype(_DtypeOpsMixin):
* name
* construct_from_string
+ Optionally one can override construct_array_type for construction
+ with the name of this dtype via the Registry
+
+ * construct_array_type
+
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
@@ -156,6 +161,16 @@ def name(self):
"""
raise AbstractMethodError(self)
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ raise NotImplementedError
+
@classmethod
def construct_from_string(cls, string):
"""Attempt to construct this type from a string.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 65328dfc7347e..2cd8144e43cea 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -648,6 +648,11 @@ def conv(r, dtype):
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
+
+ # dispatch on extension dtype if needed
+ if is_extension_array_dtype(dtype):
+ return dtype.array_type._from_sequence(arr, copy=copy)
+
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 05f82c67ddb8b..ef4f36dc6df33 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -5,10 +5,11 @@
PY3, PY36)
from pandas._libs import algos, lib
from pandas._libs.tslibs import conversion
+
from pandas.core.dtypes.dtypes import (
- CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
+ registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype,
- IntervalDtypeType, ExtensionDtype, PandasExtensionDtype)
+ IntervalDtypeType, ExtensionDtype)
from pandas.core.dtypes.generic import (
ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries,
ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass,
@@ -1977,38 +1978,13 @@ def pandas_dtype(dtype):
np.dtype or a pandas dtype
"""
- if isinstance(dtype, DatetimeTZDtype):
- return dtype
- elif isinstance(dtype, PeriodDtype):
- return dtype
- elif isinstance(dtype, CategoricalDtype):
- return dtype
- elif isinstance(dtype, IntervalDtype):
- return dtype
- elif isinstance(dtype, string_types):
- try:
- return DatetimeTZDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- if dtype.startswith('period[') or dtype.startswith('Period['):
- # do not parse string like U as period[U]
- try:
- return PeriodDtype.construct_from_string(dtype)
- except TypeError:
- pass
-
- elif dtype.startswith('interval') or dtype.startswith('Interval'):
- try:
- return IntervalDtype.construct_from_string(dtype)
- except TypeError:
- pass
+ # registered extension types
+ result = registry.find(dtype)
+ if result is not None:
+ return result
- try:
- return CategoricalDtype.construct_from_string(dtype)
- except TypeError:
- pass
- elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ # un-registered extension types
+ if isinstance(dtype, ExtensionDtype):
return dtype
try:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 1e762c2be92a6..de837efc235a0 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -8,6 +8,65 @@
from .base import ExtensionDtype, _DtypeOpsMixin
+class Registry(object):
+ """
+ Registry for dtype inference
+
+ The registry allows one to map a string repr of a extension
+ dtype to an extenstion dtype.
+
+ Multiple extension types can be registered.
+ These are tried in order.
+
+ Examples
+ --------
+ registry.register(MyExtensionDtype)
+ """
+ dtypes = []
+
+ @classmethod
+ def register(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : ExtensionDtype
+ """
+ if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)):
+ raise ValueError("can only register pandas extension dtypes")
+
+ self.dtypes.append(dtype)
+
+ def find(self, dtype):
+ """
+ Parameters
+ ----------
+ dtype : PandasExtensionDtype or string
+
+ Returns
+ -------
+ return the first matching dtype, otherwise return None
+ """
+ if not isinstance(dtype, compat.string_types):
+ dtype_type = dtype
+ if not isinstance(dtype, type):
+ dtype_type = type(dtype)
+ if issubclass(dtype_type, (PandasExtensionDtype, ExtensionDtype)):
+ return dtype
+
+ return None
+
+ for dtype_type in self.dtypes:
+ try:
+ return dtype_type.construct_from_string(dtype)
+ except TypeError:
+ pass
+
+ return None
+
+
+registry = Registry()
+
+
class PandasExtensionDtype(_DtypeOpsMixin):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
@@ -265,6 +324,17 @@ def _hash_categories(categories, ordered=True):
else:
return np.bitwise_xor.reduce(hashed)
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ from pandas import Categorical
+ return Categorical
+
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
@@ -556,11 +626,16 @@ def _parse_dtype_strict(cls, freq):
@classmethod
def construct_from_string(cls, string):
"""
- attempt to construct this type from a string, raise a TypeError
- if its not possible
+ Strict construction from a string, raise a TypeError if not
+ possible
"""
from pandas.tseries.offsets import DateOffset
- if isinstance(string, (compat.string_types, DateOffset)):
+
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('period[') or
+ string.startswith('Period[')) or
+ isinstance(string, DateOffset)):
+ # do not parse string like U as period[U]
# avoid tuple to be regarded as freq
try:
return cls(freq=string)
@@ -660,7 +735,7 @@ def __new__(cls, subtype=None):
try:
subtype = pandas_dtype(subtype)
except TypeError:
- raise ValueError("could not construct IntervalDtype")
+ raise TypeError("could not construct IntervalDtype")
if is_categorical_dtype(subtype) or is_string_dtype(subtype):
# GH 19016
@@ -682,8 +757,11 @@ def construct_from_string(cls, string):
attempt to construct this type from a string, raise a TypeError
if its not possible
"""
- if isinstance(string, compat.string_types):
+ if (isinstance(string, compat.string_types) and
+ (string.startswith('interval') or
+ string.startswith('Interval'))):
return cls(string)
+
msg = "a string needs to be passed, got type {typ}"
raise TypeError(msg.format(typ=type(string)))
@@ -727,3 +805,10 @@ def is_dtype(cls, dtype):
else:
return False
return super(IntervalDtype, cls).is_dtype(dtype)
+
+
+# register the dtypes in search order
+registry.register(DatetimeTZDtype)
+registry.register(PeriodDtype)
+registry.register(IntervalDtype)
+registry.register(CategoricalDtype)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 80619c7beb28c..2fd4e099777bf 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -796,7 +796,7 @@ def astype(self, dtype, copy=True):
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
- return IntervalDtype.construct_from_string(str(self.left.dtype))
+ return IntervalDtype(self.left.dtype.name)
@property
def inferred_type(self):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index fe508dc1bb0bc..a5e9107b8a660 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -633,8 +633,9 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
return self.make_block(Categorical(self.values, dtype=dtype))
# astype processing
- dtype = np.dtype(dtype)
- if self.dtype == dtype:
+ if not is_extension_array_dtype(dtype):
+ dtype = np.dtype(dtype)
+ if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
@@ -662,7 +663,13 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
- values = values.reshape(self.shape)
+
+ # TODO(extension)
+ # should we make this attribute?
+ try:
+ values = values.reshape(self.shape)
+ except AttributeError:
+ pass
newb = make_block(values, placement=self.mgr_locs,
klass=klass)
@@ -3170,6 +3177,10 @@ def get_block_type(values, dtype=None):
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
+ elif is_categorical(values):
+ cls = CategoricalBlock
+ elif is_extension_array_dtype(values):
+ cls = ExtensionBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetimetz(values)
cls = DatetimeBlock
@@ -3179,10 +3190,6 @@ def get_block_type(values, dtype=None):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
- elif is_categorical(values):
- cls = CategoricalBlock
- elif is_extension_array_dtype(values):
- cls = ExtensionBlock
else:
cls = ObjectBlock
return cls
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cdb901d18767c..af3906a3e5c45 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4092,11 +4092,9 @@ def _try_cast(arr, take_fast_path):
subarr = Categorical(arr, dtype.categories,
ordered=dtype.ordered)
elif is_extension_array_dtype(dtype):
- # We don't allow casting to third party dtypes, since we don't
- # know what array belongs to which type.
- msg = ("Cannot cast data to extension dtype '{}'. "
- "Pass the extension array directly.".format(dtype))
- raise ValueError(msg)
+ # create an extension array from its dtype
+ array_type = dtype.construct_array_type()
+ subarr = array_type(subarr, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index c46f4b5ad9c18..2133d0c981b71 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -514,7 +514,6 @@ def _to_str_columns(self):
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
-
# may include levels names also
str_index = self._get_formatted_index(frame)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index eee53a2fcac6a..62e0f1cb717f0 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -7,10 +7,9 @@
from pandas import (
Series, Categorical, CategoricalIndex, IntervalIndex, date_range)
-from pandas.compat import string_types
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
- IntervalDtype, CategoricalDtype)
+ IntervalDtype, CategoricalDtype, registry)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
@@ -448,7 +447,7 @@ def test_construction_not_supported(self, subtype):
def test_construction_errors(self):
msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype('xx')
def test_construction_from_string(self):
@@ -458,14 +457,21 @@ def test_construction_from_string(self):
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
- 'foo', 'interval[foo]', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
+ 'foo', 'foo[int64]', 0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
- if isinstance(string, string_types):
- error, msg = ValueError, 'could not construct IntervalDtype'
- else:
- error, msg = TypeError, 'a string needs to be passed, got type'
+ # these are invalid entirely
+ msg = 'a string needs to be passed, got type'
+
+ with tm.assert_raises_regex(TypeError, msg):
+ IntervalDtype.construct_from_string(string)
+
+ @pytest.mark.parametrize('string', [
+ 'interval[foo]'])
+ def test_construction_from_string_error_subtype(self, string):
+ # this is an invalid subtype
+ msg = 'could not construct IntervalDtype'
- with tm.assert_raises_regex(error, msg):
+ with tm.assert_raises_regex(TypeError, msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
@@ -767,3 +773,25 @@ def test_update_dtype_errors(self, bad_dtype):
msg = 'a CategoricalDtype must be passed to perform an update, '
with tm.assert_raises_regex(ValueError, msg):
dtype.update_dtype(bad_dtype)
+
+
+@pytest.mark.parametrize(
+ 'dtype',
+ [DatetimeTZDtype, CategoricalDtype,
+ PeriodDtype, IntervalDtype])
+def test_registry(dtype):
+ assert dtype in registry.dtypes
+
+
+@pytest.mark.parametrize(
+ 'dtype, expected',
+ [('int64', None),
+ ('interval', IntervalDtype()),
+ ('interval[int64]', IntervalDtype()),
+ ('interval[datetime64[ns]]', IntervalDtype('datetime64[ns]')),
+ ('category', CategoricalDtype()),
+ ('period[D]', PeriodDtype('D')),
+ ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))])
+def test_registry_find(dtype, expected):
+
+ assert registry.find(dtype) == expected
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 489a430bb4020..fdd2b99d9b3c7 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -45,3 +45,14 @@ def test_series_given_mismatched_index_raises(self, data):
msg = 'Length of passed values is 3, index implies 5'
with tm.assert_raises_regex(ValueError, msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
+
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ dtype = data.dtype
+
+ expected = pd.Series(data)
+ result = pd.Series(list(data), dtype=dtype)
+ self.assert_series_equal(result, expected)
+
+ result = pd.Series(list(data), dtype=str(dtype))
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 63d3d807c270c..52a12816c8722 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -1,3 +1,4 @@
+import pytest
import numpy as np
import pandas as pd
@@ -46,3 +47,10 @@ def test_eq_with_str(self, dtype):
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
+
+ def test_array_type(self, data, dtype):
+ assert dtype.construct_array_type() is type(data)
+
+ def test_array_type_with_arg(self, data, dtype):
+ with pytest.raises(NotImplementedError):
+ dtype.construct_array_type('foo')
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 8ef8debbdc666..69de0e1900831 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -40,6 +40,16 @@ def test_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
+ def test_repr_array(self, data):
+ # some arrays may be able to assert
+ # attributes in the repr
+ repr(data)
+
+ def test_repr_array_long(self, data):
+ # some arrays may be able to assert a ... in the repr
+ with pd.option_context('display.max_seq_items', 1):
+ repr(data)
+
def test_dtype_name_in_info(self, data):
buf = StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 23227867ee4d7..c660687f16590 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -19,7 +19,8 @@ def test_value_counts(self, all_data, dropna):
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
- expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
+ expected = pd.Series(other).value_counts(
+ dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index af26d83df3fe2..43b2702c72193 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -23,6 +23,11 @@ def test_isna(self, data_missing):
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
+ def test_dropna_array(self, data_missing):
+ result = data_missing.dropna()
+ expected = data_missing[[1]]
+ self.assert_extension_array_equal(result, expected)
+
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index fe920a47ab740..c83726c5278a5 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -84,6 +84,7 @@ def test_concat_columns(self, data, na_value):
expected = pd.DataFrame({
'A': data._from_sequence(list(data[:3]) + [na_value]),
'B': [np.nan, 1, 2, 3]})
+
result = pd.concat([df1, df2], axis=1)
self.assert_frame_equal(result, expected)
result = pd.concat([df1['A'], df2['B']], axis=1)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index ae0d72c204d13..715e8bd40a2d0 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -62,7 +62,9 @@ def data_for_grouping():
class TestDtype(base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is Categorical
class TestInterface(base.BaseInterfaceTests):
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 3f2f24cd26af0..33adebbbe5780 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -16,6 +16,16 @@ class DecimalDtype(ExtensionDtype):
name = 'decimal'
na_value = decimal.Decimal('NaN')
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ return DecimalArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -28,7 +38,7 @@ def construct_from_string(cls, string):
class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin):
dtype = DecimalDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " +
@@ -44,7 +54,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 45ee7f227c4f0..8fd3d1a57f6c8 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -100,7 +100,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseDecimal, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is DecimalArray
class TestInterface(BaseDecimal, base.BaseInterfaceTests):
@@ -108,7 +110,11 @@ class TestInterface(BaseDecimal, base.BaseInterfaceTests):
class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseDecimal, base.BaseReshapingTests):
@@ -155,6 +161,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
pass
+# TODO(extension)
+@pytest.mark.xfail(reason=(
+ "raising AssertionError as this is not implemented, "
+ "though easy enough to do"))
def test_series_constructor_coerce_data_to_extension_dtype_raises():
xpr = ("Cannot cast data to extension dtype 'decimal'. Pass the "
"extension array directly.")
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index d3043bf0852d2..160bf259e1e32 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -32,6 +32,16 @@ class JSONDtype(ExtensionDtype):
# source compatibility with Py2.
na_value = {}
+ @classmethod
+ def construct_array_type(cls):
+ """Return the array type associated with this dtype
+
+ Returns
+ -------
+ type
+ """
+ return JSONArray
+
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
@@ -44,7 +54,7 @@ def construct_from_string(cls, string):
class JSONArray(ExtensionArray):
dtype = JSONDtype()
- def __init__(self, values):
+ def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " +
@@ -59,7 +69,7 @@ def __init__(self, values):
# self._values = self.values = self.data
@classmethod
- def _from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 268134dc8c333..7eeaf7946663e 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -107,7 +107,9 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
class TestDtype(BaseJSON, base.BaseDtypeTests):
- pass
+
+ def test_array_type_with_arg(self, data, dtype):
+ assert dtype.construct_array_type() is JSONArray
class TestInterface(BaseJSON, base.BaseInterfaceTests):
@@ -130,7 +132,11 @@ def test_custom_asserts(self):
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
- pass
+
+ @pytest.mark.xfail(reason="not implemented constructor from dtype")
+ def test_from_dtype(self, data):
+ # construct from our dtype & string dtype
+ pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
| precursor to #21160 | https://api.github.com/repos/pandas-dev/pandas/pulls/21185 | 2018-05-24T00:35:49Z | 2018-07-03T14:25:11Z | 2018-07-03T14:25:11Z | 2018-07-03T14:25:35Z |
BUG: Series.combine() fails with ExtensionArray inside of Series | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index c69de149a0f35..8d26c51ae6527 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -178,9 +178,18 @@ Reshaping
-
-
+ExtensionArray
+^^^^^^^^^^^^^^
+
+- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`)
+- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`)
+-
+-
+
Other
^^^^^
-
-
-
+-
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d8bdd9ac9ed22..0564cdbbb2014 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2204,7 +2204,7 @@ def _binop(self, other, func, level=None, fill_value=None):
result.name = None
return result
- def combine(self, other, func, fill_value=np.nan):
+ def combine(self, other, func, fill_value=None):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
@@ -2216,6 +2216,8 @@ def combine(self, other, func, fill_value=np.nan):
func : function
Function that takes two scalars as inputs and return a scalar
fill_value : scalar value
+ The default specifies to use the appropriate NaN value for
+ the underlying dtype of the Series
Returns
-------
@@ -2235,20 +2237,38 @@ def combine(self, other, func, fill_value=np.nan):
Series.combine_first : Combine Series values, choosing the calling
Series's values first
"""
+ if fill_value is None:
+ fill_value = na_value_for_dtype(self.dtype, compat=False)
+
if isinstance(other, Series):
+ # If other is a Series, result is based on union of Series,
+ # so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
- new_values = np.empty(len(new_index), dtype=self.dtype)
- for i, idx in enumerate(new_index):
+ new_values = []
+ for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
- new_values[i] = func(lv, rv)
+ new_values.append(func(lv, rv))
else:
+ # Assume that other is a scalar, so apply the function for
+ # each element in the Series
new_index = self.index
with np.errstate(all='ignore'):
- new_values = func(self._values, other)
+ new_values = [func(lv, other) for lv in self._values]
new_name = self.name
+
+ if is_categorical_dtype(self.values):
+ pass
+ elif is_extension_array_dtype(self.values):
+ # The function can return something of any type, so check
+ # if the type is compatible with the calling EA
+ try:
+ new_values = self._values._from_sequence(new_values)
+ except TypeError:
+ pass
+
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index c5436aa731d50..23227867ee4d7 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -103,3 +103,37 @@ def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
tm.assert_numpy_array_equal(l1, l2)
self.assert_extension_array_equal(u1, u2)
+
+ def test_combine_le(self, data_repeated):
+ # GH 20825
+ # Test that combine works when doing a <= (le) comparison
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 <= x2)
+ expected = pd.Series([a <= b for (a, b) in
+ zip(list(orig_data1), list(orig_data2))])
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 <= x2)
+ expected = pd.Series([a <= val for a in list(orig_data1)])
+ self.assert_series_equal(result, expected)
+
+ def test_combine_add(self, data_repeated):
+ # GH 20825
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
+ expected = pd.Series(
+ orig_data1._from_sequence([a + b for (a, b) in
+ zip(list(orig_data1),
+ list(orig_data2))]))
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 + x2)
+ expected = pd.Series(
+ orig_data1._from_sequence([a + val for a in list(orig_data1)]))
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 530a4e7a22a7a..61fdb8454b542 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -1,6 +1,7 @@
import string
import pytest
+import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
@@ -29,6 +30,15 @@ def data_missing():
return Categorical([np.nan, 'A'])
+@pytest.fixture
+def data_repeated():
+ """Return different versions of data for count times"""
+ def gen(count):
+ for _ in range(count):
+ yield Categorical(make_data())
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
return Categorical(['A', 'B', 'C'], categories=['C', 'A', 'B'],
@@ -154,6 +164,22 @@ class TestMethods(base.BaseMethodsTests):
def test_value_counts(self, all_data, dropna):
pass
+ def test_combine_add(self, data_repeated):
+ # GH 20825
+ # When adding categoricals in combine, result is a string
+ orig_data1, orig_data2 = data_repeated(2)
+ s1 = pd.Series(orig_data1)
+ s2 = pd.Series(orig_data2)
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
+ expected = pd.Series(([a + b for (a, b) in
+ zip(list(orig_data1), list(orig_data2))]))
+ self.assert_series_equal(result, expected)
+
+ val = s1.iloc[0]
+ result = s1.combine(val, lambda x1, x2: x1 + x2)
+ expected = pd.Series([a + val for a in list(orig_data1)])
+ self.assert_series_equal(result, expected)
+
class TestCasting(base.BaseCastingTests):
pass
diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index bbd31c4071b91..4bbbb7df2f399 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -30,6 +30,15 @@ def all_data(request, data, data_missing):
return data_missing
+@pytest.fixture
+def data_repeated():
+ """Return different versions of data for count times"""
+ def gen(count):
+ for _ in range(count):
+ yield NotImplementedError
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
"""Length-3 array with a known sort order.
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 90f0181beab0d..cc6fadc483d5e 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -28,7 +28,9 @@ class DecimalArray(ExtensionArray):
dtype = DecimalDtype()
def __init__(self, values):
- assert all(isinstance(v, decimal.Decimal) for v in values)
+ for val in values:
+ if not isinstance(val, self.dtype.type):
+ raise TypeError
values = np.asarray(values, dtype=object)
self._data = values
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 1f8cf0264f62f..f74b4d7e94f11 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -25,6 +25,14 @@ def data_missing():
return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)])
+@pytest.fixture
+def data_repeated():
+ def gen(count):
+ for _ in range(count):
+ yield DecimalArray(make_data())
+ yield gen
+
+
@pytest.fixture
def data_for_sorting():
return DecimalArray([decimal.Decimal('1'),
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index b7ac8033f3f6d..85a282ae4007f 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -187,6 +187,14 @@ def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super(TestMethods, self).test_sort_values_missing(
data_missing_for_sorting, ascending)
+ @pytest.mark.skip(reason="combine for JSONArray not supported")
+ def test_combine_le(self, data_repeated):
+ pass
+
+ @pytest.mark.skip(reason="combine for JSONArray not supported")
+ def test_combine_add(self, data_repeated):
+ pass
+
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.xfail
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 6cf60e818c845..f35cce6ac9d71 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -60,6 +60,19 @@ def test_append_duplicates(self):
with tm.assert_raises_regex(ValueError, msg):
pd.concat([s1, s2], verify_integrity=True)
+ def test_combine_scalar(self):
+ # GH 21248
+ # Note - combine() with another Series is tested elsewhere because
+ # it is used when testing operators
+ s = pd.Series([i * 10 for i in range(5)])
+ result = s.combine(3, lambda x, y: x + y)
+ expected = pd.Series([i * 10 + 3 for i in range(5)])
+ tm.assert_series_equal(result, expected)
+
+ result = s.combine(22, lambda x, y: min(x, y))
+ expected = pd.Series([min(i * 10, 22) for i in range(5)])
+ tm.assert_series_equal(result, expected)
+
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
| - [x] closes #20825
closes #21248
- [x] tests added / passed
- extension/category/test_categorical.py:test_combine
- extension/decimal/test_decimal.py:test_combine
- series/test_combine_concat.py:test_combine_scalar
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- modified `whatsnew.v0.24.0.txt`
This is a split of #20889 to just fix the `combine` issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/21183 | 2018-05-23T18:39:32Z | 2018-06-08T11:34:34Z | 2018-06-08T11:34:33Z | 2018-06-08T13:53:54Z |
Backport PR #29010 on branch 0.25.x (Setuptools CI fixup) | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 02e48f6cb9af1..1834b9674d898 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -55,6 +55,7 @@ echo
echo "update conda"
conda config --set ssl_verify false
conda config --set quiet true --set always_yes true --set changeps1 false
+conda install pip # create conda to create a historical artifact for pip & setuptools
conda update -n base conda
echo "conda info -a"
| Backport PR #29010: Setuptools CI fixup | https://api.github.com/repos/pandas-dev/pandas/pulls/29015 | 2019-10-15T21:38:04Z | 2019-10-16T12:37:34Z | 2019-10-16T12:37:34Z | 2019-10-16T12:37:35Z |
CLN: Fix typing in pandas\tests\arrays\test_datetimelike.py (#28926) | diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 7c482664bca48..117a19acbfc3a 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -1,3 +1,5 @@
+from typing import Type, Union
+
import numpy as np
import pytest
@@ -5,6 +7,9 @@
import pandas as pd
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
+from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.period import PeriodIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.util.testing as tm
@@ -52,7 +57,7 @@ def timedelta_index(request):
class SharedTests:
- index_cls = None
+ index_cls = None # type: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]]
def test_compare_len1_raises(self):
# make sure we raise when comparing with different lengths, specific
diff --git a/setup.cfg b/setup.cfg
index c9ba13443e97c..ed95380544b05 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -136,9 +136,6 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.arrays.test_datetimelike]
-ignore_errors=True
-
[mypy-pandas.tests.dtypes.test_common]
ignore_errors=True
| - [x] partially adresses #28926
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/29014 | 2019-10-15T21:32:37Z | 2019-10-23T00:52:51Z | 2019-10-23T00:52:51Z | 2019-10-23T06:01:37Z |
TST: restore xfail for test_apply | diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 2831c07cb21d3..5391cb5ce821f 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas.compat import PY37, is_platform_windows
+from pandas.compat import PY37
import pandas as pd
from pandas import (
@@ -209,10 +209,9 @@ def test_level_get_group(observed):
assert_frame_equal(result, expected)
-# GH#21636 previously flaky on py37
-@pytest.mark.xfail(
- is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False
-)
+# GH#21636 flaky on py37; may be related to older numpy, see discussion
+# https://github.com/MacPython/pandas-wheels/pull/64
+@pytest.mark.xfail(PY37, reason="Flaky, GH-27902", strict=False)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
@@ -229,6 +228,9 @@ def test_apply(ordered):
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
+ # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
+ # is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
+ # when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
assert_frame_equal(result, expected)
| xref https://github.com/MacPython/pandas-wheels/pull/64 | https://api.github.com/repos/pandas-dev/pandas/pulls/29013 | 2019-10-15T20:57:05Z | 2019-10-16T12:25:42Z | 2019-10-16T12:25:41Z | 2019-10-16T15:23:34Z |
Setuptools CI fixup | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index be8c3645691fe..794130355fd74 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -55,6 +55,7 @@ echo
echo "update conda"
conda config --set ssl_verify false
conda config --set quiet true --set always_yes true --set changeps1 false
+conda install pip # create conda to create a historical artifact for pip & setuptools
conda update -n base conda
echo "conda info -a"
| Closes https://github.com/pandas-dev/pandas/issues/29008 | https://api.github.com/repos/pandas-dev/pandas/pulls/29010 | 2019-10-15T20:08:49Z | 2019-10-15T21:37:41Z | 2019-10-15T21:37:40Z | 2019-10-15T21:37:45Z |
CLN: Fix mypy error in 'pandas/tests/computation/test_eval.py' | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index b6ffd8a83e409..4d40cd3a2d4ca 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -2,6 +2,7 @@
from functools import reduce
from itertools import product
import operator
+from typing import Dict, Type
import warnings
import numpy as np
@@ -19,7 +20,11 @@
from pandas.core.computation.check import _NUMEXPR_VERSION
from pandas.core.computation.engines import NumExprClobberingError, _engines
import pandas.core.computation.expr as expr
-from pandas.core.computation.expr import PandasExprVisitor, PythonExprVisitor
+from pandas.core.computation.expr import (
+ BaseExprVisitor,
+ PandasExprVisitor,
+ PythonExprVisitor,
+)
from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
from pandas.core.computation.ops import (
_arith_ops_syms,
@@ -1884,7 +1889,7 @@ def test_invalid_parser():
"python": PythonExprVisitor,
"pytables": pytables.ExprVisitor,
"pandas": PandasExprVisitor,
-}
+} # type: Dict[str, Type[BaseExprVisitor]]
@pytest.mark.parametrize("engine", _engines)
diff --git a/setup.cfg b/setup.cfg
index 462e79dae1039..ca15386b2c429 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -145,9 +145,6 @@ ignore_errors=True
[mypy-pandas.tests.arrays.test_period]
ignore_errors=True
-[mypy-pandas.tests.computation.test_eval]
-ignore_errors=True
-
[mypy-pandas.tests.dtypes.test_common]
ignore_errors=True
| Added explicit typing annotation for _parsers: Dict[str, Type[BaseExprVisitor]]
Also added some import for Dict, Type and BaseExprVisitor
- [x] relates to #28926
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29007 | 2019-10-15T17:15:07Z | 2019-10-16T18:43:33Z | 2019-10-16T18:43:32Z | 2019-10-16T18:43:39Z |
CLN: Fix mypy errors in pandas/tests/io/test_sql.py Reverted and added type changes | diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 7491cef17ebfc..183a47c6039ec 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -583,7 +583,7 @@ class _TestSQLApi(PandasSQLTest):
"""
flavor = "sqlite"
- mode = None
+ mode = None # type: str
def setup_connect(self):
self.conn = self.connect()
@@ -1234,7 +1234,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
- flavor = None
+ flavor = None # type: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
diff --git a/setup.cfg b/setup.cfg
index 55d25abde585c..462e79dae1039 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -205,9 +205,6 @@ ignore_errors=True
[mypy-pandas.tests.io.json.test_ujson]
ignore_errors=True
-[mypy-pandas.tests.io.test_sql]
-ignore_errors=True
-
[mypy-pandas.tests.plotting.test_backend]
ignore_errors=True
| - [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
ref closed PR #28979 | https://api.github.com/repos/pandas-dev/pandas/pulls/29006 | 2019-10-15T16:13:25Z | 2019-10-16T03:47:07Z | 2019-10-16T03:47:07Z | 2019-10-16T03:47:16Z |
Backport PR #29000 on branch 0.25.x | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 59aedd2c5e8e6..35229591ecfae 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -1,6 +1,6 @@
.. _whatsnew_0252:
-What's new in 0.25.2 (October XX, 2019)
+What's new in 0.25.2 (October 15, 2019)
---------------------------------------
These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog
@@ -15,93 +15,25 @@ including other versions of pandas.
Bug fixes
~~~~~~~~~
-Categorical
-^^^^^^^^^^^
-
--
-
-Datetimelike
-^^^^^^^^^^^^
-
--
--
--
-
-Timezones
-^^^^^^^^^
-
--
-
-Numeric
-^^^^^^^
-
--
--
--
--
-
-Conversion
-^^^^^^^^^^
-
--
-
-Interval
-^^^^^^^^
-
--
-
Indexing
^^^^^^^^
-- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`).
+- Fix regression in :meth:`DataFrame.reindex` not following the ``limit`` argument (:issue:`28631`).
- Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`)
--
--
--
-
-Missing
-^^^^^^^
-
--
I/O
^^^
-- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`).
+- Fix regression in notebook display where ``<th>`` tags were missing for :attr:`DataFrame.index` values (:issue:`28204`).
- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
- Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`).
--
-
-Plotting
-^^^^^^^^
-
--
--
--
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`).
--
--
--
-
-Reshaping
-^^^^^^^^^
-
--
--
--
--
--
-
-Sparse
-^^^^^^
-
--
+- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`)
-Other
^^^^^
- Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`)
| https://api.github.com/repos/pandas-dev/pandas/pulls/29005 | 2019-10-15T13:54:10Z | 2019-10-15T15:21:34Z | 2019-10-15T15:21:34Z | 2019-10-15T15:22:50Z | |
fix is_scalar documentation (#28998) | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1c2f80b832201..5761f1ffb0c0a 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -137,8 +137,8 @@ def is_scalar(val: object) -> bool:
Examples
--------
- >>> dt = pd.datetime.datetime(2018, 10, 3)
- >>> pd.is_scalar(dt)
+ >>> dt = datetime.datetime(2018, 10, 3)
+ >>> pd.api.types.is_scalar(dt)
True
>>> pd.api.types.is_scalar([2, 3])
| - [x] closes #28998
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
is_scalar documentation was wrong, see #28998 | https://api.github.com/repos/pandas-dev/pandas/pulls/29004 | 2019-10-15T12:20:19Z | 2019-10-15T15:21:59Z | 2019-10-15T15:21:58Z | 2019-10-15T15:25:22Z |
Backport PR #28730 on branch 0.25.x (CI: 3.8 build) | diff --git a/.travis.yml b/.travis.yml
index 8335a6ee92bef..6465abb317c01 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,6 +31,12 @@ matrix:
- python: 3.5
include:
+ - dist: bionic
+ # 18.04
+ python: 3.8-dev
+ env:
+ - JOB="3.8-dev" PATTERN="(not slow and not network)"
+
- dist: trusty
env:
- JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network)"
@@ -72,6 +78,7 @@ before_install:
# This overrides travis and tells it to look nowhere.
- export BOTO_CONFIG=/dev/null
+
install:
- echo "install start"
- ci/prep_cython_cache.sh
@@ -79,17 +86,19 @@ install:
- ci/submit_cython_cache.sh
- echo "install done"
+
before_script:
# display server (for clipboard functionality) needs to be started here,
# does not work if done in install:setup_env.sh (GH-26103)
- export DISPLAY=":99.0"
- echo "sh -e /etc/init.d/xvfb start"
- - sh -e /etc/init.d/xvfb start
+ - if [ "$JOB" != "3.8-dev" ]; then sh -e /etc/init.d/xvfb start; fi
- sleep 3
script:
- echo "script start"
- - source activate pandas-dev
+ - echo "$JOB"
+ - if [ "$JOB" != "3.8-dev" ]; then source activate pandas-dev; fi
- ci/run_tests.sh
after_script:
diff --git a/ci/build38.sh b/ci/build38.sh
new file mode 100644
index 0000000000000..5c798c17301e0
--- /dev/null
+++ b/ci/build38.sh
@@ -0,0 +1,25 @@
+#!/bin/bash -e
+# Special build for python3.8 until numpy puts its own wheels up
+
+sudo apt-get install build-essential gcc xvfb
+pip install --no-deps -U pip wheel setuptools
+pip install python-dateutil pytz cython pytest pytest-xdist hypothesis
+
+# Possible alternative for getting numpy:
+# pip install --pre -f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com/ numpy
+git clone https://github.com/numpy/numpy
+cd numpy
+python setup.py build_ext --inplace
+python setup.py install
+cd ..
+rm -rf numpy
+
+python setup.py build_ext -inplace
+python -m pip install --no-build-isolation -e .
+
+python -c "import sys; print(sys.version_info)"
+python -c "import pandas as pd"
+python -c "import hypothesis"
+
+# TODO: Is there anything else in setup_env that we really want to do?
+# ci/setup_env.sh
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 88742e0483c7e..02e48f6cb9af1 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -1,5 +1,9 @@
#!/bin/bash -e
+if [ "$JOB" == "3.8-dev" ]; then
+ /bin/bash ci/build38.sh
+ exit 0
+fi
# edit the locale file if needed
if [ -n "$LOCALE_OVERRIDE" ]; then
| Backport PR #28730: CI: 3.8 build | https://api.github.com/repos/pandas-dev/pandas/pulls/29002 | 2019-10-15T11:39:03Z | 2019-10-15T13:51:37Z | 2019-10-15T13:51:37Z | 2019-10-15T13:51:38Z |
DOC: 0.25.2 whatsnew cleanup | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 73a5ac5f840be..a99751f9bab9f 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -1,6 +1,6 @@
.. _whatsnew_0252:
-What's new in 0.25.2 (October XX, 2019)
+What's new in 0.25.2 (October 15, 2019)
---------------------------------------
These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog
@@ -15,91 +15,24 @@ including other versions of pandas.
Bug fixes
~~~~~~~~~
-Categorical
-^^^^^^^^^^^
-
--
-
-Datetimelike
-^^^^^^^^^^^^
-
--
--
--
-
-Timezones
-^^^^^^^^^
-
--
-
-Numeric
-^^^^^^^
-
--
--
--
--
-
-Conversion
-^^^^^^^^^^
-
--
-
-Interval
-^^^^^^^^
-
--
-
Indexing
^^^^^^^^
-- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`).
+- Fix regression in :meth:`DataFrame.reindex` not following the ``limit`` argument (:issue:`28631`).
- Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`)
--
--
-
-Missing
-^^^^^^^
-
--
I/O
^^^
-- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`).
+- Fix regression in notebook display where ``<th>`` tags were missing for :attr:`DataFrame.index` values (:issue:`28204`).
- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
- Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`).
--
-
-Plotting
-^^^^^^^^
-
--
--
--
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`).
- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`)
--
--
--
-
-Reshaping
-^^^^^^^^^
-
--
--
--
--
--
-
-Sparse
-^^^^^^
-
--
Other
^^^^^
| https://api.github.com/repos/pandas-dev/pandas/pulls/29000 | 2019-10-15T11:36:22Z | 2019-10-15T13:51:07Z | 2019-10-15T13:51:07Z | 2019-10-15T13:51:58Z | |
Backport PR #28982 on branch 0.25.x (Document 3.8 compatibility) | diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml
index 321cc203961d5..1e2e6c33e8c15 100644
--- a/ci/deps/azure-36-32bit.yaml
+++ b/ci/deps/azure-36-32bit.yaml
@@ -3,6 +3,7 @@ channels:
- defaults
- conda-forge
dependencies:
+ - attrs=19.1.0
- gcc_linux-32
- gcc_linux-32
- gxx_linux-32
@@ -11,7 +12,7 @@ dependencies:
- python=3.6.*
- pytz=2017.2
# universal
- - pytest>=4.0.2,<5.0.0
+ - pytest
- pytest-xdist
- pytest-mock
- pytest-azurepipelines
diff --git a/doc/source/install.rst b/doc/source/install.rst
index fc99b458fa0af..7d1150c2f65fa 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 3.5.3 and above, 3.6, and 3.7.
+Officially Python 3.5.3 and above, 3.6, 3.7, and 3.8.
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 0fea95710d638..8e43d06001a5f 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -6,6 +6,10 @@ What's new in 0.25.2 (October XX, 2019)
These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog
including other versions of pandas.
+.. note::
+
+ Pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`).
+
.. _whatsnew_0252.bug_fixes:
Bug fixes
diff --git a/setup.py b/setup.py
index 50f58ceaf7c2e..8cc60ba6352c2 100755
--- a/setup.py
+++ b/setup.py
@@ -230,6 +230,7 @@ def build_extensions(self):
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Programming Language :: Cython",
"Topic :: Scientific/Engineering",
]
| Backport PR #28982: Document 3.8 compatibility | https://api.github.com/repos/pandas-dev/pandas/pulls/28999 | 2019-10-15T11:32:17Z | 2019-10-15T13:50:33Z | 2019-10-15T13:50:33Z | 2019-10-15T13:50:33Z |
CLN: Type error fix in tests\tseries\offsets\test_yqm_offsets.py | diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index ddf2c6e65b474..5cc10bf00203d 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,4 +1,5 @@
from datetime import date, datetime, time as dt_time, timedelta
+from typing import Type
import numpy as np
import pytest
@@ -92,7 +93,7 @@ def test_to_M8():
class Base:
- _offset = None
+ _offset = None # type: Type[DateOffset]
d = Timestamp(datetime(2008, 1, 2))
timezones = [
diff --git a/setup.cfg b/setup.cfg
index 462e79dae1039..4adbf4ed29dc7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -219,6 +219,3 @@ ignore_errors=True
[mypy-pandas.tests.tseries.offsets.test_offsets]
ignore_errors=True
-
-[mypy-pandas.tests.tseries.offsets.test_yqm_offsets]
-ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28996 | 2019-10-15T09:47:22Z | 2019-10-22T00:36:48Z | 2019-10-22T00:36:48Z | 2019-10-22T00:36:55Z |
ENH: Informative dtype message for for assert_series_equal | diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index 9571e8027ccf7..86e5d506e0779 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -141,7 +141,7 @@ def test_empty_dtypes(check_dtype):
df1["col1"] = df1["col1"].astype("int64")
if check_dtype:
- msg = "Attributes are different"
+ msg = r"Attributes of DataFrame\..* are different"
with pytest.raises(AssertionError, match=msg):
assert_frame_equal(df1, df2, **kwargs)
else:
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index a12d9386eb159..bad3f2e67f8bb 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -179,7 +179,7 @@ def test_series_equal_values_mismatch(check_less_precise):
def test_series_equal_categorical_mismatch(check_categorical):
- msg = """Attributes are different
+ msg = """Attributes of Series are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 4cf2776f5aa7c..73535e55d4fa5 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1156,7 +1156,9 @@ def assert_series_equal(
):
pass
else:
- assert_attr_equal("dtype", left, right)
+ assert_attr_equal(
+ "dtype", left, right, obj="Attributes of {obj}".format(obj=obj)
+ )
if check_exact:
assert_numpy_array_equal(
@@ -1315,8 +1317,9 @@ def assert_frame_equal(
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
- AssertionError: Attributes are different
...
+ AssertionError: Attributes of DataFrame.iloc[:, 1] are different
+
Attribute "dtype" are different
[left]: int64
[right]: float64
| Pass obj to assert_attr_equal in assert_series_equal.
- [ ] closes #28991
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- [x] Fix example in `assert_frame_equal` docstring | https://api.github.com/repos/pandas-dev/pandas/pulls/28993 | 2019-10-15T09:18:26Z | 2019-10-18T17:28:58Z | 2019-10-18T17:28:58Z | 2021-03-08T11:19:23Z |
tests/indexing/test_coercion.py typefix | diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 05b58b0eca9b8..4f38d7beb9c0b 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1,4 +1,5 @@
import itertools
+from typing import Dict, List
import numpy as np
import pytest
@@ -928,7 +929,7 @@ class TestReplaceSeriesCoercion(CoercionBase):
klasses = ["series"]
method = "replace"
- rep = {}
+ rep = {} # type: Dict[str, List]
rep["object"] = ["a", "b"]
rep["int64"] = [4, 5]
rep["float64"] = [1.1, 2.2]
diff --git a/setup.cfg b/setup.cfg
index 64494bf84363e..eb6e0269a2ea5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -196,9 +196,6 @@ ignore_errors=True
[mypy-pandas.tests.indexes.timedeltas.test_timedelta]
ignore_errors=True
-[mypy-pandas.tests.indexing.test_coercion]
-ignore_errors=True
-
[mypy-pandas.tests.indexing.test_loc]
ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28990 | 2019-10-15T08:10:48Z | 2019-10-16T23:52:14Z | 2019-10-16T23:52:14Z | 2019-10-16T23:52:20Z |
Backport PR #28841: BUG: use EA.astype in ExtensionBlock.to_native_types (#28841) | diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 0fea95710d638..49e87b34482b8 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -65,7 +65,7 @@ I/O
- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`).
- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
--
+- Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`).
-
Plotting
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 33517087c5d76..74189142af79f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -749,7 +749,6 @@ def _try_coerce_and_cast_result(self, result, dtype=None):
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
-
values = self.get_values()
if slicer is not None:
@@ -1848,6 +1847,23 @@ def get_values(self, dtype=None):
def to_dense(self):
return np.asarray(self.values)
+ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
+ """override to use ExtensionArray astype for the conversion"""
+ values = self.values
+ if slicer is not None:
+ values = values[slicer]
+ mask = isna(values)
+
+ try:
+ values = values.astype(str)
+ values[mask] = na_rep
+ except Exception:
+ # eg SparseArray does not support setitem, needs to be converted to ndarray
+ return super().to_native_types(slicer, na_rep, quoting, **kwargs)
+
+ # we are expected to return a 2-d ndarray
+ return values.reshape(1, len(values))
+
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
@@ -2374,6 +2390,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
is_extension = True
_can_hold_element = DatetimeBlock._can_hold_element
+ to_native_types = DatetimeBlock.to_native_types
@property
def _holder(self):
diff --git a/pandas/tests/extension/list/__init__.py b/pandas/tests/extension/list/__init__.py
new file mode 100644
index 0000000000000..108f1937d07d3
--- /dev/null
+++ b/pandas/tests/extension/list/__init__.py
@@ -0,0 +1,3 @@
+from .array import ListArray, ListDtype, make_data
+
+__all__ = ["ListArray", "ListDtype", "make_data"]
diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
new file mode 100644
index 0000000000000..0ca9fadb68829
--- /dev/null
+++ b/pandas/tests/extension/list/array.py
@@ -0,0 +1,133 @@
+"""
+Test extension array for storing nested data in a pandas container.
+
+The ListArray stores an ndarray of lists.
+"""
+import numbers
+import random
+import string
+
+import numpy as np
+
+from pandas.core.dtypes.base import ExtensionDtype
+
+import pandas as pd
+from pandas.core.arrays import ExtensionArray
+
+
+class ListDtype(ExtensionDtype):
+ type = list
+ name = "list"
+ na_value = np.nan
+
+ @classmethod
+ def construct_array_type(cls):
+ """
+ Return the array type associated with this dtype.
+
+ Returns
+ -------
+ type
+ """
+ return ListArray
+
+ @classmethod
+ def construct_from_string(cls, string):
+ if string == cls.name:
+ return cls()
+ else:
+ raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+
+
+class ListArray(ExtensionArray):
+ dtype = ListDtype()
+ __array_priority__ = 1000
+
+ def __init__(self, values, dtype=None, copy=False):
+ if not isinstance(values, np.ndarray):
+ raise TypeError("Need to pass a numpy array as values")
+ for val in values:
+ if not isinstance(val, self.dtype.type) and not pd.isna(val):
+ raise TypeError("All values must be of type " + str(self.dtype.type))
+ self.data = values
+
+ @classmethod
+ def _from_sequence(cls, scalars, dtype=None, copy=False):
+ data = np.empty(len(scalars), dtype=object)
+ data[:] = scalars
+ return cls(data)
+
+ def __getitem__(self, item):
+ if isinstance(item, numbers.Integral):
+ return self.data[item]
+ else:
+ # slice, list-like, mask
+ return type(self)(self.data[item])
+
+ def __len__(self) -> int:
+ return len(self.data)
+
+ def isna(self):
+ return np.array(
+ [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool
+ )
+
+ def take(self, indexer, allow_fill=False, fill_value=None):
+ # re-implement here, since NumPy has trouble setting
+ # sized objects like UserDicts into scalar slots of
+ # an ndarary.
+ indexer = np.asarray(indexer)
+ msg = (
+ "Index is out of bounds or cannot do a "
+ "non-empty take from an empty array."
+ )
+
+ if allow_fill:
+ if fill_value is None:
+ fill_value = self.dtype.na_value
+ # bounds check
+ if (indexer < -1).any():
+ raise ValueError
+ try:
+ output = [
+ self.data[loc] if loc != -1 else fill_value for loc in indexer
+ ]
+ except IndexError:
+ raise IndexError(msg)
+ else:
+ try:
+ output = [self.data[loc] for loc in indexer]
+ except IndexError:
+ raise IndexError(msg)
+
+ return self._from_sequence(output)
+
+ def copy(self):
+ return type(self)(self.data[:])
+
+ def astype(self, dtype, copy=True):
+ if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
+ if copy:
+ return self.copy()
+ return self
+ elif pd.api.types.is_string_dtype(dtype) and not pd.api.types.is_object_dtype(
+ dtype
+ ):
+ # numpy has problems with astype(str) for nested elements
+ return np.array([str(x) for x in self.data], dtype=dtype)
+ return np.array(self.data, dtype=dtype, copy=copy)
+
+ @classmethod
+ def _concat_same_type(cls, to_concat):
+ data = np.concatenate([x.data for x in to_concat])
+ return cls(data)
+
+
+def make_data():
+ # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
+ data = np.empty(100, dtype=object)
+ data[:] = [
+ [random.choice(string.ascii_letters) for _ in range(random.randint(0, 10))]
+ for _ in range(100)
+ ]
+ return data
diff --git a/pandas/tests/extension/list/test_list.py b/pandas/tests/extension/list/test_list.py
new file mode 100644
index 0000000000000..c5c4417155562
--- /dev/null
+++ b/pandas/tests/extension/list/test_list.py
@@ -0,0 +1,30 @@
+import pytest
+
+import pandas as pd
+
+from .array import ListArray, ListDtype, make_data
+
+
+@pytest.fixture
+def dtype():
+ return ListDtype()
+
+
+@pytest.fixture
+def data():
+ """Length-100 ListArray for semantics test."""
+ data = make_data()
+
+ while len(data[0]) == len(data[1]):
+ data = make_data()
+
+ return ListArray(data)
+
+
+def test_to_csv(data):
+ # https://github.com/pandas-dev/pandas/issues/28840
+ # array with list-likes fail when doing astype(str) on the numpy array
+ # which was done in to_native_types
+ df = pd.DataFrame({"a": data})
+ res = df.to_csv()
+ assert str(data[0]) in res
| https://api.github.com/repos/pandas-dev/pandas/pulls/28985 | 2019-10-15T02:38:13Z | 2019-10-15T13:17:18Z | 2019-10-15T13:17:18Z | 2019-10-15T13:17:18Z | |
Document 3.8 compatibility | diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index fc99b458fa0af..7d1150c2f65fa 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 3.5.3 and above, 3.6, and 3.7.
+Officially Python 3.5.3 and above, 3.6, 3.7, and 3.8.
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index 9789c9fce3541..54a451e4427a2 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -6,6 +6,10 @@ What's new in 0.25.2 (October XX, 2019)
These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog
including other versions of pandas.
+.. note::
+
+ Pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`).
+
.. _whatsnew_0252.bug_fixes:
Bug fixes
diff --git a/setup.py b/setup.py
index 04aedcb101e25..c35a0e75ecb80 100755
--- a/setup.py
+++ b/setup.py
@@ -228,6 +228,7 @@ def build_extensions(self):
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Programming Language :: Cython",
"Topic :: Scientific/Engineering",
]
| Closes #28147 | https://api.github.com/repos/pandas-dev/pandas/pulls/28982 | 2019-10-14T21:46:18Z | 2019-10-15T11:31:52Z | 2019-10-15T11:31:52Z | 2019-10-15T11:31:55Z |
CLN: declare types in rank_1d_, rank_2d | diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index 5dac94394c7ed..1ba1667b687be 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -24,7 +24,7 @@ dtypes = [('object', 'object', 'Infinity()', 'NegInfinity()'),
@cython.wraparound(False)
@cython.boundscheck(False)
-def rank_1d_{{dtype}}(object in_arr, ties_method='average',
+def rank_1d_{{dtype}}({{ctype}}[:] in_arr, ties_method='average',
ascending=True, na_option='keep', pct=False):
"""
Fast NaN-friendly version of scipy.stats.rankdata
@@ -189,7 +189,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average',
return ranks
-def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average',
+def rank_2d_{{dtype}}({{ctype}}[:, :] in_arr, axis=0, ties_method='average',
ascending=True, na_option='keep', pct=False):
"""
Fast NaN-friendly version of scipy.stats.rankdata
@@ -226,12 +226,10 @@ def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average',
keep_na = na_option == 'keep'
- in_arr = np.asarray(in_arr)
-
if axis == 0:
- values = in_arr.T.copy()
+ values = np.asarray(in_arr).T.copy()
else:
- values = in_arr.copy()
+ values = np.asarray(in_arr).copy()
{{if dtype == 'object'}}
if values.dtype != np.object_:
| This is necessary before we can move these functions to use fused types. | https://api.github.com/repos/pandas-dev/pandas/pulls/28978 | 2019-10-14T18:37:15Z | 2019-10-15T12:09:06Z | 2019-10-15T12:09:06Z | 2019-10-15T15:23:58Z |
CLN: remove unnecessary get_value_at calls | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 979dad6db0838..22f7104debf10 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -41,11 +41,13 @@ cdef inline bint is_definitely_invalid_key(object val):
cpdef get_value_at(ndarray arr, object loc, object tz=None):
+ obj = util.get_value_at(arr, loc)
+
if arr.descr.type_num == NPY_DATETIME:
- return Timestamp(util.get_value_at(arr, loc), tz=tz)
+ return Timestamp(obj, tz=tz)
elif arr.descr.type_num == NPY_TIMEDELTA:
- return Timedelta(util.get_value_at(arr, loc))
- return util.get_value_at(arr, loc)
+ return Timedelta(obj)
+ return obj
# Don't populate hash tables in monotonic indexes larger than this
@@ -102,6 +104,9 @@ cdef class IndexEngine:
arr[loc] = value
cpdef get_loc(self, object val):
+ cdef:
+ Py_ssize_t loc
+
if is_definitely_invalid_key(val):
raise TypeError("'{val}' is an invalid key".format(val=val))
@@ -114,7 +119,7 @@ cdef class IndexEngine:
loc = _bin_search(values, val) # .searchsorted(val, side='left')
if loc >= len(values):
raise KeyError(val)
- if util.get_value_at(values, loc) != val:
+ if values[loc] != val:
raise KeyError(val)
return loc
@@ -352,22 +357,22 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1:
Py_ssize_t mid = 0, lo = 0, hi = len(values) - 1
object pval
- if hi == 0 or (hi > 0 and val > util.get_value_at(values, hi)):
+ if hi == 0 or (hi > 0 and val > values[hi]):
return len(values)
while lo < hi:
mid = (lo + hi) // 2
- pval = util.get_value_at(values, mid)
+ pval = values[mid]
if val < pval:
hi = mid
elif val > pval:
lo = mid + 1
else:
- while mid > 0 and val == util.get_value_at(values, mid - 1):
+ while mid > 0 and val == values[mid - 1]:
mid -= 1
return mid
- if val <= util.get_value_at(values, mid):
+ if val <= values[mid]:
return mid
else:
return mid + 1
@@ -387,13 +392,16 @@ cdef class DatetimeEngine(Int64Engine):
return 'M8[ns]'
def __contains__(self, object val):
+ cdef:
+ int64_t loc
+
if self.over_size_threshold and self.is_monotonic_increasing:
if not self.is_unique:
return self._get_loc_duplicates(val)
values = self._get_index_values()
conv = maybe_datetimelike_to_i8(val)
loc = values.searchsorted(conv, side='left')
- return util.get_value_at(values, loc) == conv
+ return values[loc] == conv
self._ensure_mapping_populated()
return maybe_datetimelike_to_i8(val) in self.mapping
@@ -405,6 +413,8 @@ cdef class DatetimeEngine(Int64Engine):
return algos.is_monotonic(values, timelike=True)
cpdef get_loc(self, object val):
+ cdef:
+ int64_t loc
if is_definitely_invalid_key(val):
raise TypeError
@@ -422,7 +432,7 @@ cdef class DatetimeEngine(Int64Engine):
self._date_check_type(val)
raise KeyError(val)
- if loc == len(values) or util.get_value_at(values, loc) != conv:
+ if loc == len(values) or values[loc] != conv:
raise KeyError(val)
return loc
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1c2f80b832201..a3a50644e58f3 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -782,8 +782,16 @@ def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups):
return starts, ends
-def indices_fast(object index, const int64_t[:] labels, list keys,
+def indices_fast(ndarray index, const int64_t[:] labels, list keys,
list sorted_labels):
+ """
+ Parameters
+ ----------
+ index : ndarray
+ labels : ndarray[int64]
+ keys : list
+ sorted_labels : list[ndarray[int64]]
+ """
cdef:
Py_ssize_t i, j, k, lab, cur, start, n = len(labels)
dict result = {}
@@ -803,8 +811,7 @@ def indices_fast(object index, const int64_t[:] labels, list keys,
if lab != -1:
tup = PyTuple_New(k)
for j in range(k):
- val = util.get_value_at(keys[j],
- sorted_labels[j][i - 1])
+ val = keys[j][sorted_labels[j][i - 1]]
PyTuple_SET_ITEM(tup, j, val)
Py_INCREF(val)
@@ -814,8 +821,7 @@ def indices_fast(object index, const int64_t[:] labels, list keys,
tup = PyTuple_New(k)
for j in range(k):
- val = util.get_value_at(keys[j],
- sorted_labels[j][n - 1])
+ val = keys[j][sorted_labels[j][n - 1]]
PyTuple_SET_ITEM(tup, j, val)
Py_INCREF(val)
result[tup] = index[start:]
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 34eb9412451c5..0eac0e94f0beb 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -121,7 +121,7 @@ cdef class Reducer:
for i in range(self.nresults):
if has_ndarray_labels:
- name = util.get_value_at(labels, i)
+ name = labels[i]
elif has_labels:
# labels is an ExtensionArray
name = labels[i]
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index e6edad656d430..94810369785d3 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -303,8 +303,8 @@ def get_flattened_iterator(comp_ids, ngroups, levels, labels):
def get_indexer_dict(label_list, keys):
- """ return a diction of {labels} -> {indexers} """
- shape = list(map(len, keys))
+ """ return a dict of {labels} -> {indexers} """
+ shape = [len(x) for x in keys]
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = (
| I found in another branch that these calls didn't play nicely with `nogil`, so went through and removed the ones that are unnecessary. Turns out we really only need the one in libindex.get_value_at, since that is the only place where we _arent_ assured that `loc` is intlike. | https://api.github.com/repos/pandas-dev/pandas/pulls/28977 | 2019-10-14T18:13:36Z | 2019-10-15T12:08:42Z | 2019-10-15T12:08:42Z | 2019-10-15T15:26:44Z |
PERF: Benchmark merge with non-int64 and tolerance (#28922) | diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 6aa82a43a4d6a..5cf9f6336ba0c 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -273,10 +273,10 @@ def time_merge_ordered(self):
class MergeAsof:
- params = [["backward", "forward", "nearest"]]
- param_names = ["direction"]
+ params = [["backward", "forward", "nearest"], [None, 5]]
+ param_names = ["direction", "tolerance"]
- def setup(self, direction):
+ def setup(self, direction, tolerance):
one_count = 200000
two_count = 1000000
@@ -303,6 +303,9 @@ def setup(self, direction):
df1["time32"] = np.int32(df1.time)
df2["time32"] = np.int32(df2.time)
+ df1["timeu64"] = np.uint64(df1.time)
+ df2["timeu64"] = np.uint64(df2.time)
+
self.df1a = df1[["time", "value1"]]
self.df2a = df2[["time", "value2"]]
self.df1b = df1[["time", "key", "value1"]]
@@ -313,22 +316,52 @@ def setup(self, direction):
self.df2d = df2[["time32", "value2"]]
self.df1e = df1[["time", "key", "key2", "value1"]]
self.df2e = df2[["time", "key", "key2", "value2"]]
+ self.df1f = df1[["timeu64", "value1"]]
+ self.df2f = df2[["timeu64", "value2"]]
+
+ def time_on_int(self, direction, tolerance):
+ merge_asof(
+ self.df1a, self.df2a, on="time", direction=direction, tolerance=tolerance
+ )
- def time_on_int(self, direction):
- merge_asof(self.df1a, self.df2a, on="time", direction=direction)
+ def time_on_int32(self, direction, tolerance):
+ merge_asof(
+ self.df1d, self.df2d, on="time32", direction=direction, tolerance=tolerance
+ )
- def time_on_int32(self, direction):
- merge_asof(self.df1d, self.df2d, on="time32", direction=direction)
+ def time_on_uint64(self, direction, tolerance):
+ merge_asof(
+ self.df1f, self.df2f, on="timeu64", direction=direction, tolerance=tolerance
+ )
- def time_by_object(self, direction):
- merge_asof(self.df1b, self.df2b, on="time", by="key", direction=direction)
+ def time_by_object(self, direction, tolerance):
+ merge_asof(
+ self.df1b,
+ self.df2b,
+ on="time",
+ by="key",
+ direction=direction,
+ tolerance=tolerance,
+ )
- def time_by_int(self, direction):
- merge_asof(self.df1c, self.df2c, on="time", by="key2", direction=direction)
+ def time_by_int(self, direction, tolerance):
+ merge_asof(
+ self.df1c,
+ self.df2c,
+ on="time",
+ by="key2",
+ direction=direction,
+ tolerance=tolerance,
+ )
- def time_multiby(self, direction):
+ def time_multiby(self, direction, tolerance):
merge_asof(
- self.df1e, self.df2e, on="time", by=["key", "key2"], direction=direction
+ self.df1e,
+ self.df2e,
+ on="time",
+ by=["key", "key2"],
+ direction=direction,
+ tolerance=tolerance,
)
| - [X] closes #28922
- [ ] tests added / passed
- [X] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/28974 | 2019-10-14T14:46:30Z | 2019-10-22T00:41:22Z | 2019-10-22T00:41:22Z | 2019-10-22T06:57:15Z |
CLN: pandas-dev#28926 Fix mypy errors in pandas/tests/io/parser/conftest.py | diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index 2c347a096006a..183ad500b15f3 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -1,4 +1,5 @@
import os
+from typing import List, Optional
import pytest
@@ -6,9 +7,9 @@
class BaseParser:
- engine = None
+ engine = None # type: Optional[str]
low_memory = True
- float_precision_choices = []
+ float_precision_choices = [] # type: List[Optional[str]]
def update_kwargs(self, kwargs):
kwargs = kwargs.copy()
@@ -59,11 +60,11 @@ def csv1(csv_dir_path):
_py_parsers_only = [_pythonParser]
_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
-_all_parsers = _c_parsers_only + _py_parsers_only
+_all_parsers = [*_c_parsers_only, *_py_parsers_only]
_py_parser_ids = ["python"]
_c_parser_ids = ["c_high", "c_low"]
-_all_parser_ids = _c_parser_ids + _py_parser_ids
+_all_parser_ids = [*_c_parser_ids, *_py_parser_ids]
@pytest.fixture(params=_all_parsers, ids=_all_parser_ids)
diff --git a/setup.cfg b/setup.cfg
index 64494bf84363e..e3cfd8858cc4a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -205,9 +205,6 @@ ignore_errors=True
[mypy-pandas.tests.io.json.test_ujson]
ignore_errors=True
-[mypy-pandas.tests.io.parser.conftest]
-ignore_errors=True
-
[mypy-pandas.tests.io.test_sql]
ignore_errors=True
| - [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28973 | 2019-10-14T12:08:20Z | 2019-10-15T12:30:28Z | 2019-10-15T12:30:28Z | 2019-10-15T12:34:01Z |
CLN: pandas-dev#28926 Fix pandas/tests/tseries/offsets/test_offsets_properties | diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py
index 880ff1f137520..a05de78e299f7 100644
--- a/pandas/tests/tseries/offsets/test_offsets_properties.py
+++ b/pandas/tests/tseries/offsets/test_offsets_properties.py
@@ -36,8 +36,8 @@
with warnings.catch_warnings():
warnings.simplefilter("ignore")
- min_dt = (pd.Timestamp(1900, 1, 1).to_pydatetime(),)
- max_dt = (pd.Timestamp(1900, 1, 1).to_pydatetime(),)
+ min_dt = pd.Timestamp(1900, 1, 1).to_pydatetime()
+ max_dt = pd.Timestamp(1900, 1, 1).to_pydatetime()
gen_date_range = st.builds(
pd.date_range,
diff --git a/setup.cfg b/setup.cfg
index 64494bf84363e..775999bc21b97 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -226,8 +226,5 @@ ignore_errors=True
[mypy-pandas.tests.tseries.offsets.test_offsets]
ignore_errors=True
-[mypy-pandas.tests.tseries.offsets.test_offsets_properties]
-ignore_errors=True
-
[mypy-pandas.tests.tseries.offsets.test_yqm_offsets]
ignore_errors=True
| …/teset_offsets_properties.py
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28972 | 2019-10-14T11:01:31Z | 2019-10-15T12:20:16Z | 2019-10-15T12:20:15Z | 2019-10-15T12:33:28Z |
fix #28926 mypy error in pandas\tests\arrays\test_array.py | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index a21d9e67e49e5..78cc54db4b1b8 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -831,7 +831,9 @@ def _raise_on_incompatible(left, right):
def period_array(
- data: Sequence[Optional[Period]], freq: Optional[Tick] = None, copy: bool = False
+ data: Sequence[Optional[Period]],
+ freq: Optional[Union[str, Tick]] = None,
+ copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
diff --git a/setup.cfg b/setup.cfg
index f32deff9dafb8..c9ba13443e97c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -136,15 +136,9 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.arrays.test_array]
-ignore_errors=True
-
[mypy-pandas.tests.arrays.test_datetimelike]
ignore_errors=True
-[mypy-pandas.tests.arrays.test_period]
-ignore_errors=True
-
[mypy-pandas.tests.dtypes.test_common]
ignore_errors=True
@@ -190,9 +184,6 @@ ignore_errors=True
[mypy-pandas.tests.series.test_operators]
ignore_errors=True
-[mypy-pandas.tests.test_base]
-ignore_errors=True
-
[mypy-pandas.tests.tseries.offsets.test_offsets]
ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28970 | 2019-10-14T08:52:52Z | 2019-10-21T11:57:29Z | 2019-10-21T11:57:28Z | 2019-10-21T11:57:40Z |
CLN: Exception in _libs | diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index ac713a928973f..08bfaf21db9fb 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -158,7 +158,7 @@ cdef class IntervalTree(IntervalMixin):
# TODO: write get_indexer_intervals
cdef:
- size_t old_len
+ Py_ssize_t old_len
Py_ssize_t i
Int64Vector result
@@ -179,7 +179,7 @@ cdef class IntervalTree(IntervalMixin):
the given array of scalar targets. Non-unique positions are repeated.
"""
cdef:
- size_t old_len
+ Py_ssize_t old_len
Py_ssize_t i
Int64Vector result, missing
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1c2f80b832201..36dddbb446e83 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2066,7 +2066,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
floats[i] = float(val)
complexes[i] = complex(val)
seen.float_ = 1
- except Exception:
+ except (ValueError, TypeError):
seen.object_ = 1
break
else:
@@ -2346,7 +2346,8 @@ def to_object_array_tuples(rows: object):
row = rows[i]
for j in range(len(row)):
result[i, j] = row[j]
- except Exception:
+ except TypeError:
+ # e.g. "Expected tuple, got list"
# upcast any subclasses to tuple
for i in range(n):
row = (rows[i],) if checknull(rows[i]) else tuple(rows[i])
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 33665484311ba..bf0a0ae5a3fe9 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -581,7 +581,7 @@ def try_parse_dates(object[:] values, parser=None,
else:
result[i] = parse_date(values[i])
except Exception:
- # failed
+ # Since parser is user-defined, we can't guess what it migh raise
return values
else:
parse_date = parser
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index cbfbc14c35b35..bc1fdfae99de9 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -226,11 +226,8 @@ cdef object get_dst_info(object tz):
if treat_tz_as_pytz(tz):
trans = np.array(tz._utc_transition_times, dtype='M8[ns]')
trans = trans.view('i8')
- try:
- if tz._utc_transition_times[0].year == 1:
- trans[0] = NPY_NAT + 1
- except Exception:
- pass
+ if tz._utc_transition_times[0].year == 1:
+ trans[0] = NPY_NAT + 1
deltas = unbox_utcoffsets(tz._transition_info)
typ = 'pytz'
| I think this is the last of the ones in _libs.
Also fix some build warnings in interval.pyx | https://api.github.com/repos/pandas-dev/pandas/pulls/28967 | 2019-10-14T03:43:25Z | 2019-10-15T12:18:37Z | 2019-10-15T12:18:36Z | 2019-10-15T15:22:32Z |
Fix mypy errors for pandas\tests\*: test_convert_to.py | diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 3f0768ad5bdac..c9a7507969f5b 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -575,9 +575,9 @@ def test_frame_to_dict_tz(self):
),
),
(
- defaultdict(list),
+ defaultdict(dict),
defaultdict(
- list,
+ dict,
{
0: {"int_col": 1, "float_col": 1.0},
1: {"int_col": 2, "float_col": 2.0},
diff --git a/setup.cfg b/setup.cfg
index 462e79dae1039..229fb41bf5d79 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -166,9 +166,6 @@ ignore_errors=True
[mypy-pandas.tests.frame.test_constructors]
ignore_errors=True
-[mypy-pandas.tests.frame.test_convert_to]
-ignore_errors=True
-
[mypy-pandas.tests.indexes.datetimes.test_datetimelike]
ignore_errors=True
| - [x ] xref #28926
- [x ] tests added / passed
- [x ] passes `black pandas`
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [N/A ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28965 | 2019-10-14T03:09:49Z | 2019-10-18T17:56:07Z | 2019-10-18T17:56:07Z | 2019-10-18T17:56:18Z |
TST: added test for df.loc modify datetime columns | diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 35291efecd1ac..5e517d556a095 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -712,6 +712,32 @@ def test_loc_assign_non_ns_datetime(self, unit):
expected = Series(df.loc[:, "expected"], name=unit)
tm.assert_series_equal(df.loc[:, unit], expected)
+ def test_loc_modify_datetime(self):
+ # see gh-28837
+ df = DataFrame.from_dict(
+ {"date": [1485264372711, 1485265925110, 1540215845888, 1540282121025]}
+ )
+
+ df["date_dt"] = pd.to_datetime(df["date"], unit="ms", cache=True)
+
+ df.loc[:, "date_dt_cp"] = df.loc[:, "date_dt"]
+ df.loc[[2, 3], "date_dt_cp"] = df.loc[[2, 3], "date_dt"]
+
+ expected = DataFrame(
+ [
+ [1485264372711, "2017-01-24 13:26:12.711", "2017-01-24 13:26:12.711"],
+ [1485265925110, "2017-01-24 13:52:05.110", "2017-01-24 13:52:05.110"],
+ [1540215845888, "2018-10-22 13:44:05.888", "2018-10-22 13:44:05.888"],
+ [1540282121025, "2018-10-23 08:08:41.025", "2018-10-23 08:08:41.025"],
+ ],
+ columns=["date", "date_dt", "date_dt_cp"],
+ )
+
+ columns = ["date_dt", "date_dt_cp"]
+ expected[columns] = expected[columns].apply(pd.to_datetime)
+
+ tm.assert_frame_equal(df, expected)
+
def test_loc_setitem_frame(self):
df = self.frame_labels
| relevant GitHub issue #28837
- [x] closes #28837
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28964 | 2019-10-14T02:48:18Z | 2019-10-29T06:02:28Z | 2019-10-29T06:02:28Z | 2019-10-29T06:02:44Z |
CLN: move small bits outside of try/excepts | diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in
index 3a3adc71875ed..f10061a417c03 100644
--- a/pandas/_libs/algos_take_helper.pxi.in
+++ b/pandas/_libs/algos_take_helper.pxi.in
@@ -276,7 +276,6 @@ cdef _take_2d(ndarray[take_t, ndim=2] values, object idx):
Py_ssize_t i, j, N, K
ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx
ndarray[take_t, ndim=2] result
- object val
N, K = (<object>values).shape
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 56ffd3db6e942..d07a120560196 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -267,7 +267,7 @@ def aggregate(self, func, *args, **kwargs):
agg = aggregate
- def _try_aggregate_string_function(self, arg, *args, **kwargs):
+ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
@@ -292,12 +292,10 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs):
f = getattr(np, arg, None)
if f is not None:
- try:
+ if hasattr(self, "__array__"):
+ # in particular exclude Window
return f(self, *args, **kwargs)
- except (AttributeError, TypeError):
- pass
-
raise AttributeError(
"'{arg}' is not a valid function for "
"'{cls}' object".format(arg=arg, cls=type(self).__name__)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 068d5e5275f0d..76a3893d3af2a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -952,6 +952,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
+ assert how == "ohlc"
deleted_items.append(locs)
continue
@@ -1025,17 +1026,20 @@ def _aggregate_frame(self, func, *args, **kwargs):
if axis != obj._info_axis_number:
try:
for name, data in self:
- result[name] = self._try_cast(func(data, *args, **kwargs), data)
+ fres = func(data, *args, **kwargs)
+ result[name] = self._try_cast(fres, data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
+ data = self.get_group(name, obj=obj)
try:
- data = self.get_group(name, obj=obj)
- result[name] = self._try_cast(func(data, *args, **kwargs), data)
+ fres = func(data, *args, **kwargs)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
+ else:
+ result[name] = self._try_cast(fres, data)
return self._wrap_frame_output(result, obj)
@@ -1410,9 +1414,10 @@ def _transform_item_by_item(self, obj, wrapper):
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
- inds.append(i)
except Exception:
pass
+ else:
+ inds.append(i)
if len(output) == 0:
raise TypeError("Transform function invalid for data types")
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index cc297629a7004..8461b4381e2ea 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -598,14 +598,7 @@ def pipe(self, func, *args, **kwargs):
plot = property(GroupByPlot)
def _make_wrapper(self, name):
- if name not in self._apply_whitelist:
- is_callable = callable(getattr(self._selected_obj, name, None))
- kind = " callable " if is_callable else " "
- msg = (
- "Cannot access{0}attribute {1!r} of {2!r} objects, try "
- "using the 'apply' method".format(kind, name, type(self).__name__)
- )
- raise AttributeError(msg)
+ assert name in self._apply_whitelist
self._set_group_selection()
@@ -919,9 +912,10 @@ def _python_agg_general(self, func, *args, **kwargs):
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
- output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
+ else:
+ output[name] = self._try_cast(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
| https://api.github.com/repos/pandas-dev/pandas/pulls/28962 | 2019-10-14T02:15:28Z | 2019-10-16T12:21:57Z | 2019-10-16T12:21:56Z | 2019-10-16T15:25:52Z | |
CLN: fix mypy errors in pandas\tests\indexes\interval\test_base.py #28926 | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index b657d8d16df81..1ac6370860ba6 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1,4 +1,5 @@
import gc
+from typing import Optional, Type
import numpy as np
import pytest
@@ -30,7 +31,7 @@
class Base:
""" base class for index sub-class tests """
- _holder = None
+ _holder = None # type: Optional[Type[Index]]
_compat_props = ["shape", "ndim", "size", "nbytes"]
def test_pickle_compat_construction(self):
diff --git a/setup.cfg b/setup.cfg
index f7920fb61b942..d4657100c1291 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -148,33 +148,12 @@ ignore_errors=True
[mypy-pandas.tests.extension.json.test_json]
ignore_errors=True
-[mypy-pandas.tests.indexes.datetimes.test_datetimelike]
-ignore_errors=True
-
-[mypy-pandas.tests.indexes.interval.test_base]
-ignore_errors=True
-
[mypy-pandas.tests.indexes.interval.test_interval_tree]
ignore_errors=True
-[mypy-pandas.tests.indexes.period.test_period]
-ignore_errors=True
-
[mypy-pandas.tests.indexes.test_base]
ignore_errors=True
-[mypy-pandas.tests.indexes.test_category]
-ignore_errors=True
-
-[mypy-pandas.tests.indexes.test_numeric]
-ignore_errors=True
-
-[mypy-pandas.tests.indexes.test_range]
-ignore_errors=True
-
-[mypy-pandas.tests.indexes.timedeltas.test_timedelta]
-ignore_errors=True
-
[mypy-pandas.tests.indexing.test_loc]
ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Hi! I based this change on what I saw in #28947. However, when I test this with `mypy pandas/tests/indexes/interval/test_base.py`, I get this error, which I didn't expect:
```
pandas/tests/indexes/interval/test_base.py:16: error: Incompatible types in assignment (expression has type "Optional[Type[IntervalIndex]]", base class "Base" defined the type as "None")
```
I thought the "Optional" that was added would accept the "None." But, additionally, in my local environment, I also can't see any of the errors listed in issue #28926, so maybe I don't have something set up right.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28961 | 2019-10-14T00:16:53Z | 2019-10-28T19:56:53Z | 2019-10-28T19:56:53Z | 2019-10-28T19:57:01Z |
Fix mypy errors for pandas\tests\* #28926 (test_algos.py) | diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index a5706d8baa614..6df2c8faf7aee 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -767,7 +767,7 @@ def test_same_object_is_in(self):
# with similar behavior, then we at least should
# fall back to usual python's behavior: "a in [a] == True"
class LikeNan:
- def __eq__(self):
+ def __eq__(self, other):
return False
def __hash__(self):
diff --git a/setup.cfg b/setup.cfg
index 149af6c283d05..64494bf84363e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -220,9 +220,6 @@ ignore_errors=True
[mypy-pandas.tests.series.test_operators]
ignore_errors=True
-[mypy-pandas.tests.test_algos]
-ignore_errors=True
-
[mypy-pandas.tests.test_base]
ignore_errors=True
| - [x] xref #28926
- [x] tests added / passed (verified that tests still pass)
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [N/A] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28960 | 2019-10-13T23:14:56Z | 2019-10-14T00:28:49Z | 2019-10-14T00:28:49Z | 2019-10-14T00:28:58Z |
REF: re-raise AssertionError unchanged | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 3069bbbf34bb7..3f741f08d1363 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -441,7 +441,7 @@ def _group_add(floating[:, :] out,
floating[:, :] sumx, nobs
if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
+ raise ValueError("len(index) != len(labels)")
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
@@ -491,7 +491,7 @@ def _group_prod(floating[:, :] out,
floating[:, :] prodx, nobs
if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
+ raise ValueError("len(index) != len(labels)")
nobs = np.zeros_like(out)
prodx = np.ones_like(out)
@@ -541,7 +541,7 @@ def _group_var(floating[:, :] out,
assert min_count == -1, "'min_count' only used in add and prod"
if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
+ raise ValueError("len(index) != len(labels)")
nobs = np.zeros_like(out)
mean = np.zeros_like(out)
@@ -596,7 +596,7 @@ def _group_mean(floating[:, :] out,
assert min_count == -1, "'min_count' only used in add and prod"
if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
+ raise ValueError("len(index) != len(labels)")
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index aa817ec451aa5..8cd727e744519 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -261,6 +261,8 @@ def aggregate(self, func=None, *args, **kwargs):
try:
return self._python_agg_general(func, *args, **kwargs)
+ except AssertionError:
+ raise
except Exception:
result = self._aggregate_named(func, *args, **kwargs)
@@ -887,6 +889,8 @@ def aggregate(self, func=None, *args, **kwargs):
result = self._aggregate_multiple_funcs(
[func], _level=_level, _axis=self.axis
)
+ except AssertionError:
+ raise
except Exception:
result = self._aggregate_frame(func)
else:
@@ -1036,6 +1040,8 @@ def _aggregate_frame(self, func, *args, **kwargs):
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = self._try_cast(fres, data)
+ except AssertionError:
+ raise
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
@@ -1043,6 +1049,8 @@ def _aggregate_frame(self, func, *args, **kwargs):
data = self.get_group(name, obj=obj)
try:
fres = func(data, *args, **kwargs)
+ except AssertionError:
+ raise
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
@@ -1398,6 +1406,8 @@ def _choose_path(self, fast_path, slow_path, group):
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
+ except AssertionError:
+ raise
except Exception:
# Hard to know ex-ante what exceptions `fast_path` might raise
return path, res
@@ -1422,6 +1432,8 @@ def _transform_item_by_item(self, obj, wrapper):
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
+ except AssertionError:
+ raise
except Exception:
pass
else:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 92ea733cc3447..6f2868482b798 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -44,13 +44,7 @@ class providing the base-class of operations.
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
-from pandas.core.base import (
- DataError,
- GroupByError,
- PandasObject,
- SelectionMixin,
- SpecificationError,
-)
+from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.frame import DataFrame
@@ -862,8 +856,6 @@ def _cython_transform(self, how, numeric_only=True, **kwargs):
result, names = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
- except AssertionError as e:
- raise GroupByError(str(e))
if self._transform_should_cast(how):
output[name] = self._try_cast(result, obj)
else:
@@ -890,12 +882,7 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1):
if numeric_only and not is_numeric:
continue
- try:
- result, names = self.grouper.aggregate(
- obj.values, how, min_count=min_count
- )
- except AssertionError as e:
- raise GroupByError(str(e))
+ result, names = self.grouper.aggregate(obj.values, how, min_count=min_count)
output[name] = self._try_cast(result, obj)
if len(output) == 0:
@@ -1353,8 +1340,8 @@ def f(self, **kwargs):
# try a cython aggregation if we can
try:
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
- except AssertionError as e:
- raise SpecificationError(str(e))
+ except AssertionError:
+ raise
except DataError:
pass
except Exception:
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 40517eefe4d5d..27415a1bacdbd 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -647,6 +647,8 @@ def _transform(
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
+ except AssertionError:
+ raise
except Exception:
return self._aggregate_series_pure_python(obj, func)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 545bc21dd6d1b..5185d95cfac4c 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -360,6 +360,8 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
+ except AssertionError:
+ raise
except Exception:
# we have a non-reducing function
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 7e3cbed09c6d7..5dad868c8c3aa 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -19,7 +19,7 @@
date_range,
period_range,
)
-from pandas.core.groupby.groupby import SpecificationError
+from pandas.core.base import SpecificationError
import pandas.util.testing as tm
from pandas.io.formats.printing import pprint_thing
| This should be all the remaining places where we catch `Exception` in groupby/apply/resample code. This should make debugging much easier going forward.
cc @jreback @WillAyd | https://api.github.com/repos/pandas-dev/pandas/pulls/28959 | 2019-10-13T22:49:30Z | 2019-10-16T23:37:30Z | 2019-10-16T23:37:30Z | 2019-10-16T23:41:59Z |
CLN: Consistent and Annotated Return Type of _iterate_slices | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 79e941f262931..c82d8a25fedba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -14,7 +14,18 @@
import itertools
import sys
from textwrap import dedent
-from typing import FrozenSet, List, Optional, Sequence, Set, Tuple, Type, Union
+from typing import (
+ FrozenSet,
+ Hashable,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ Union,
+)
import warnings
import numpy as np
@@ -861,7 +872,7 @@ def style(self):
"""
@Appender(_shared_docs["items"])
- def items(self):
+ def items(self) -> Iterable[Tuple[Hashable, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 068d5e5275f0d..4125d6a918b26 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -11,7 +11,17 @@
from functools import partial
from textwrap import dedent
import typing
-from typing import Any, Callable, FrozenSet, Sequence, Type, Union
+from typing import (
+ Any,
+ Callable,
+ FrozenSet,
+ Hashable,
+ Iterable,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
import warnings
import numpy as np
@@ -132,7 +142,7 @@ def pinner(cls):
class SeriesGroupBy(GroupBy):
_apply_whitelist = base.series_apply_whitelist
- def _iterate_slices(self):
+ def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]:
yield self._selection_name, self._selected_obj
@property
@@ -898,22 +908,20 @@ def aggregate(self, func=None, *args, **kwargs):
agg = aggregate
- def _iterate_slices(self):
- if self.axis == 0:
- # kludge
- if self._selection is None:
- slice_axis = self.obj.columns
- else:
- slice_axis = self._selection_list
- slicer = lambda x: self.obj[x]
+ def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]:
+ obj = self._selected_obj
+ if self.axis == 1:
+ obj = obj.T
+
+ if isinstance(obj, Series) and obj.name not in self.exclusions:
+ # Occurs when doing DataFrameGroupBy(...)["X"]
+ yield obj.name, obj
else:
- slice_axis = self.obj.index
- slicer = self.obj.xs
+ for label, values in obj.items():
+ if label in self.exclusions:
+ continue
- for val in slice_axis:
- if val in self.exclusions:
- continue
- yield val, slicer(val)
+ yield label, values
def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1):
new_items, new_blocks = self._cython_agg_blocks(
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index cc297629a7004..4ea02d59597f1 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -14,7 +14,7 @@ class providing the base-class of operations.
import inspect
import re
import types
-from typing import FrozenSet, List, Optional, Tuple, Type, Union
+from typing import FrozenSet, Hashable, Iterable, List, Optional, Tuple, Type, Union
import numpy as np
@@ -758,7 +758,7 @@ def _python_apply_general(self, f):
keys, values, not_indexed_same=mutated or self.mutated
)
- def _iterate_slices(self):
+ def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
| General pre-cursor to getting block management out of groupby. This is also a pre-cursor to fixing #21668 but needs to be coupled with a few more changes as a follow up
On master calls to _iterate_slices look up by label, potentially yielding a DataFrame if there were duplicated columns. This takes the surprise out of that and simply returns a Tuple of label / series for each item along the axis
@jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/28958 | 2019-10-13T21:47:03Z | 2019-10-16T12:24:47Z | 2019-10-16T12:24:47Z | 2020-01-16T00:33:45Z |
CLN: Clean DirNameMixin._deprecated | diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 2d4ded9e2e6ba..b863e7ef3d580 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -4,7 +4,7 @@
that can be mixed into or pinned onto other pandas classes.
"""
-from typing import Set
+from typing import FrozenSet, Set
import warnings
from pandas.util._decorators import Appender
@@ -12,9 +12,7 @@
class DirNamesMixin:
_accessors = set() # type: Set[str]
- _deprecations = frozenset(
- ["asobject", "base", "data", "flags", "itemsize", "strides"]
- )
+ _deprecations = frozenset() # type: FrozenSet[str]
def _dir_deletions(self):
"""
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 5e974f0b69e59..6b9836ba8bcec 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -331,7 +331,9 @@ class Categorical(ExtensionArray, PandasObject):
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = PandasObject._deprecations | frozenset(["tolist", "get_values"])
+ _deprecations = PandasObject._deprecations | frozenset(
+ ["tolist", "itemsize", "get_values"]
+ )
_typ = "categorical"
def __init__(
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 56ffd3db6e942..f400a9a009c8a 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,7 +4,7 @@
import builtins
from collections import OrderedDict
import textwrap
-from typing import Dict, Optional
+from typing import Dict, FrozenSet, Optional
import warnings
import numpy as np
@@ -653,7 +653,17 @@ class IndexOpsMixin:
# ndarray compatibility
__array_priority__ = 1000
- _deprecations = frozenset(["item"])
+ _deprecations = frozenset(
+ [
+ "tolist", # tolist is not deprecated, just suppressed in the __dir__
+ "base",
+ "data",
+ "item",
+ "itemsize",
+ "flags",
+ "strides",
+ ]
+ ) # type: FrozenSet[str]
def transpose(self, *args, **kwargs):
"""
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7dee3a17f8f9e..572240a524569 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1,7 +1,7 @@
from datetime import datetime
import operator
from textwrap import dedent
-from typing import Union
+from typing import FrozenSet, Union
import warnings
import numpy as np
@@ -63,7 +63,7 @@
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core import ops
-from pandas.core.accessor import CachedAccessor, DirNamesMixin
+from pandas.core.accessor import CachedAccessor
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
@@ -206,10 +206,10 @@ class Index(IndexOpsMixin, PandasObject):
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = (
- IndexOpsMixin._deprecations
- | DirNamesMixin._deprecations
- | frozenset(["tolist", "contains", "dtype_str", "get_values", "set_value"])
- )
+ PandasObject._deprecations
+ | IndexOpsMixin._deprecations
+ | frozenset(["asobject", "contains", "dtype_str", "get_values", "set_value"])
+ ) # type: FrozenSet[str]
# To hand over control to subclasses
_join_precedence = 1
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ff8149cc2e922..03801a78be9a5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -54,7 +54,7 @@
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
-from pandas.core.accessor import CachedAccessor, DirNamesMixin
+from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
@@ -178,10 +178,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_deprecations = (
base.IndexOpsMixin._deprecations
| generic.NDFrame._deprecations
- | DirNamesMixin._deprecations
| frozenset(
[
- "tolist", # tolist is not deprecated, just suppressed in the __dir__
"asobject",
"compress",
"valid",
| This moves the content of ``DirNameMixin._deprecations`` to more appropriate locations, typically ``IndexOpsMixIn._deprecations``, as that is a common subclass of ``Index`` and ``Series``. The names in ``DirNameMixin._deprecations`` belonged to those two classes, so having the deprecated names located all the way up in ``DirNameMixin`` made them be available in all classes that subclass ``DirNameMixin``, which was unfortunate.
By having ``DirNameMixin._deprecations`` start with an empty set, it will be easier to use
``DirNameMixin`` and ``PandasObject`` where/when needed, without inheriting undesired names in ``_deprecated``.
This PR also moves ``"tolist"`` to ``IndexOpsMixIn._deprecations``, because ``tolist`` is defined in that class.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28957 | 2019-10-13T20:57:12Z | 2019-10-16T18:53:42Z | 2019-10-16T18:53:42Z | 2019-10-16T19:07:10Z |
To string with encoding | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 7c86ad0f029ed..511e85929f352 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -109,6 +109,7 @@ Other enhancements
(:issue:`28368`)
- :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`)
- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`)
+- Added ``encoding`` argument to :meth:`DataFrame.to_string` for non-ascii text (:issue:`28766`)
- Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`)
Build Changes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 64755b2390eaf..f032a9a919b3c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -755,6 +755,7 @@ def to_string(
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
+ encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
@@ -765,6 +766,10 @@ def to_string(
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
+ encoding : str, default "utf-8"
+ Set character encoding.
+
+ .. versionadded:: 1.0
%(returns)s
See Also
--------
@@ -803,7 +808,7 @@ def to_string(
decimal=decimal,
line_width=line_width,
)
- return formatter.to_string(buf=buf)
+ return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b8c40e3f62221..7c58eafd2ec39 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -485,6 +485,8 @@ def get_buffer(
if encoding is None:
encoding = "utf-8"
+ elif not isinstance(buf, str):
+ raise ValueError("buf is not a file name and encoding is specified.")
if hasattr(buf, "write"):
yield buf
@@ -895,8 +897,12 @@ def _join_multiline(self, *args) -> str:
st = ed
return "\n\n".join(str_lst)
- def to_string(self, buf: Optional[FilePathOrBuffer[str]] = None) -> Optional[str]:
- return self.get_result(buf=buf)
+ def to_string(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+ ) -> Optional[str]:
+ return self.get_result(buf=buf, encoding=encoding)
def to_latex(
self,
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 454e2afb8abe0..9aba4c8aa5019 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -73,17 +73,19 @@ def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
@pytest.fixture
-def assert_filepath_or_buffer_equals(filepath_or_buffer, filepath_or_buffer_id):
+def assert_filepath_or_buffer_equals(
+ filepath_or_buffer, filepath_or_buffer_id, encoding
+):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
- with open(filepath_or_buffer) as f:
+ with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
- result = filepath_or_buffer.read_text()
+ result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
@@ -3240,14 +3242,32 @@ def test_repr_html_ipython_config(ip):
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+@pytest.mark.parametrize(
+ "encoding, data",
+ [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
+)
def test_filepath_or_buffer_arg(
- float_frame, method, filepath_or_buffer, assert_filepath_or_buffer_equals
+ method,
+ filepath_or_buffer,
+ assert_filepath_or_buffer_equals,
+ encoding,
+ data,
+ filepath_or_buffer_id,
):
- df = float_frame
- expected = getattr(df, method)()
+ df = DataFrame([data])
- getattr(df, method)(buf=filepath_or_buffer)
- assert_filepath_or_buffer_equals(expected)
+ if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
+ with pytest.raises(
+ ValueError, match="buf is not a file name and encoding is specified."
+ ):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ elif encoding == "foo":
+ with pytest.raises(LookupError, match="unknown encoding"):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ else:
+ expected = getattr(df, method)()
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ assert_filepath_or_buffer_equals(expected)
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
| - [x] close #28766
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28951 | 2019-10-13T06:12:47Z | 2019-10-23T17:48:08Z | 2019-10-23T17:48:08Z | 2019-10-23T17:50:48Z |
WEB: Adding new pandas logo | diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index fe3e4d1245d93..120058afd1190 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -12,6 +12,7 @@
<title>pandas - Python Data Analysis Library</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
+ <link rel='shortcut icon' type='image/x-icon' href='{{ base_url }}/static/img/favicon.ico'/>
<link rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm"
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md
index 77b79c41aa4d1..5cd31d8722b9d 100644
--- a/web/pandas/about/citing.md
+++ b/web/pandas/about/citing.md
@@ -33,14 +33,91 @@ If you use _pandas_ for a scientific publication, we would appreciate citations
When using the project name _pandas_, please use it in lower case, even at the beginning of a sentence.
-The official logo of _pandas_ is:
+The official logos of _pandas_ are:
-
+### Primary logo
-You can download a `svg` version of the logo [here]({{ base_url }}/static/img/pandas.svg).
+<table class="table logo">
+ <tr>
+ <td>
+ <img alt="" src="{{ base_url }}/static/img/pandas.svg"/>
+ </td>
+ <td style="background-color: #150458">
+ <img alt="" src="{{ base_url }}/static/img/pandas_white.svg"/>
+ </td>
+ </tr>
+</table>
+
+### Secondary logo
+
+<table class="table logo">
+ <tr>
+ <td>
+ <img alt="" src="{{ base_url }}/static/img/pandas_secondary.svg"/>
+ </td>
+ <td style="background-color: #150458">
+ <img alt="" src="{{ base_url }}/static/img/pandas_secondary_white.svg"/>
+ </td>
+ </tr>
+</table>
+
+### Logo mark
+
+<table class="table logo">
+ <tr>
+ <td>
+ <img alt="" src="{{ base_url }}/static/img/pandas_mark.svg"/>
+ </td>
+ <td style="background-color: #150458">
+ <img alt="" src="{{ base_url }}/static/img/pandas_mark_white.svg"/>
+ </td>
+ </tr>
+</table>
+
+### Logo usage
+
+The pandas logo is available in full color and white accent.
+The full color logo should only appear against white backgrounds.
+The white accent logo should go against contrasting color background.
When using the logo, please follow the next directives:
-- Leave enough margin around the logo
+- Primary logo should never be seen under 1 inch in size for printing and 72px for web
+- The secondary logo should never be seen under 0.75 inch in size for printing and 55px for web
+- Leave enough margin around the logo (leave the height of the logo in the top, bottom and both sides)
- Do not distort the logo by changing its proportions
- Do not place text or other elements on top of the logo
+
+### Colors
+
+<table class="table">
+ <tr>
+ <td style="text-align: center;">
+ <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">
+ <circle cx="50" cy="50" r="50" fill="#150458"/>
+ </svg>
+ <br/>
+ <b style="color: #150458;">Blue</b><br/>
+ RGB: R21 G4 B88<br/>
+ HEX: #150458
+ </td>
+ <td style="text-align: center;">
+ <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">
+ <circle cx="50" cy="50" r="50" fill="#ffca00"/>
+ </svg>
+ <br/>
+ <b style="color: #150458;">Yellow</b><br/>
+ RGB: R255 G202 B0<br/>
+ HEX: #FFCA00
+ </td>
+ <td style="text-align: center;">
+ <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">
+ <circle cx="50" cy="50" r="50" fill="#e70488"/>
+ </svg>
+ <br/>
+ <b style="color: #150458;">Pink</b><br/>
+ RGB: R231 G4 B136<br/>
+ HEX: #E70488
+ </td>
+ </tr>
+</table>
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index d5c505f298437..e2a95a5039884 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -16,7 +16,7 @@ main:
- tables
- fenced_code
static:
- logo: # /static/img/pandas.svg
+ logo: /static/img/pandas_white.svg
css:
- /static/css/pandas.css
navbar:
diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css
index 0a227cf8d96c9..8b5905d480ac3 100644
--- a/web/pandas/static/css/pandas.css
+++ b/web/pandas/static/css/pandas.css
@@ -31,7 +31,7 @@ code {
color: #130654;
}
a.navbar-brand img {
- max-height: 2em;
+ height: 3rem;
}
div.card {
margin: 0 0 .2em .2em !important;
@@ -52,3 +52,9 @@ div.card .card-title {
.navbar-dark .navbar-nav .nav-link:hover {
color: white;
}
+table.logo td {
+ text-align: center;
+}
+table.logo img {
+ height: 4rem;
+}
diff --git a/web/pandas/static/img/favicon.ico b/web/pandas/static/img/favicon.ico
new file mode 100644
index 0000000000000..0af2443dcaa3e
Binary files /dev/null and b/web/pandas/static/img/favicon.ico differ
diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg
deleted file mode 120000
index 2e5d3872e4845..0000000000000
--- a/web/pandas/static/img/pandas.svg
+++ /dev/null
@@ -1 +0,0 @@
-../../../../doc/logo/pandas_logo.svg
\ No newline at end of file
diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg
new file mode 100644
index 0000000000000..a7af4e4d2d401
--- /dev/null
+++ b/web/pandas/static/img/pandas.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 818.63 331.21"><defs><style>.cls-1{fill:#130754;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 63</title><path class="cls-1" d="M290.85,199.21c-10.27,0-20.73-4.25-27.28-12.58v45H243l0-111.09h18.6l.71,12.22c6.38-9.39,17.71-14.35,28.52-14.35,20.73,0,36,17.37,36,40.4S311.58,199.22,290.85,199.21Zm-6.37-65.55c-12.05,0-21.79,9.39-21.79,25.16S272.43,184,284.48,184s21.79-9.39,21.79-25.16S296.53,133.66,284.48,133.66Z"/><path class="cls-1" d="M404.36,197.1l-.71-12.22c-6.38,9.39-17.72,14.35-28.53,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.81,0,22.15,5,28.53,14.35l.71-12.22H423V197.1Zm-22.85-63.43c-12.05,0-21.79,9.39-21.8,25.16S369.45,184,381.5,184s21.8-9.39,21.8-25.16S393.56,133.67,381.51,133.67Z"/><path class="cls-1" d="M494.87,197.11V154.77c0-14.88-5.13-19.84-14.52-19.84-9.75,0-20.38,8.85-20.38,19.48v42.7H439.41V120.57H458.2l.89,14.18c5.14-9.75,16.65-16.3,28.35-16.3,20.37,0,28,14.18,28,33.13v45.54Z"/><path class="cls-1" d="M590.77,197.13l-.71-12.23c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.72,4.26,27.28,12.58V90.83h20.56l0,106.3ZM567.92,133.7c-12,0-21.79,9.39-21.79,25.15S555.87,184,567.92,184s21.79-9.38,21.79-25.15S580,133.7,567.92,133.7Z"/><path class="cls-1" d="M686.6,197.14l-.71-12.22c-6.38,9.39-17.72,14.34-28.53,14.34-20.73,0-36-17.36-36-40.4s15.24-40.39,36-40.39c10.81,0,22.15,5,28.53,14.36l.71-12.23h18.6v76.53Zm-22.85-63.43c-12,0-21.79,9.39-21.8,25.16S651.7,184,663.74,184s21.8-9.39,21.8-25.16S675.8,133.71,663.75,133.71Z"/><path class="cls-1" 
d="M750.73,199.63a60.16,60.16,0,0,1-30.65-8.69l3.37-14.17c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.85-7.44-16.3-9.92-18.78-4.08-25.51-14-25.51-24.81,0-12.05,9.39-23.38,30.12-23.38,12.58,0,23.57,5.49,26,6.91l-3.37,13.47A44.59,44.59,0,0,0,753,132.31c-8.32,0-12.4,2.83-12.4,7.44,0,5.13,5.32,7.44,13.46,9.39,20.2,4.25,28.35,13.64,28.35,23.92C782.45,189.53,770.4,199.63,750.73,199.63Z"/><rect class="cls-1" x="74.88" y="68.42" width="24.09" height="50.02"/><rect class="cls-1" x="74.88" y="171.17" width="24.09" height="50.02"/><rect class="cls-2" x="74.88" y="133.04" width="24.09" height="23.6"/><rect class="cls-1" x="36.19" y="109.55" width="24.09" height="166.27"/><rect class="cls-1" x="112.78" y="212.44" width="24.09" height="50.02"/><rect class="cls-1" x="112.78" y="109.61" width="24.09" height="50.02"/><rect class="cls-3" x="112.78" y="174.23" width="24.09" height="23.6"/><rect class="cls-1" x="150.67" y="55.39" width="24.09" height="166.27"/></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/pandas_mark.svg b/web/pandas/static/img/pandas_mark.svg
new file mode 100644
index 0000000000000..1451f57de198e
--- /dev/null
+++ b/web/pandas/static/img/pandas_mark.svg
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="Layer_1"
+ data-name="Layer 1"
+ viewBox="0 0 210.21 280.43"
+ version="1.1"
+ sodipodi:docname="pandas_mark.svg"
+ inkscape:version="0.92.4 (unknown)">
+ <metadata
+ id="metadata27">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1131"
+ inkscape:window-height="921"
+ id="namedview25"
+ showgrid="false"
+ inkscape:zoom="0.84156476"
+ inkscape:cx="107.48153"
+ inkscape:cy="140.215"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="Layer_1" />
+ <defs
+ id="defs4">
+ <style
+ id="style2">.cls-1{fill:#130754;}.cls-2{fill:#48e5ac;}.cls-3{fill:#e70488;}</style>
+ </defs>
+ <title
+ id="title6">Artboard 61</title>
+ <rect
+ class="cls-1"
+ x="74.51"
+ y="43.03"
+ width="24.09"
+ height="50.02"
+ id="rect8" />
+ <rect
+ class="cls-1"
+ x="74.51"
+ y="145.78"
+ width="24.09"
+ height="50.02"
+ id="rect10" />
+ <rect
+ class="cls-2"
+ x="74.51"
+ y="107.65"
+ width="24.09"
+ height="23.6"
+ id="rect12"
+ style="fill:#ffca00;fill-opacity:1" />
+ <rect
+ class="cls-1"
+ x="35.81"
+ y="84.15"
+ width="24.09"
+ height="166.27"
+ id="rect14" />
+ <rect
+ class="cls-1"
+ x="112.41"
+ y="187.05"
+ width="24.09"
+ height="50.02"
+ id="rect16" />
+ <rect
+ class="cls-1"
+ x="112.41"
+ y="84.21"
+ width="24.09"
+ height="50.02"
+ id="rect18" />
+ <rect
+ class="cls-3"
+ x="112.41"
+ y="148.84"
+ width="24.09"
+ height="23.6"
+ id="rect20" />
+ <rect
+ class="cls-1"
+ x="150.3"
+ y="30"
+ width="24.09"
+ height="166.27"
+ id="rect22" />
+</svg>
diff --git a/web/pandas/static/img/pandas_mark_white.svg b/web/pandas/static/img/pandas_mark_white.svg
new file mode 100644
index 0000000000000..ae50bf5430c3a
--- /dev/null
+++ b/web/pandas/static/img/pandas_mark_white.svg
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="Layer_1"
+ data-name="Layer 1"
+ viewBox="0 0 210.21 280.43"
+ version="1.1"
+ sodipodi:docname="pandas_mark_white.svg"
+ inkscape:version="0.92.4 (unknown)">
+ <metadata
+ id="metadata27">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="761"
+ inkscape:window-height="480"
+ id="namedview25"
+ showgrid="false"
+ inkscape:zoom="0.84156476"
+ inkscape:cx="105.105"
+ inkscape:cy="140.215"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="Layer_1" />
+ <defs
+ id="defs4">
+ <style
+ id="style2">.cls-1{fill:#fff;}.cls-2{fill:#48e5ac;}.cls-3{fill:#e70488;}</style>
+ </defs>
+ <title
+ id="title6">Artboard 61 copy</title>
+ <rect
+ class="cls-1"
+ x="74.51"
+ y="43.03"
+ width="24.09"
+ height="50.02"
+ id="rect8" />
+ <rect
+ class="cls-1"
+ x="74.51"
+ y="145.78"
+ width="24.09"
+ height="50.02"
+ id="rect10" />
+ <rect
+ class="cls-2"
+ x="74.51"
+ y="107.65"
+ width="24.09"
+ height="23.6"
+ id="rect12"
+ style="fill:#ffca00;fill-opacity:1" />
+ <rect
+ class="cls-1"
+ x="35.81"
+ y="84.15"
+ width="24.09"
+ height="166.27"
+ id="rect14" />
+ <rect
+ class="cls-1"
+ x="112.41"
+ y="187.05"
+ width="24.09"
+ height="50.02"
+ id="rect16" />
+ <rect
+ class="cls-1"
+ x="112.41"
+ y="84.21"
+ width="24.09"
+ height="50.02"
+ id="rect18" />
+ <rect
+ class="cls-3"
+ x="112.41"
+ y="148.84"
+ width="24.09"
+ height="23.6"
+ id="rect20" />
+ <rect
+ class="cls-1"
+ x="150.3"
+ y="30"
+ width="24.09"
+ height="166.27"
+ id="rect22" />
+</svg>
diff --git a/web/pandas/static/img/pandas_secondary.svg b/web/pandas/static/img/pandas_secondary.svg
new file mode 100644
index 0000000000000..e74404842e5b6
--- /dev/null
+++ b/web/pandas/static/img/pandas_secondary.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 664.97 470.93"><defs><style>.cls-1{fill:#130754;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 57</title><path class="cls-1" d="M110.61,397.48c-10.28,0-20.73-4.25-27.29-12.58v45H62.76l0-111.08h18.6L82.09,331c6.38-9.39,17.72-14.35,28.53-14.34,20.72,0,36,17.36,36,40.4S131.33,397.49,110.61,397.48Zm-6.37-65.55c-12,0-21.8,9.39-21.8,25.16s9.74,25.16,21.79,25.16S126,372.86,126,357.09,116.28,331.93,104.24,331.93Z"/><path class="cls-1" d="M224.11,395.37l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22h18.6v76.54Zm-22.85-63.43c-12,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.15S213.31,332,201.26,331.94Z"/><path class="cls-1" d="M314.62,395.39V353c0-14.88-5.14-19.84-14.53-19.84-9.74,0-20.37,8.85-20.38,19.48v42.7H259.17V318.84H278l.88,14.18c5.14-9.75,16.66-16.3,28.35-16.3,20.37,0,28,14.18,28,33.14v45.53Z"/><path class="cls-1" d="M410.52,395.4l-.71-12.23c-6.37,9.39-17.71,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.73,4.26,27.28,12.59V289.1h20.55l0,106.3ZM387.68,332c-12.05,0-21.8,9.39-21.8,25.16s9.74,25.15,21.79,25.16,21.79-9.39,21.79-25.16S399.72,332,387.68,332Z"/><path class="cls-1" d="M506.35,395.41l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22H525v76.53ZM483.5,332c-12.05,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.16S495.55,332,483.5,332Z"/><path class="cls-1" 
d="M570.49,397.9a60.15,60.15,0,0,1-30.65-8.68L543.2,375c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.84-7.44-16.3-9.92-18.77-4.08-25.51-14-25.5-24.81,0-12,9.39-23.38,30.12-23.38,12.58,0,23.56,5.5,26,6.91L594.94,337a44.52,44.52,0,0,0-22.14-6.38c-8.33,0-12.41,2.83-12.41,7.44,0,5.13,5.32,7.44,13.47,9.39,20.19,4.25,28.34,13.64,28.34,23.92C602.2,387.81,590.15,397.9,570.49,397.9Z"/><rect class="cls-1" x="301.89" y="54.05" width="24.09" height="50.02"/><rect class="cls-1" x="301.89" y="156.8" width="24.09" height="50.02"/><rect class="cls-2" x="301.89" y="118.68" width="24.09" height="23.6"/><rect class="cls-1" x="263.19" y="95.18" width="24.09" height="166.27"/><rect class="cls-1" x="339.79" y="198.07" width="24.09" height="50.02"/><rect class="cls-1" x="339.79" y="95.24" width="24.09" height="50.02"/><rect class="cls-3" x="339.79" y="159.86" width="24.09" height="23.6"/><rect class="cls-1" x="377.68" y="41.03" width="24.09" height="166.27"/></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/pandas_secondary_white.svg b/web/pandas/static/img/pandas_secondary_white.svg
new file mode 100644
index 0000000000000..86bcca57a031e
--- /dev/null
+++ b/web/pandas/static/img/pandas_secondary_white.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 664.97 470.93"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 57 copy</title><path class="cls-1" d="M110.61,397.48c-10.28,0-20.73-4.25-27.29-12.58v45H62.76l0-111.08h18.6L82.09,331c6.38-9.39,17.72-14.35,28.53-14.34,20.72,0,36,17.36,36,40.4S131.33,397.49,110.61,397.48Zm-6.37-65.55c-12,0-21.8,9.39-21.8,25.16s9.74,25.16,21.79,25.16S126,372.86,126,357.09,116.28,331.93,104.24,331.93Z"/><path class="cls-1" d="M224.11,395.37l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22h18.6v76.54Zm-22.85-63.43c-12,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.15S213.31,332,201.26,331.94Z"/><path class="cls-1" d="M314.62,395.39V353c0-14.88-5.14-19.84-14.53-19.84-9.74,0-20.37,8.85-20.38,19.48v42.7H259.17V318.84H278l.88,14.18c5.14-9.75,16.66-16.3,28.35-16.3,20.37,0,28,14.18,28,33.14v45.53Z"/><path class="cls-1" d="M410.52,395.4l-.71-12.23c-6.37,9.39-17.71,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.73,4.26,27.28,12.59V289.1h20.55l0,106.3ZM387.68,332c-12.05,0-21.8,9.39-21.8,25.16s9.74,25.15,21.79,25.16,21.79-9.39,21.79-25.16S399.72,332,387.68,332Z"/><path class="cls-1" d="M506.35,395.41l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22H525v76.53ZM483.5,332c-12.05,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.16S495.55,332,483.5,332Z"/><path class="cls-1" 
d="M570.49,397.9a60.15,60.15,0,0,1-30.65-8.68L543.2,375c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.84-7.44-16.3-9.92-18.77-4.08-25.51-14-25.5-24.81,0-12,9.39-23.38,30.12-23.38,12.58,0,23.56,5.5,26,6.91L594.94,337a44.52,44.52,0,0,0-22.14-6.38c-8.33,0-12.41,2.83-12.41,7.44,0,5.13,5.32,7.44,13.47,9.39,20.19,4.25,28.34,13.64,28.34,23.92C602.2,387.81,590.15,397.9,570.49,397.9Z"/><rect class="cls-1" x="301.89" y="54.05" width="24.09" height="50.02"/><rect class="cls-1" x="301.89" y="156.8" width="24.09" height="50.02"/><rect class="cls-2" x="301.89" y="118.68" width="24.09" height="23.6"/><rect class="cls-1" x="263.19" y="95.18" width="24.09" height="166.27"/><rect class="cls-1" x="339.79" y="198.07" width="24.09" height="50.02"/><rect class="cls-1" x="339.79" y="95.24" width="24.09" height="50.02"/><rect class="cls-3" x="339.79" y="159.86" width="24.09" height="23.6"/><rect class="cls-1" x="377.68" y="41.03" width="24.09" height="166.27"/></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/pandas_white.svg b/web/pandas/static/img/pandas_white.svg
new file mode 100644
index 0000000000000..bc7c41651182d
--- /dev/null
+++ b/web/pandas/static/img/pandas_white.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 818.63 331.21"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 63 copy 2</title><path class="cls-1" d="M290.85,199.21c-10.27,0-20.73-4.25-27.28-12.58v45H243l0-111.09h18.6l.71,12.22c6.38-9.39,17.71-14.35,28.52-14.35,20.73,0,36,17.37,36,40.4S311.58,199.22,290.85,199.21Zm-6.37-65.55c-12.05,0-21.79,9.39-21.79,25.16S272.43,184,284.48,184s21.79-9.39,21.79-25.16S296.53,133.66,284.48,133.66Z"/><path class="cls-1" d="M404.36,197.1l-.71-12.22c-6.38,9.39-17.72,14.35-28.53,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.81,0,22.15,5,28.53,14.35l.71-12.22H423V197.1Zm-22.85-63.43c-12.05,0-21.79,9.39-21.8,25.16S369.45,184,381.5,184s21.8-9.39,21.8-25.16S393.56,133.67,381.51,133.67Z"/><path class="cls-1" d="M494.87,197.11V154.77c0-14.88-5.13-19.84-14.52-19.84-9.75,0-20.38,8.85-20.38,19.48v42.7H439.41V120.57H458.2l.89,14.18c5.14-9.75,16.65-16.3,28.35-16.3,20.37,0,28,14.18,28,33.13v45.54Z"/><path class="cls-1" d="M590.77,197.13l-.71-12.23c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.72,4.26,27.28,12.58V90.83h20.56l0,106.3ZM567.92,133.7c-12,0-21.79,9.39-21.79,25.15S555.87,184,567.92,184s21.79-9.38,21.79-25.15S580,133.7,567.92,133.7Z"/><path class="cls-1" d="M686.6,197.14l-.71-12.22c-6.38,9.39-17.72,14.34-28.53,14.34-20.73,0-36-17.36-36-40.4s15.24-40.39,36-40.39c10.81,0,22.15,5,28.53,14.36l.71-12.23h18.6v76.53Zm-22.85-63.43c-12,0-21.79,9.39-21.8,25.16S651.7,184,663.74,184s21.8-9.39,21.8-25.16S675.8,133.71,663.75,133.71Z"/><path class="cls-1" 
d="M750.73,199.63a60.16,60.16,0,0,1-30.65-8.69l3.37-14.17c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.85-7.44-16.3-9.92-18.78-4.08-25.51-14-25.51-24.81,0-12.05,9.39-23.38,30.12-23.38,12.58,0,23.57,5.49,26,6.91l-3.37,13.47A44.59,44.59,0,0,0,753,132.31c-8.32,0-12.4,2.83-12.4,7.44,0,5.13,5.32,7.44,13.46,9.39,20.2,4.25,28.35,13.64,28.35,23.92C782.45,189.53,770.4,199.63,750.73,199.63Z"/><rect class="cls-1" x="74.88" y="68.42" width="24.09" height="50.02"/><rect class="cls-1" x="74.88" y="171.17" width="24.09" height="50.02"/><rect class="cls-2" x="74.88" y="133.04" width="24.09" height="23.6"/><rect class="cls-1" x="36.19" y="109.55" width="24.09" height="166.27"/><rect class="cls-1" x="112.78" y="212.44" width="24.09" height="50.02"/><rect class="cls-1" x="112.78" y="109.61" width="24.09" height="50.02"/><rect class="cls-3" x="112.78" y="174.23" width="24.09" height="23.6"/><rect class="cls-1" x="150.67" y="55.39" width="24.09" height="166.27"/></svg>
\ No newline at end of file
| - [X] closes #21376, xref #28521
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

| https://api.github.com/repos/pandas-dev/pandas/pulls/28948 | 2019-10-12T23:40:15Z | 2019-10-22T04:07:21Z | 2019-10-22T04:07:21Z | 2019-10-22T04:07:21Z |
CLN: fix mypy errors in pandas/tests/extension/test_numpy.py #28926 | diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index e35464964f432..e968962caf0b7 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -1,4 +1,5 @@
import operator
+from typing import Optional, Type
import pytest
@@ -61,10 +62,10 @@ class BaseArithmeticOpsTests(BaseOpsUtil):
* divmod_exc = TypeError
"""
- series_scalar_exc = TypeError
- frame_scalar_exc = TypeError
- series_array_exc = TypeError
- divmod_exc = TypeError
+ series_scalar_exc = TypeError # type: Optional[Type[TypeError]]
+ frame_scalar_exc = TypeError # type: Optional[Type[TypeError]]
+ series_array_exc = TypeError # type: Optional[Type[TypeError]]
+ divmod_exc = TypeError # type: Optional[Type[TypeError]]
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
diff --git a/setup.cfg b/setup.cfg
index 9c841b76761f5..9af7215b1dc56 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -166,12 +166,6 @@ ignore_errors=True
[mypy-pandas.tests.extension.json.test_json]
ignore_errors=True
-[mypy-pandas.tests.extension.test_numpy]
-ignore_errors=True
-
-[mypy-pandas.tests.extension.test_sparse]
-ignore_errors=True
-
[mypy-pandas.tests.frame.test_constructors]
ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28947 | 2019-10-12T22:12:36Z | 2019-10-13T15:58:58Z | 2019-10-13T15:58:58Z | 2019-10-13T15:59:05Z |
PR06 doc string fixes | diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 8614230c4811f..63344af63470f 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -198,14 +198,14 @@ def eval(
<https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
- parser : string, default 'pandas', {'pandas', 'python'}
+ parser : {'pandas', 'python'}, default 'pandas'
The parser to use to construct the syntax tree from the expression. The
default of ``'pandas'`` parses code slightly different than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
- engine : string or None, default 'numexpr', {'python', 'numexpr'}
+ engine : {'python', 'numexpr'}, default 'numexpr'
The engine used to evaluate the expression. Supported engines are
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5200ad0ba0d23..79e941f262931 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6238,7 +6238,7 @@ def unstack(self, level=-1, fill_value=None):
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
- fill_value : int, string or dict
+ fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fa269b4ebeab1..da8db23fb538b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2353,7 +2353,7 @@ def to_json(
.. versionadded:: 0.23.0
- indent : integer, optional
+ indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 7be11696b7d45..068d5e5275f0d 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1644,7 +1644,7 @@ def nunique(self, dropna=True):
Parameters
----------
- dropna : boolean, default True
+ dropna : bool, default True
Don't include NaN in the counts.
Returns
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c9c02ad9e496a..7dee3a17f8f9e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4531,7 +4531,7 @@ def shift(self, periods=1, freq=None):
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
- freq : pandas.DateOffset, pandas.Timedelta or string, optional
+ freq : pandas.DateOffset, pandas.Timedelta or str, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index ed3a4a7953df3..b538c4df00e19 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -77,7 +77,7 @@ class CategoricalIndex(Index, accessor.PandasDelegate):
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
- dtype : CategoricalDtype or the string "category", optional
+ dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 0b20df38e7d42..6a2f49cd1470e 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1328,7 +1328,7 @@ def indexer_at_time(self, time, asof=False):
Parameters
----------
- time : datetime.time or string
+ time : datetime.time or str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 2cc15f7650ac1..a2d48b5100a2e 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1412,11 +1412,11 @@ def interval_range(
Right bound for generating intervals
periods : int, default None
Number of periods to generate
- freq : numeric, string, or DateOffset, default None
+ freq : numeric, str, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
- name : string, default None
+ name : str, default None
Name of the resulting IntervalIndex
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 2007da541bb2e..596eaf0c55dbd 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1650,7 +1650,7 @@ def to_frame(self, index=True, name=None):
Parameters
----------
- index : boolean, default True
+ index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of strings, optional
@@ -2334,7 +2334,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
- ascending : boolean, default True
+ ascending : bool, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index be5d75224e77d..6942a5797a7f0 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -285,10 +285,10 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates="raise"):
Parameters
----------
x : 1d ndarray or Series
- q : integer or array of quantiles
+ q : int or list-like of int
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
- labels : array or boolean, default None
+ labels : array or bool, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index fa33d11bda7eb..05696ffd4605d 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -39,7 +39,7 @@ def to_numeric(arg, errors="raise", downcast=None):
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
- downcast : {'integer', 'signed', 'unsigned', 'float'} , default None
+ downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 6ce288890b6c7..c71677fa3b570 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -384,7 +384,7 @@ def read_json(
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
- orient : string,
+ orient : str
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3678e32943b2e..c82486532530f 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -257,7 +257,7 @@
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
-cache_dates : boolean, default True
+cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0db5b1b4eecfa..c87cad5472bd9 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1025,8 +1025,8 @@ def append(
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
- append : boolean, default True, append the input data to the
- existing
+ append : bool, default True
+ Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
@@ -1037,8 +1037,9 @@ def append(
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
- dropna : boolean, default False, do not write an ALL nan row to
- the store settable by the option 'io.hdf.dropna_table'
+ dropna : bool, default False
+ Do not write an ALL nan row to the store settable
+ by the option 'io.hdf.dropna_table'.
Notes
-----
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b0683fb8b0dfb..822b3288c82d9 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -287,7 +287,7 @@ def read_sql_query(
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
- coerce_float : boolean, default True
+ coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 0b674b556b2ee..679b74caba79e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -53,31 +53,31 @@
)
_statafile_processing_params1 = """\
-convert_dates : boolean, defaults to True
+convert_dates : bool, default True
Convert date variables to DataFrame time values.
-convert_categoricals : boolean, defaults to True
+convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_encoding_params = """\
-encoding : string, None or encoding
+encoding : str, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
-index_col : string, optional, default: None
+index_col : str, optional
Column to set as index.
-convert_missing : boolean, defaults to False
+convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
-preserve_dtypes : boolean, defaults to True
+preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
-order_categoricals : boolean, defaults to True
+order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
@@ -86,7 +86,7 @@
given number of lines."""
_iterator_params = """\
-iterator : boolean, default False
+iterator : bool, default False
Return StataReader object."""
_read_stata_doc = """
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 4491e6ad9ac7e..0dcd8aeb4df9b 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -223,7 +223,7 @@ def infer_freq(index, warn=True):
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
- warn : boolean, default True
+ warn : bool, default True
Returns
-------
| This PR contains doctrine PR06 fixes mostly doing the conversion below(not that many left to fix :)):
boolean to bool
string to str
integer to int
Tests unchanged
black pandas ran successfully | https://api.github.com/repos/pandas-dev/pandas/pulls/28946 | 2019-10-12T22:07:20Z | 2019-10-13T19:07:57Z | 2019-10-13T19:07:57Z | 2019-10-13T19:08:21Z |
BUG: Fix comparison between nullable int and string | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 38051e9772ae9..08253c160d408 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -838,6 +838,7 @@ ExtensionArray
^^^^^^^^^^^^^^
- Bug in :class:`arrays.PandasArray` when setting a scalar string (:issue:`28118`, :issue:`28150`).
+- Bug where nullable integers could not be compared to strings (:issue:`28930`)
-
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3553a411a27f8..6b43bf58b5046 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -654,6 +654,24 @@ def any_int_dtype(request):
return request.param
+@pytest.fixture(params=ALL_EA_INT_DTYPES)
+def any_nullable_int_dtype(request):
+ """
+ Parameterized fixture for any nullable integer dtype.
+
+ * 'UInt8'
+ * 'Int8'
+ * 'UInt16'
+ * 'Int16'
+ * 'UInt32'
+ * 'Int32'
+ * 'UInt64'
+ * 'Int64'
+ """
+
+ return request.param
+
+
@pytest.fixture(params=ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 2bfb53aa1c800..08a3eca1e9055 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -26,6 +26,7 @@
from pandas.core import nanops, ops
from pandas.core.algorithms import take
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
+from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -646,7 +647,11 @@ def cmp_method(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
- result = op(self._data, other)
+ method = getattr(self._data, f"__{op_name}__")
+ result = method(other)
+
+ if result is NotImplemented:
+ result = invalid_comparison(self._data, other, op)
# nans propagate
if mask is None:
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index d051345fdd12d..f94dbfcc3ec6c 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -168,6 +168,27 @@ def check_opname(self, s, op_name, other, exc=None):
def _compare_other(self, s, data, op_name, other):
self.check_opname(s, op_name, other)
+ def test_compare_to_string(self, any_nullable_int_dtype):
+ # GH 28930
+ s = pd.Series([1, None], dtype=any_nullable_int_dtype)
+ result = s == "a"
+ expected = pd.Series([False, False])
+
+ self.assert_series_equal(result, expected)
+
+ def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
+ # GH 28930
+ s1 = pd.Series([1, 2, 3], dtype=any_nullable_int_dtype)
+ s2 = pd.Series([1, 2, 3], dtype="int")
+
+ method = getattr(s1, all_compare_operators)
+ result = method(2)
+
+ method = getattr(s2, all_compare_operators)
+ expected = method(2)
+
+ self.assert_series_equal(result, expected)
+
class TestInterface(base.BaseInterfaceTests):
pass
| - [x] closes #28930
- [x] tests added / passed
- [x] passes `black pandas`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/28945 | 2019-10-12T21:09:02Z | 2019-12-10T13:27:19Z | 2019-12-10T13:27:18Z | 2019-12-11T09:32:51Z |
TST: add test_series_any_timedelta for GH17667 | diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index dc4db6e7902a8..9acf9e21b9775 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1065,6 +1065,23 @@ def test_frame_any_all_group(self):
ex = DataFrame({"data": [False, False]}, index=["one", "two"])
tm.assert_frame_equal(result, ex)
+ def test_series_any_timedelta(self):
+ # GH 17667
+ df = DataFrame(
+ {
+ "a": Series([0, 0]),
+ "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]),
+ }
+ )
+
+ result = df.any(axis=0)
+ expected = Series(data=[False, True], index=["a", "t"])
+ tm.assert_series_equal(result, expected)
+
+ result = df.any(axis=1)
+ expected = Series(data=[False, True])
+ tm.assert_series_equal(result, expected)
+
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays(
[np.arange(5).repeat(10), np.tile(np.arange(10), 5)]
| Test case example is the same as the one given in the issue #17667
- [x] closes #17667
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28942 | 2019-10-12T17:51:22Z | 2019-10-13T16:44:43Z | 2019-10-13T16:44:42Z | 2020-10-24T14:10:17Z |
DOC: disable nbsphinx including requirejs | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 34faf183db1c2..86f78d9c0f0ae 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -120,6 +120,9 @@
plot_pre_code = """import numpy as np
import pandas as pd"""
+# nbsphinx do not use requirejs (breaks bootstrap)
+nbsphinx_requirejs_path = ""
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["../_templates"]
| To fix https://github.com/pandas-dev/pandas-sphinx-theme/issues/25. With their latest release, `nbsphinx` started including require.js by default, which doesn't play nice with the bootstrap.js (and as a result, no javascript at all works).
This is using an option of `nbsphinx` to not include requirejs (which we don't need, nbsphinx added this for rendering notebooks with plotly, I think) | https://api.github.com/repos/pandas-dev/pandas/pulls/28940 | 2019-10-12T16:40:49Z | 2019-10-12T18:03:58Z | 2019-10-12T18:03:58Z | 2019-10-12T18:04:01Z |
CLN: try/except cleanups | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 605d179e7c652..91f3e878c3807 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -396,15 +396,11 @@ def wrap_results_for_axis(self):
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
- try:
+ if len(result.index) == len(self.res_columns):
result.index = self.res_columns
- except ValueError:
- pass
- try:
+ if len(result.columns) == len(self.res_index):
result.columns = self.res_index
- except ValueError:
- pass
return result
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4d5b20c56df5a..2d798dd15ad24 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -16,6 +16,7 @@
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
+from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_ns_dtype,
@@ -566,25 +567,27 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
# degenerate case
if obj.ndim == 1:
for a in arg:
+ colg = self._gotitem(obj.name, ndim=1, subset=obj)
try:
- colg = self._gotitem(obj.name, ndim=1, subset=obj)
- results.append(colg.aggregate(a))
+ new_res = colg.aggregate(a)
- # make sure we find a good name
- name = com.get_callable_name(a) or a
- keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
+ else:
+ results.append(new_res)
+
+ # make sure we find a good name
+ name = com.get_callable_name(a) or a
+ keys.append(name)
# multiples
else:
for index, col in enumerate(obj):
+ colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index])
try:
- colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index])
- results.append(colg.aggregate(arg))
- keys.append(col)
+ new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError:
@@ -592,6 +595,9 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
continue
except SpecificationError:
raise
+ else:
+ results.append(new_res)
+ keys.append(col)
# if we are empty
if not len(results):
@@ -604,7 +610,6 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis):
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
- from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 5200d33c6a1fb..7be11696b7d45 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -505,9 +505,7 @@ def true_and_notna(x, *args, **kwargs):
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
- except ValueError:
- raise TypeError("the filter must return a boolean result")
- except TypeError:
+ except (ValueError, TypeError):
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
@@ -1052,8 +1050,8 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
data = obj[item]
colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
+ cast = self._transform_should_cast(func)
try:
- cast = self._transform_should_cast(func)
result[item] = colg.aggregate(func, *args, **kwargs)
if cast:
| Move non-raising stuff out of try/except to narrow down the failure modes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28939 | 2019-10-12T16:26:24Z | 2019-10-12T17:00:19Z | 2019-10-12T17:00:19Z | 2019-10-12T18:32:07Z |
fix #28926 pandas\api\test_api.py mypy errors | diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 6c50159663574..0af8ed0ebf8d5 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -1,3 +1,5 @@
+from typing import List
+
import pandas as pd
from pandas import api, compat
from pandas.util import testing as tm
@@ -41,7 +43,7 @@ class TestPDApi(Base):
]
# these are already deprecated; awaiting removal
- deprecated_modules = []
+ deprecated_modules = [] # type: List[str]
# misc
misc = ["IndexSlice", "NaT"]
@@ -92,10 +94,10 @@ class TestPDApi(Base):
classes.extend(["Panel", "SparseSeries", "SparseDataFrame"])
# these are already deprecated; awaiting removal
- deprecated_classes = []
+ deprecated_classes = [] # type: List[str]
# these should be deprecated in the future
- deprecated_classes_in_future = []
+ deprecated_classes_in_future = [] # type: List[str]
# external modules exposed in pandas namespace
modules = ["np", "datetime"]
@@ -171,10 +173,10 @@ class TestPDApi(Base):
funcs_to = ["to_datetime", "to_msgpack", "to_numeric", "to_pickle", "to_timedelta"]
# top-level to deprecate in the future
- deprecated_funcs_in_future = []
+ deprecated_funcs_in_future = [] # type: List[str]
# these are already deprecated; awaiting removal
- deprecated_funcs = []
+ deprecated_funcs = [] # type: List[str]
# private modules in pandas namespace
private_modules = [
diff --git a/setup.cfg b/setup.cfg
index 9c841b76761f5..69b67c82a1e9f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -133,9 +133,6 @@ no_implicit_optional=True
[mypy-pandas.conftest]
ignore_errors=True
-[mypy-pandas.tests.api.test_api]
-ignore_errors=True
-
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
| - [ ] xref #28926
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28935 | 2019-10-11T21:35:29Z | 2019-10-13T19:23:37Z | 2019-10-13T19:23:37Z | 2019-10-13T19:23:53Z |
REF: de-duplicate groupby_helper code | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 3069bbbf34bb7..c9994812462b1 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -372,7 +372,8 @@ def group_any_all(uint8_t[:] out,
const uint8_t[:] mask,
object val_test,
bint skipna):
- """Aggregated boolean values to show truthfulness of group elements
+ """
+ Aggregated boolean values to show truthfulness of group elements.
Parameters
----------
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index f052feea0bbf3..c837c6c5c6519 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -20,6 +20,18 @@ ctypedef fused rank_t:
object
+cdef inline bint _treat_as_na(rank_t val, bint is_datetimelike) nogil:
+ if rank_t is object:
+ # Should never be used, but we need to avoid the `val != val` below
+ # or else cython will raise about gil acquisition.
+ raise NotImplementedError
+
+ elif rank_t is int64_t:
+ return is_datetimelike and val == NPY_NAT
+ else:
+ return val != val
+
+
@cython.wraparound(False)
@cython.boundscheck(False)
def group_last(rank_t[:, :] out,
@@ -61,24 +73,16 @@ def group_last(rank_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
- if rank_t is int64_t:
- # need a special notna check
- if val != NPY_NAT:
- nobs[lab, j] += 1
- resx[lab, j] = val
- else:
- if val == val:
- nobs[lab, j] += 1
- resx[lab, j] = val
+ if val == val:
+ # NB: use _treat_as_na here once
+ # conditional-nogil is available.
+ nobs[lab, j] += 1
+ resx[lab, j] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
- if rank_t is int64_t:
- out[i, j] = NPY_NAT
- else:
- out[i, j] = NAN
+ out[i, j] = NAN
else:
out[i, j] = resx[i, j]
else:
@@ -92,16 +96,10 @@ def group_last(rank_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
- if rank_t is int64_t:
- # need a special notna check
- if val != NPY_NAT:
- nobs[lab, j] += 1
- resx[lab, j] = val
- else:
- if val == val:
- nobs[lab, j] += 1
- resx[lab, j] = val
+ if not _treat_as_na(val, True):
+ # TODO: Sure we always want is_datetimelike=True?
+ nobs[lab, j] += 1
+ resx[lab, j] = val
for i in range(ncounts):
for j in range(K):
@@ -113,6 +111,7 @@ def group_last(rank_t[:, :] out,
break
else:
out[i, j] = NAN
+
else:
out[i, j] = resx[i, j]
@@ -121,7 +120,6 @@ def group_last(rank_t[:, :] out,
# block.
raise RuntimeError("empty group with uint64_t")
-
group_last_float64 = group_last["float64_t"]
group_last_float32 = group_last["float32_t"]
group_last_int64 = group_last["int64_t"]
@@ -169,8 +167,9 @@ def group_nth(rank_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
if val == val:
+ # NB: use _treat_as_na here once
+ # conditional-nogil is available.
nobs[lab, j] += 1
if nobs[lab, j] == rank:
resx[lab, j] = val
@@ -193,18 +192,11 @@ def group_nth(rank_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
- if rank_t is int64_t:
- # need a special notna check
- if val != NPY_NAT:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
- else:
- if val == val:
- nobs[lab, j] += 1
- if nobs[lab, j] == rank:
- resx[lab, j] = val
+ if not _treat_as_na(val, True):
+ # TODO: Sure we always want is_datetimelike=True?
+ nobs[lab, j] += 1
+ if nobs[lab, j] == rank:
+ resx[lab, j] = val
for i in range(ncounts):
for j in range(K):
@@ -487,17 +479,11 @@ def group_max(groupby_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
- if groupby_t is int64_t:
- if val != nan_val:
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
- else:
- if val == val:
- nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if not _treat_as_na(val, True):
+ # TODO: Sure we always want is_datetimelike=True?
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
for i in range(ncounts):
for j in range(K):
@@ -563,17 +549,11 @@ def group_min(groupby_t[:, :] out,
for j in range(K):
val = values[i, j]
- # not nan
- if groupby_t is int64_t:
- if val != nan_val:
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- if val == val:
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
+ if not _treat_as_na(val, True):
+ # TODO: Sure we always want is_datetimelike=True?
+ nobs[lab, j] += 1
+ if val < minx[lab, j]:
+ minx[lab, j] = val
for i in range(ncounts):
for j in range(K):
@@ -643,21 +623,13 @@ def group_cummin(groupby_t[:, :] out,
for j in range(K):
val = values[i, j]
- # val = nan
- if groupby_t is int64_t:
- if is_datetimelike and val == NPY_NAT:
- out[i, j] = NPY_NAT
- else:
- mval = accum[lab, j]
- if val < mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ if _treat_as_na(val, is_datetimelike):
+ out[i, j] = val
else:
- if val == val:
- mval = accum[lab, j]
- if val < mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ mval = accum[lab, j]
+ if val < mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
@cython.boundscheck(False)
@@ -712,17 +684,10 @@ def group_cummax(groupby_t[:, :] out,
for j in range(K):
val = values[i, j]
- if groupby_t is int64_t:
- if is_datetimelike and val == NPY_NAT:
- out[i, j] = NPY_NAT
- else:
- mval = accum[lab, j]
- if val > mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ if _treat_as_na(val, is_datetimelike):
+ out[i, j] = val
else:
- if val == val:
- mval = accum[lab, j]
- if val > mval:
- accum[lab, j] = mval = val
- out[i, j] = mval
+ mval = accum[lab, j]
+ if val > mval:
+ accum[lab, j] = mval = val
+ out[i, j] = mval
| There's one other piece of de-duplication I think should be feasible but cython is still raising compilation errors for, so will do separately.
Orthogonal to #28931, but I expect it will cause merge conflicts. 28931 should be a higher priority. | https://api.github.com/repos/pandas-dev/pandas/pulls/28934 | 2019-10-11T19:10:49Z | 2019-10-16T19:09:04Z | 2019-10-16T19:09:04Z | 2019-10-16T19:15:52Z |
BUG: Preserve key order when using loc on MultiIndex DataFrame | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 607a2c02944b4..7071289ef3243 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -161,8 +161,14 @@ Missing
MultiIndex
^^^^^^^^^^
+- Bug in :meth:`Dataframe.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`)
--
+.. ipython:: python
+
+ df = pd.DataFrame(np.arange(4),
+ index=[["a", "a", "b", "b"], [1, 2, 1, 2]])
+ # Rows are now ordered as the requested keys
+ df.loc[(['b', 'a'], [2, 1]), :]
-
I/O
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4af9901d79a46..c560d81ba95f6 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,6 +1,6 @@
import datetime
from sys import getsizeof
-from typing import Any, Hashable, List, Optional, Sequence, Union
+from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
import warnings
import numpy as np
@@ -9,6 +9,7 @@
from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs
from pandas._libs.hashtable import duplicated_int64
+from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
@@ -3081,9 +3082,69 @@ def _update_indexer(idxr, indexer=indexer):
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
+
+ indexer = self._reorder_indexer(seq, indexer)
+
return indexer._ndarray_values
- # --------------------------------------------------------------------
+ def _reorder_indexer(
+ self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
+ ) -> ArrayLike:
+ """
+ Reorder an indexer of a MultiIndex (self) so that the label are in the
+ same order as given in seq
+
+ Parameters
+ ----------
+ seq : label/slice/list/mask or a sequence of such
+ indexer: an Int64Index indexer of self
+
+ Returns
+ -------
+ indexer : a sorted Int64Index indexer of self ordered as seq
+ """
+ # If the index is lexsorted and the list_like label in seq are sorted
+ # then we do not need to sort
+ if self.is_lexsorted():
+ need_sort = False
+ for i, k in enumerate(seq):
+ if is_list_like(k):
+ if not need_sort:
+ k_codes = self.levels[i].get_indexer(k)
+ k_codes = k_codes[k_codes >= 0] # Filter absent keys
+ # True if the given codes are not ordered
+ need_sort = (k_codes[:-1] > k_codes[1:]).any()
+ # Bail out if both index and seq are sorted
+ if not need_sort:
+ return indexer
+
+ n = len(self)
+ keys: Tuple[np.ndarray, ...] = tuple()
+ # For each level of the sequence in seq, map the level codes with the
+ # order they appears in a list-like sequence
+ # This mapping is then use to reorder the indexer
+ for i, k in enumerate(seq):
+ if com.is_bool_indexer(k):
+ new_order = np.arange(n)[indexer]
+ elif is_list_like(k):
+ # Generate a map with all level codes as sorted initially
+ key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
+ self.levels[i]
+ )
+ # Set order as given in the indexer list
+ level_indexer = self.levels[i].get_indexer(k)
+ level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
+ key_order_map[level_indexer] = np.arange(len(level_indexer))
+
+ new_order = key_order_map[self.codes[i][indexer]]
+ else:
+ # For all other case, use the same order as the level
+ new_order = np.arange(n)[indexer]
+ keys = (new_order,) + keys
+
+ # Find the reordering using lexsort on the keys mapping
+ ind = np.lexsort(keys)
+ return indexer[ind]
def truncate(self, before=None, after=None):
"""
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 640cd8faf6811..b377ca2869bd3 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2534,3 +2534,29 @@ def test_sort_ascending_list(self):
result = s.sort_index(level=["third", "first"], ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "keys, expected",
+ [
+ (["b", "a"], [["b", "b", "a", "a"], [1, 2, 1, 2]]),
+ (["a", "b"], [["a", "a", "b", "b"], [1, 2, 1, 2]]),
+ ((["a", "b"], [1, 2]), [["a", "a", "b", "b"], [1, 2, 1, 2]]),
+ ((["a", "b"], [2, 1]), [["a", "a", "b", "b"], [2, 1, 2, 1]]),
+ ((["b", "a"], [2, 1]), [["b", "b", "a", "a"], [2, 1, 2, 1]]),
+ ((["b", "a"], [1, 2]), [["b", "b", "a", "a"], [1, 2, 1, 2]]),
+ ((["c", "a"], [2, 1]), [["c", "a", "a"], [1, 2, 1]]),
+ ],
+ )
+ @pytest.mark.parametrize("dim", ["index", "columns"])
+ def test_multilevel_index_loc_order(self, dim, keys, expected):
+ # GH 22797
+ # Try to respect order of keys given for MultiIndex.loc
+ kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]}
+ df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,)
+ exp_index = MultiIndex.from_arrays(expected)
+ if dim == "index":
+ res = df.loc[keys, :]
+ tm.assert_index_equal(res.index, exp_index)
+ elif dim == "columns":
+ res = df.loc[:, keys]
+ tm.assert_index_equal(res.columns, exp_index)
| ## Description
closes #22797
As described in #22797, the key order given to loc for a MultiIndex DataFrame was not respected:
```
import pandas as pd
import numpy as np
df = pd.DataFrame(np.arange(12).reshape((4, 3)),
index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
columns=[['Ohio', 'Ohio', 'Colorado'],
['Green', 'Red', 'Green']])
df.loc[(['b','a'],[2, 1]),:]
# Out
Ohio Colorado
Green Red Green
a 1 0 1 2
2 3 4 5
b 1 6 7 8
2 9 10 11
```
## Proposed fix
The culprit was the use of intersection of indexers in the loc function. I tried keeping the indexers sorted during the whole function (in the main loop), but performance were really affected (by a factor 3!!!).
As an other solution, I tried to sort the result after the indexers were computed. It was already way better (worse "only" by a factor 1.15 or so, see the asv benchmark result).
So I computed and add a flag testing if the result need to be sorted (the benchmark seems to always have sorted key in the loc call).
**Update** The sorting function is now a separate private function (_reorder_indexer). It is called at the end of the get_locs function.
## Benchmark
Benchmark with the flag (I run asv compare with -s option):
<details>
Benchmarks that have got worse:
before after ratio
[39602e7d] [da8b55af]
<master> <multiindex_sort_loc_order_issue_22797>
+ 5.62±0.2μs 6.27±0.2μs 1.11 index_cached_properties.IndexCache.time_shape('Float64Index')
+ 6.57±0.2μs 7.49±0.2μs 1.14 index_cached_properties.IndexCache.time_shape('TimedeltaIndex')
</details>
Benchmark without flag:
<details>
Benchmarks that have got worse:
before after ratio
[39602e7d] [c786822a]
<master> <multiindex_sort_loc_order_issue_22797~1>
+ 2.49±0.02ms 2.87±0.01ms 1.15 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'int')
+ 2.53±0ms 2.91±0.01ms 1.15 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'int')
+ 29.2±0.7ms 33.1±0.02ms 1.13 frame_ctor.FromLists.time_frame_from_lists
+ 87.2±1ms 98.9±1ms 1.13 frame_ctor.FromRecords.time_frame_from_records_generator(None)
+ 12.8±0.09ms 14.3±0.09ms 1.11 groupby.MultiColumn.time_col_select_numpy_sum
+ 5.62±0.2μs 6.32±0.4μs 1.12 index_cached_properties.IndexCache.time_shape('Float64Index')
+ 4.96±0.02ms 5.71±0.01ms 1.15 indexing.MultiIndexing.time_index_slice
+ 2.91±0ms 3.29±0.01ms 1.13 inference.ToNumeric.time_from_numeric_str('coerce')
+ 2.92±0ms 3.29±0.01ms 1.13 inference.ToNumeric.time_from_numeric_str('ignore')
+ 3.45±0.01ms 3.84±0.01ms 1.11 series_methods.Map.time_map('lambda', 'object')
+ 29.3±0.2ms 33.2±0.04ms 1.13 strings.Methods.time_len
</details>
## Checklist
- [x] closes #22797
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28933 | 2019-10-11T18:52:53Z | 2020-02-02T22:20:57Z | 2020-02-02T22:20:56Z | 2020-03-16T19:31:39Z |
add uint64 support for some libgroupby funcs | diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 6b434b6470581..f052feea0bbf3 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -16,6 +16,7 @@ ctypedef fused rank_t:
float64_t
float32_t
int64_t
+ uint64_t
object
@@ -34,6 +35,7 @@ def group_last(rank_t[:, :] out,
rank_t val
ndarray[rank_t, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
+ bint runtime_error = False
assert min_count == -1, "'min_count' only used in add and prod"
@@ -106,11 +108,20 @@ def group_last(rank_t[:, :] out,
if nobs[i, j] == 0:
if rank_t is int64_t:
out[i, j] = NPY_NAT
+ elif rank_t is uint64_t:
+ runtime_error = True
+ break
else:
out[i, j] = NAN
else:
out[i, j] = resx[i, j]
+ if runtime_error:
+ # We cannot raise directly above because that is within a nogil
+ # block.
+ raise RuntimeError("empty group with uint64_t")
+
+
group_last_float64 = group_last["float64_t"]
group_last_float32 = group_last["float32_t"]
group_last_int64 = group_last["int64_t"]
@@ -132,6 +143,7 @@ def group_nth(rank_t[:, :] out,
rank_t val
ndarray[rank_t, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
+ bint runtime_error = False
assert min_count == -1, "'min_count' only used in add and prod"
@@ -199,11 +211,19 @@ def group_nth(rank_t[:, :] out,
if nobs[i, j] == 0:
if rank_t is int64_t:
out[i, j] = NPY_NAT
+ elif rank_t is uint64_t:
+ runtime_error = True
+ break
else:
out[i, j] = NAN
else:
out[i, j] = resx[i, j]
+ if runtime_error:
+ # We cannot raise directly above because that is within a nogil
+ # block.
+ raise RuntimeError("empty group with uint64_t")
+
group_nth_float64 = group_nth["float64_t"]
group_nth_float32 = group_nth["float32_t"]
@@ -282,12 +302,16 @@ def group_rank(float64_t[:, :] out,
if ascending ^ (na_option == 'top'):
if rank_t is int64_t:
nan_fill_val = np.iinfo(np.int64).max
+ elif rank_t is uint64_t:
+ nan_fill_val = np.iinfo(np.uint64).max
else:
nan_fill_val = np.inf
order = (masked_vals, mask, labels)
else:
if rank_t is int64_t:
nan_fill_val = np.iinfo(np.int64).min
+ elif rank_t is uint64_t:
+ nan_fill_val = 0
else:
nan_fill_val = -np.inf
@@ -397,6 +421,7 @@ def group_rank(float64_t[:, :] out,
group_rank_float64 = group_rank["float64_t"]
group_rank_float32 = group_rank["float32_t"]
group_rank_int64 = group_rank["int64_t"]
+group_rank_uint64 = group_rank["uint64_t"]
# Note: we do not have a group_rank_object because that would require a
# not-nogil implementation, see GH#19560
@@ -410,6 +435,7 @@ ctypedef fused groupby_t:
float64_t
float32_t
int64_t
+ uint64_t
@cython.wraparound(False)
@@ -426,6 +452,7 @@ def group_max(groupby_t[:, :] out,
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
groupby_t val, count, nan_val
ndarray[groupby_t, ndim=2] maxx, nobs
+ bint runtime_error = False
assert min_count == -1, "'min_count' only used in add and prod"
@@ -439,6 +466,11 @@ def group_max(groupby_t[:, :] out,
# Note: evaluated at compile-time
maxx[:] = -_int64_max
nan_val = NPY_NAT
+ elif groupby_t is uint64_t:
+ # NB: We do not define nan_val because there is no such thing
+ # for uint64_t. We carefully avoid having to reference it in this
+ # case.
+ maxx[:] = 0
else:
maxx[:] = -np.inf
nan_val = NAN
@@ -462,7 +494,7 @@ def group_max(groupby_t[:, :] out,
if val > maxx[lab, j]:
maxx[lab, j] = val
else:
- if val == val and val != nan_val:
+ if val == val:
nobs[lab, j] += 1
if val > maxx[lab, j]:
maxx[lab, j] = val
@@ -470,10 +502,18 @@ def group_max(groupby_t[:, :] out,
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
+ if groupby_t is uint64_t:
+ runtime_error = True
+ break
out[i, j] = nan_val
else:
out[i, j] = maxx[i, j]
+ if runtime_error:
+ # We cannot raise directly above because that is within a nogil
+ # block.
+ raise RuntimeError("empty group with uint64_t")
+
@cython.wraparound(False)
@cython.boundscheck(False)
@@ -489,6 +529,7 @@ def group_min(groupby_t[:, :] out,
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
groupby_t val, count, nan_val
ndarray[groupby_t, ndim=2] minx, nobs
+ bint runtime_error = False
assert min_count == -1, "'min_count' only used in add and prod"
@@ -501,6 +542,11 @@ def group_min(groupby_t[:, :] out,
if groupby_t is int64_t:
minx[:] = _int64_max
nan_val = NPY_NAT
+ elif groupby_t is uint64_t:
+ # NB: We do not define nan_val because there is no such thing
+ # for uint64_t. We carefully avoid having to reference it in this
+ # case.
+ minx[:] = np.iinfo(np.uint64).max
else:
minx[:] = np.inf
nan_val = NAN
@@ -524,7 +570,7 @@ def group_min(groupby_t[:, :] out,
if val < minx[lab, j]:
minx[lab, j] = val
else:
- if val == val and val != nan_val:
+ if val == val:
nobs[lab, j] += 1
if val < minx[lab, j]:
minx[lab, j] = val
@@ -532,10 +578,18 @@ def group_min(groupby_t[:, :] out,
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
+ if groupby_t is uint64_t:
+ runtime_error = True
+ break
out[i, j] = nan_val
else:
out[i, j] = minx[i, j]
+ if runtime_error:
+ # We cannot raise directly above because that is within a nogil
+ # block.
+ raise RuntimeError("empty group with uint64_t")
+
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -575,6 +629,8 @@ def group_cummin(groupby_t[:, :] out,
accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = _int64_max
+ elif groupby_t is uint64_t:
+ accum[:] = np.iinfo(np.uint64).max
else:
accum[:] = np.inf
@@ -642,6 +698,8 @@ def group_cummax(groupby_t[:, :] out,
accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = -_int64_max
+ elif groupby_t is uint64_t:
+ accum[:] = 0
else:
accum[:] = -np.inf
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index cc297629a7004..b69a9cd87c025 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1361,7 +1361,15 @@ def f(self, **kwargs):
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
+ except DataError:
+ pass
except Exception:
+ # TODO: the remaining test cases that get here are from:
+ # - AttributeError from _cython_agg_blocks bug passing
+ # DataFrame to make_block; see GH#28275
+ # - TypeError in _cython_operation calling ensure_float64
+ # on object array containing complex numbers;
+ # see test_groupby_complex, test_max_nan_bug
pass
# apply a non-cython aggregation
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index afb22a732691c..571e710ba8928 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -378,7 +378,7 @@ def test_median_empty_bins(observed):
@pytest.mark.parametrize(
- "dtype", ["int8", "int16", "int32", "int64", "float32", "float64"]
+ "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
| cc @WillAyd I hope you agree the runtime_error thing here is easier to implement/review with fused type than it would be with tempita. | https://api.github.com/repos/pandas-dev/pandas/pulls/28931 | 2019-10-11T17:19:03Z | 2019-10-16T12:42:48Z | 2019-10-16T12:42:48Z | 2019-10-16T15:25:04Z |
ENH: pd.MultiIndex.get_loc(np.nan) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5b4761c3bc6c5..09d9a1d7ef322 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -903,6 +903,7 @@ Indexing
- Bug when indexing with ``.loc`` where the index was a :class:`CategoricalIndex` with non-string categories didn't work (:issue:`17569`, :issue:`30225`)
- :meth:`Index.get_indexer_non_unique` could fail with `TypeError` in some cases, such as when searching for ints in a string index (:issue:`28257`)
- Bug in :meth:`Float64Index.get_loc` incorrectly raising ``TypeError`` instead of ``KeyError`` (:issue:`29189`)
+- :meth:`MultiIndex.get_loc` can't find missing values when input includes missing values (:issue:`19132`)
- Bug in :meth:`Series.__setitem__` incorrectly assigning values with boolean indexer when the length of new data matches the number of ``True`` values and new data is not a ``Series`` or an ``np.array`` (:issue:`30567`)
Missing
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index db9806a046305..d3e0cc7b041ba 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2539,7 +2539,7 @@ def _partial_tup_index(self, tup, side="left"):
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
- if lab not in lev:
+ if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
@@ -2549,13 +2549,38 @@ def _partial_tup_index(self, tup, side="left"):
loc -= 1
return start + section.searchsorted(loc, side=side)
- idx = lev.get_loc(lab)
+ idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
+ def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
+ """
+ If key is NA value, location of index unify as -1.
+
+ Parameters
+ ----------
+ level_index: Index
+ key : label
+
+ Returns
+ -------
+ loc : int
+ If key is NA value, loc is -1
+ Else, location of key in index.
+
+ See Also
+ --------
+ Index.get_loc : The get_loc method for (single-level) index.
+ """
+
+ if is_scalar(key) and isna(key):
+ return -1
+ else:
+ return level_index.get_loc(key)
+
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
@@ -2654,7 +2679,9 @@ def _maybe_to_slice(loc):
loc = np.arange(start, stop, dtype="int64")
for i, k in enumerate(follow_key, len(lead_key)):
- mask = self.codes[i][loc] == self.levels[i].get_loc(k)
+ mask = self.codes[i][loc] == self._get_loc_single_level_index(
+ self.levels[i], k
+ )
if not mask.all():
loc = loc[mask]
if not len(loc):
@@ -2882,7 +2909,7 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
else:
- code = level_index.get_loc(key)
+ code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
@@ -3377,14 +3404,11 @@ def isin(self, values, level=None):
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
- levs = self.levels[num]
- level_codes = self.codes[num]
+ levs = self.get_level_values(num)
- sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
- return np.zeros(len(level_codes), dtype=np.bool_)
- else:
- return np.lib.arraysetops.in1d(level_codes, sought_labels)
+ return np.zeros(len(levs), dtype=np.bool_)
+ return levs.isin(values)
MultiIndex._add_numeric_methods_disabled()
diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py
index 4b0895c823b8b..49aa63210cd5e 100644
--- a/pandas/tests/indexes/multi/test_contains.py
+++ b/pandas/tests/indexes/multi/test_contains.py
@@ -98,3 +98,27 @@ def test_isin_level_kwarg():
with pytest.raises(KeyError, match="'Level C not found'"):
idx.isin(vals_1, level="C")
+
+
+def test_contains_with_missing_value():
+ # issue 19132
+ idx = MultiIndex.from_arrays([[1, np.nan, 2]])
+ assert np.nan in idx
+
+ idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]])
+ assert np.nan not in idx
+ assert (1, np.nan) in idx
+
+
+@pytest.mark.parametrize(
+ "labels,expected,level",
+ [
+ ([("b", np.nan)], np.array([False, False, True]), None,),
+ ([np.nan, "a"], np.array([True, True, False]), 0),
+ (["d", np.nan], np.array([False, True, True]), 1),
+ ],
+)
+def test_isin_multi_index_with_missing_value(labels, expected, level):
+ # GH 19132
+ midx = MultiIndex.from_arrays([[np.nan, "a", "b"], ["c", "d", np.nan]])
+ tm.assert_numpy_array_equal(midx.isin(labels, level=level), expected)
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 176d47a3bdb9b..ad6f06d065150 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -437,3 +437,91 @@ def test_timestamp_multiindex_indexer():
)
should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo")
tm.assert_series_equal(result, should_be)
+
+
+def test_get_loc_with_values_including_missing_values():
+ # issue 19132
+ idx = MultiIndex.from_product([[np.nan, 1]] * 2)
+ expected = slice(0, 2, None)
+ assert idx.get_loc(np.nan) == expected
+
+ idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]])
+ expected = np.array([True, False, False, True])
+ tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected)
+
+ idx = MultiIndex.from_product([[np.nan, 1]] * 3)
+ expected = slice(2, 4, None)
+ assert idx.get_loc((np.nan, 1)) == expected
+
+
+@pytest.mark.parametrize(
+ "index_arr,labels,expected",
+ [
+ (
+ [[1, np.nan, 2], [3, 4, 5]],
+ [1, np.nan, 2],
+ np.array([-1, -1, -1], dtype=np.intp),
+ ),
+ ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)),
+ ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)),
+ (
+ [[1, 2, 3], [np.nan, 4, 5]],
+ [np.nan, 4, 5],
+ np.array([-1, -1, -1], dtype=np.intp),
+ ),
+ ],
+)
+def test_get_indexer_with_missing_value(index_arr, labels, expected):
+ # issue 19132
+ idx = MultiIndex.from_arrays(index_arr)
+ result = idx.get_indexer(labels)
+ tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "index_arr,expected,target,algo",
+ [
+ ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"),
+ ([[np.nan, "a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"),
+ ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"),
+ ],
+)
+def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo):
+ # issue 19132
+ idx = MultiIndex.from_arrays(index_arr)
+ result = idx.get_slice_bound(target, side=algo, kind="loc")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "index_arr,expected,start_idx,end_idx",
+ [
+ ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1),
+ ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)),
+ ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3),
+ ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)),
+ ],
+)
+def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx):
+ # issue 19132
+ idx = MultiIndex.from_arrays(index_arr)
+ result = idx.slice_indexer(start=start_idx, end=end_idx)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "index_arr,expected,start_idx,end_idx",
+ [
+ ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None),
+ ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"),
+ ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")),
+ ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None),
+ ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"),
+ ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")),
+ ],
+)
+def test_slice_locs_with_missing_value(index_arr, expected, start_idx, end_idx):
+ # issue 19132
+ idx = MultiIndex.from_arrays(index_arr)
+ result = idx.slice_locs(start=start_idx, end=end_idx)
+ assert result == expected
| MultiIndex.get_loc could not find nan with values including missing
values as a input.
Background: In `MultiIndex`, missing value is denoted by -1 in codes and doesn't exist in `self.levels`
So, could not find NA value in `self.levels`.
Before PR xref #28783
- [x] closes #19132
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28919 | 2019-10-11T07:35:50Z | 2020-01-09T03:11:43Z | 2020-01-09T03:11:42Z | 2020-01-09T10:18:40Z |
BUG: Avoid undefined behaviour when converting from float to timedelta | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index eb442e8bf3486..09b80d1b3a9ac 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -360,7 +360,7 @@ def _wrap_results(result, dtype, fill_value=None):
result = tslibs.Timedelta(result, unit="ns")
else:
- result = result.astype("i8").view(dtype)
+ result = result.astype("m8[ns]").view(dtype)
return result
| Summation of timedelta series with NaTs in them result in undefined
behaviour because the final wrapping step of the summation ends up
converting the NaNs in the sum through a direct cast to int64. This
cast is undefined for NaN and just happens to work on x86_64 because
of the way `cvttd2si` works. On Aarch64, the corresponding `fcvtzs` sets
the result to 0 on undefined input.
This fix trivially sets the conversion target to m8 instead of i8 so
that numpy correctly casts from NaN to NaT. Note that the fix in
numpy for the same is pending in PR numpy/numpy#14669 .
There is an existing test (test_sum_nanops_timedelta in
frame/test_analytics.py) that exercises this bug and has been verified
to have been fixed with this and the numpy patch.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28918 | 2019-10-11T06:28:40Z | 2019-10-12T17:08:43Z | 2019-10-12T17:08:43Z | 2019-10-12T17:09:10Z |
DOC: Fix typos in docstrings | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 2da74012de968..b49bb856a2e2b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -638,7 +638,7 @@ def levels(self):
@property
def _values(self):
- # We override here, since our parent uses _data, which we dont' use.
+ # We override here, since our parent uses _data, which we don't use.
return self.values
@property
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 2ecb66bc8f1e4..c6dce77c4d078 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -40,9 +40,9 @@
class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
# Most attrs are dispatched via datetimelike_{ops,methods}
- # Some are "raw" methods, the result is not not re-boxed in an Index
+ # Some are "raw" methods, the result is not re-boxed in an Index
# We also have a few "extra" attrs, which may or may not be raw,
- # which we we dont' want to expose in the .dt accessor.
+ # which we don't want to expose in the .dt accessor.
_delegate_class = TimedeltaArray
_delegated_properties = TimedeltaArray._datetimelike_ops + ["components"]
_delegated_methods = TimedeltaArray._datetimelike_methods + [
| Fixed three typos in docstrings. | https://api.github.com/repos/pandas-dev/pandas/pulls/28915 | 2019-10-11T02:49:50Z | 2019-10-11T03:53:58Z | 2019-10-11T03:53:58Z | 2019-10-11T04:11:56Z |
TYPING: lockdown test modules passing mypy | diff --git a/setup.cfg b/setup.cfg
index 43dbac15f5cfe..9c841b76761f5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -130,5 +130,116 @@ skip = pandas/__init__.py,pandas/core/api.py
ignore_missing_imports=True
no_implicit_optional=True
-[mypy-pandas.conftest,pandas.tests.*]
+[mypy-pandas.conftest]
+ignore_errors=True
+
+[mypy-pandas.tests.api.test_api]
+ignore_errors=True
+
+[mypy-pandas.tests.arithmetic.test_datetime64]
+ignore_errors=True
+
+[mypy-pandas.tests.arrays.test_array]
+ignore_errors=True
+
+[mypy-pandas.tests.arrays.test_datetimelike]
+ignore_errors=True
+
+[mypy-pandas.tests.arrays.test_period]
+ignore_errors=True
+
+[mypy-pandas.tests.computation.test_eval]
+ignore_errors=True
+
+[mypy-pandas.tests.dtypes.test_common]
+ignore_errors=True
+
+[mypy-pandas.tests.dtypes.test_inference]
+ignore_errors=True
+
+[mypy-pandas.tests.extension.decimal.test_decimal]
+ignore_errors=True
+
+[mypy-pandas.tests.extension.json.array]
+ignore_errors=True
+
+[mypy-pandas.tests.extension.json.test_json]
+ignore_errors=True
+
+[mypy-pandas.tests.extension.test_numpy]
+ignore_errors=True
+
+[mypy-pandas.tests.extension.test_sparse]
+ignore_errors=True
+
+[mypy-pandas.tests.frame.test_constructors]
+ignore_errors=True
+
+[mypy-pandas.tests.frame.test_convert_to]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.datetimes.test_datetimelike]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.interval.test_base]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.interval.test_interval_tree]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.period.test_period]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.test_base]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.test_category]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.test_numeric]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.test_range]
+ignore_errors=True
+
+[mypy-pandas.tests.indexes.timedeltas.test_timedelta]
+ignore_errors=True
+
+[mypy-pandas.tests.indexing.test_coercion]
+ignore_errors=True
+
+[mypy-pandas.tests.indexing.test_loc]
+ignore_errors=True
+
+[mypy-pandas.tests.io.json.test_ujson]
+ignore_errors=True
+
+[mypy-pandas.tests.io.parser.conftest]
+ignore_errors=True
+
+[mypy-pandas.tests.io.test_sql]
+ignore_errors=True
+
+[mypy-pandas.tests.plotting.test_backend]
+ignore_errors=True
+
+[mypy-pandas.tests.series.test_constructors]
+ignore_errors=True
+
+[mypy-pandas.tests.series.test_operators]
+ignore_errors=True
+
+[mypy-pandas.tests.test_algos]
+ignore_errors=True
+
+[mypy-pandas.tests.test_base]
+ignore_errors=True
+
+[mypy-pandas.tests.tseries.offsets.test_offsets]
+ignore_errors=True
+
+[mypy-pandas.tests.tseries.offsets.test_offsets_properties]
+ignore_errors=True
+
+[mypy-pandas.tests.tseries.offsets.test_yqm_offsets]
ignore_errors=True
| xref https://github.com/pandas-dev/pandas/pull/28904#discussion_r333740051 and https://github.com/pandas-dev/pandas/pull/28746#discussion_r330499267 | https://api.github.com/repos/pandas-dev/pandas/pulls/28914 | 2019-10-11T00:02:00Z | 2019-10-11T11:56:58Z | 2019-10-11T11:56:58Z | 2019-10-11T13:43:35Z |
CLN: simplify maybe_promote in float and complex cases | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 1e353c97be754..40db53016fb62 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -405,11 +405,8 @@ def maybe_promote(dtype, fill_value=np.nan):
dtype = np.min_scalar_type(fill_value)
elif dtype.kind == "c":
- if not np.can_cast(fill_value, dtype):
- if np.can_cast(fill_value, np.dtype("c16")):
- dtype = np.dtype(np.complex128)
- else:
- dtype = np.dtype(np.object_)
+ mst = np.min_scalar_type(fill_value)
+ dtype = np.promote_types(dtype, mst)
if dtype.kind == "c" and not np.isnan(fill_value):
fill_value = dtype.type(fill_value)
@@ -490,16 +487,8 @@ def maybe_promote(dtype, fill_value=np.nan):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
- c8 = np.dtype(np.complex64)
- info = np.finfo(dtype) if dtype.kind == "f" else np.iinfo(dtype)
- if (
- np.can_cast(fill_value, c8)
- and np.can_cast(info.min, c8)
- and np.can_cast(info.max, c8)
- ):
- dtype = np.dtype(np.complex64)
- else:
- dtype = np.dtype(np.complex128)
+ mst = np.min_scalar_type(fill_value)
+ dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
| Analogous to #28899 (orthogonal) for complex and float dtypes. I'm pretty sure that after the current crop of maybe_promote PRs goes through we can do another round of consolidating code. | https://api.github.com/repos/pandas-dev/pandas/pulls/28913 | 2019-10-10T22:13:45Z | 2019-10-11T11:57:37Z | 2019-10-11T11:57:37Z | 2019-10-11T15:29:11Z |
TYPING: errors reported by mypy 0.730 | diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index b3c7b8a7c8b9f..3a36713ccdbda 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -68,7 +68,11 @@ def load_reduce(self):
class _LoadSparseSeries:
# To load a SparseSeries as a Series[Sparse]
- def __new__(cls) -> "Series":
+
+ # https://github.com/python/mypy/issues/1020
+ # error: Incompatible return type for "__new__" (returns "Series", but must return
+ # a subtype of "_LoadSparseSeries")
+ def __new__(cls) -> "Series": # type: ignore
from pandas import Series
warnings.warn(
@@ -82,7 +86,11 @@ def __new__(cls) -> "Series":
class _LoadSparseFrame:
# To load a SparseDataFrame as a DataFrame[Sparse]
- def __new__(cls) -> "DataFrame":
+
+ # https://github.com/python/mypy/issues/1020
+ # error: Incompatible return type for "__new__" (returns "DataFrame", but must
+ # return a subtype of "_LoadSparseFrame")
+ def __new__(cls) -> "DataFrame": # type: ignore
from pandas import DataFrame
warnings.warn(
| errors reported by mypy 0.730 on master.
```
pandas\compat\pickle_compat.py:71: error: Incompatible return type for "__new__" (returns "Series", but must return a subtype of "_LoadSparseSeries")
pandas\compat\pickle_compat.py:85: error: Incompatible return type for "__new__" (returns "DataFrame", but must return a subtype of "_LoadSparseFrame")
```
probably makes sense to apply these changes now to avoid ci breakage on mypy bump.
| https://api.github.com/repos/pandas-dev/pandas/pulls/28910 | 2019-10-10T21:51:14Z | 2019-10-11T11:58:25Z | 2019-10-11T11:58:24Z | 2019-10-11T13:44:44Z |
TYPING: fix type annotation for pandas.io.formats.format._binify | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 15f21814b072d..ad62c56a337b6 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -868,6 +868,8 @@ def _join_multiline(self, *args) -> str:
np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
for col in strcols
]
+
+ assert lwidth is not None
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
@@ -1890,7 +1892,7 @@ def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> Non
set_option("display.column_space", max(12, accuracy + 9))
-def _binify(cols: List[np.int32], line_width: Union[np.int32, int]) -> List[int]:
+def _binify(cols: List[int], line_width: int) -> List[int]:
adjoin_width = 1
bins = []
curr_width = 0
| - [ ] closes #28843
- [n/a ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [n/a ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28908 | 2019-10-10T21:33:51Z | 2019-10-11T12:41:20Z | 2019-10-11T12:41:20Z | 2019-10-11T12:41:24Z |
Added note to 'contributing.rst file', telling users to append GH Issue… | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index dc6fa3d100212..949b6bd475319 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -949,6 +949,9 @@ the expected correct result::
assert_frame_equal(pivoted, expected)
+Please remember to add the Github Issue Number as a comment to a new test.
+E.g. "# brief comment, see GH#28907"
+
Transitioning to ``pytest``
~~~~~~~~~~~~~~~~~~~~~~~~~~~
| … Number to new tests.
- [ X ] closes #28703
| https://api.github.com/repos/pandas-dev/pandas/pulls/28907 | 2019-10-10T20:28:37Z | 2019-10-12T17:09:57Z | 2019-10-12T17:09:57Z | 2019-10-12T17:10:03Z |
clean tests/indexing/common.py | diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 78764e6763e95..812d84261eb46 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -1,5 +1,4 @@
""" common utilities """
-
import itertools
from warnings import catch_warnings, filterwarnings
@@ -29,7 +28,7 @@ def _axify(obj, key, axis):
class Base:
""" indexing comprehensive base class """
- _objs = {"series", "frame"}
+ _kinds = {"series", "frame"}
_typs = {
"ints",
"uints",
@@ -101,13 +100,12 @@ def setup_method(self, method):
self.series_empty = Series()
# form agglomerates
- for o in self._objs:
-
+ for kind in self._kinds:
d = dict()
- for t in self._typs:
- d[t] = getattr(self, "{o}_{t}".format(o=o, t=t), None)
+ for typ in self._typs:
+ d[typ] = getattr(self, "{kind}_{typ}".format(kind=kind, typ=typ))
- setattr(self, o, d)
+ setattr(self, kind, d)
def generate_indices(self, f, values=False):
""" generate the indices
@@ -117,7 +115,7 @@ def generate_indices(self, f, values=False):
axes = f.axes
if values:
- axes = (list(range(len(a))) for a in axes)
+ axes = (list(range(len(ax))) for ax in axes)
return itertools.product(*axes)
@@ -186,34 +184,34 @@ def check_result(
method2,
key2,
typs=None,
- objs=None,
+ kinds=None,
axes=None,
fails=None,
):
- def _eq(t, o, a, obj, k1, k2):
+ def _eq(typ, kind, axis, obj, key1, key2):
""" compare equal for these 2 keys """
-
- if a is not None and a > obj.ndim - 1:
+ if axis > obj.ndim - 1:
return
def _print(result, error=None):
- if error is not None:
- error = str(error)
- v = (
+ err = str(error) if error is not None else ""
+ msg = (
"%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s"
- % (name, result, t, o, method1, method2, a, error or "")
+ % (name, result, typ, kind, method1, method2, axis, err)
)
if _verbose:
- pprint_thing(v)
+ pprint_thing(msg)
try:
- rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
+ rs = getattr(obj, method1).__getitem__(_axify(obj, key1, axis))
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
try:
- xp = self.get_result(obj, method2, k2, a)
+ xp = self.get_result(
+ obj=obj, method=method2, key=key2, axis=axis
+ )
except (KeyError, IndexError):
# TODO: why is this allowed?
result = "no comp"
@@ -228,8 +226,8 @@ def _print(result, error=None):
else:
tm.assert_equal(rs, xp)
result = "ok"
- except AssertionError as e:
- detail = str(e)
+ except AssertionError as exc:
+ detail = str(exc)
result = "fail"
# reverse the checks
@@ -258,36 +256,25 @@ def _print(result, error=None):
if typs is None:
typs = self._typs
- if objs is None:
- objs = self._objs
+ if kinds is None:
+ kinds = self._kinds
- if axes is not None:
- if not isinstance(axes, (tuple, list)):
- axes = [axes]
- else:
- axes = list(axes)
- else:
+ if axes is None:
axes = [0, 1]
+ elif not isinstance(axes, (tuple, list)):
+ assert isinstance(axes, int)
+ axes = [axes]
# check
- for o in objs:
- if o not in self._objs:
+ for kind in kinds:
+ if kind not in self._kinds:
continue
- d = getattr(self, o)
- for a in axes:
- for t in typs:
- if t not in self._typs:
+ d = getattr(self, kind)
+ for ax in axes:
+ for typ in typs:
+ if typ not in self._typs:
continue
- obj = d[t]
- if obj is None:
- continue
-
- def _call(obj=obj):
- obj = obj.copy()
-
- k2 = key2
- _eq(t, o, a, obj, key1, k2)
-
- _call()
+ obj = d[typ]
+ _eq(typ=typ, kind=kind, axis=ax, obj=obj, key1=key1, key2=key2)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index c3ba5c0545b8b..31120c2c023cc 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -284,7 +284,7 @@ def test_iloc_getitem_dups(self):
[0, 1, 1, 3],
"ix",
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
- objs=["series", "frame"],
+ kinds=["series", "frame"],
typs=["ints", "uints"],
)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 0b8f3af760f1d..532b77d6519c1 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -19,9 +19,9 @@ def _check(f, func, values=False):
expected = self.get_value(f, i, values)
tm.assert_almost_equal(result, expected)
- for o in self._objs:
+ for kind in self._kinds:
- d = getattr(self, o)
+ d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
@@ -47,9 +47,9 @@ def _check(f, func, values=False):
expected = self.get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
- for t in self._objs:
+ for kind in self._kinds:
- d = getattr(self, t)
+ d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
| clean-up of tests/indexing/common.py. | https://api.github.com/repos/pandas-dev/pandas/pulls/28904 | 2019-10-10T19:40:09Z | 2019-10-11T12:00:15Z | 2019-10-11T12:00:15Z | 2019-10-11T16:07:25Z |
REF: simplify maybe_promote integer cases | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 1e353c97be754..1e62527f95bc7 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -423,57 +423,14 @@ def maybe_promote(dtype, fill_value=np.nan):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
- # upcast to prevent overflow
- mst = np.min_scalar_type(fill_value)
- if mst > dtype:
- # np.dtype ordering considers:
- # int[n] < int[2*n]
- # uint[n] < uint[2*n]
- # u?int[n] < object_
- dtype = mst
-
- elif np.can_cast(fill_value, dtype):
- pass
-
- elif dtype.kind == "u" and mst.kind == "i":
+ if not np.can_cast(fill_value, dtype):
+ # upcast to prevent overflow
+ mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
- elif dtype.kind == "i" and mst.kind == "u":
-
- if fill_value > np.iinfo(np.int64).max:
- # object is the only way to represent fill_value and keep
- # the range allowed by the given dtype
- dtype = np.dtype(np.object_)
-
- elif mst.itemsize < dtype.itemsize:
- pass
-
- elif dtype.itemsize == mst.itemsize:
- # We never cast signed to unsigned because that loses
- # parts of the original range, so find the smallest signed
- # integer that can hold all of `mst`.
- ndt = {
- np.int64: np.object_,
- np.int32: np.int64,
- np.int16: np.int32,
- np.int8: np.int16,
- }[dtype.type]
- dtype = np.dtype(ndt)
-
- else:
- # bump to signed integer dtype that holds all of `mst` range
- # Note: we have to use itemsize because some (windows)
- # builds don't satisfiy e.g. np.uint32 == np.uint32
- ndt = {
- 4: np.int64,
- 2: np.int32,
- 1: np.int16, # TODO: Test for this case
- }[mst.itemsize]
- dtype = np.dtype(ndt)
-
fill_value = dtype.type(fill_value)
elif issubclass(dtype.type, np.floating):
| @jreback its a little absurd how much this can be simplified after all the trouble I went through to on the last round
Let's hope the float cases simplify as nicely (separate branch/PR) | https://api.github.com/repos/pandas-dev/pandas/pulls/28899 | 2019-10-10T16:47:28Z | 2019-10-10T22:53:02Z | 2019-10-10T22:53:02Z | 2019-10-10T22:58:04Z |
REF: maybe_promote refactor/cleanup | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 328c7566d8e8d..90c2638be5eec 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -393,32 +393,29 @@ def maybe_promote(dtype, fill_value=np.nan):
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
- dtype = np.object_
+ dtype = np.dtype(np.object_)
+
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
- if not isna(fill_value):
- fill_value = dtype.type(fill_value)
elif dtype.kind == "f":
- if not np.can_cast(fill_value, dtype):
- # e.g. dtype is float32, need float64
- dtype = np.min_scalar_type(fill_value)
+ mst = np.min_scalar_type(fill_value)
+ if mst > dtype:
+ # e.g. mst is np.float64 and dtype is np.float32
+ dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
- if dtype.kind == "c" and not np.isnan(fill_value):
- fill_value = dtype.type(fill_value)
-
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
- dtype = np.object_
- else:
- fill_value = np.bool_(fill_value)
+ dtype = np.dtype(np.object_)
+
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
+
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
@@ -428,35 +425,20 @@ def maybe_promote(dtype, fill_value=np.nan):
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
- fill_value = dtype.type(fill_value)
-
- elif issubclass(dtype.type, np.floating):
- # check if we can cast
- if _check_lossless_cast(fill_value, dtype):
- fill_value = dtype.type(fill_value)
-
- if dtype.kind in ["c", "f"]:
- # e.g. if dtype is complex128 and fill_value is 1, we
- # want np.complex128(1)
- fill_value = dtype.type(fill_value)
-
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
+
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
- if mst > dtype and mst.kind == "c":
+ if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
- if dtype.kind == "c":
- # make sure we have a np.complex and not python complex
- fill_value = dtype.type(fill_value)
-
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
@@ -466,37 +448,48 @@ def maybe_promote(dtype, fill_value=np.nan):
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
- dtype = np.object_
+ dtype = np.dtype(np.object_)
fill_value = np.nan
else:
- dtype = np.object_
+ dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
- dtype = np.object_
+ dtype = np.dtype(np.object_)
+ fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
-def _check_lossless_cast(value, dtype: np.dtype) -> bool:
+def _ensure_dtype_type(value, dtype):
"""
- Check if we can cast the given value to the given dtype _losslesly_.
+ Ensure that the given value is an instance of the given dtype.
+
+ e.g. if out dtype is np.complex64, we should have an instance of that
+ as opposed to a python complex object.
Parameters
----------
value : object
- dtype : np.dtype
+ dtype : np.dtype or ExtensionDtype
Returns
-------
- bool
+ object
"""
- casted = dtype.type(value)
- if casted == value:
- return True
- return False
+
+ # Start with exceptions in which we do _not_ cast to numpy types
+ if is_extension_array_dtype(dtype):
+ return value
+ elif dtype == np.object_:
+ return value
+ elif isna(value):
+ # e.g. keep np.nan rather than try to cast to np.float32(np.nan)
+ return value
+
+ return dtype.type(value)
def infer_dtype_from(val, pandas_dtype=False):
| There is some casting we currently do in many places in this function. Instead this PR implements `_ensure_dtype_type` and calls it once at the end of the function.
Removes _check_lossless_cast which can be replaced with np.can_cast | https://api.github.com/repos/pandas-dev/pandas/pulls/28897 | 2019-10-10T15:48:17Z | 2019-10-12T17:14:22Z | 2019-10-12T17:14:22Z | 2019-10-12T18:27:57Z |
DOC: Fix commpiler typo in contributing.rst | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 10d702808606a..dc6fa3d100212 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -172,7 +172,7 @@ installed (or you wish to install a newer version) you can install a compiler
yum groupinstall "Development Tools"
For other Linux distributions, consult your favourite search engine for
-commpiler installation instructions.
+compiler installation instructions.
Let us know if you have any difficulties by opening an issue or reaching out on
`Gitter`_.
| Corrects the spelling of compiler in contributing.rst.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28891 | 2019-10-10T07:23:42Z | 2019-10-10T09:20:23Z | 2019-10-10T09:20:23Z | 2019-10-10T12:43:43Z |
DOC: Update performance comparison section of io docs | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index ef87b6c57b1b9..f8e174abfd193 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -5576,7 +5576,7 @@ Performance considerations
--------------------------
This is an informal comparison of various IO methods, using pandas
-0.20.3. Timings are machine dependent and small differences should be
+0.24.2. Timings are machine dependent and small differences should be
ignored.
.. code-block:: ipython
@@ -5597,11 +5597,18 @@ Given the next test set:
.. code-block:: python
+
+
+ import numpy as np
+
import os
sz = 1000000
df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz})
+ sz = 1000000
+ np.random.seed(42)
+ df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz})
def test_sql_write(df):
if os.path.exists('test.sql'):
@@ -5610,151 +5617,152 @@ Given the next test set:
df.to_sql(name='test_table', con=sql_db)
sql_db.close()
-
def test_sql_read():
sql_db = sqlite3.connect('test.sql')
pd.read_sql_query("select * from test_table", sql_db)
sql_db.close()
-
def test_hdf_fixed_write(df):
df.to_hdf('test_fixed.hdf', 'test', mode='w')
-
def test_hdf_fixed_read():
pd.read_hdf('test_fixed.hdf', 'test')
-
def test_hdf_fixed_write_compress(df):
df.to_hdf('test_fixed_compress.hdf', 'test', mode='w', complib='blosc')
-
def test_hdf_fixed_read_compress():
pd.read_hdf('test_fixed_compress.hdf', 'test')
-
def test_hdf_table_write(df):
df.to_hdf('test_table.hdf', 'test', mode='w', format='table')
-
def test_hdf_table_read():
pd.read_hdf('test_table.hdf', 'test')
-
def test_hdf_table_write_compress(df):
df.to_hdf('test_table_compress.hdf', 'test', mode='w',
complib='blosc', format='table')
-
def test_hdf_table_read_compress():
pd.read_hdf('test_table_compress.hdf', 'test')
-
def test_csv_write(df):
df.to_csv('test.csv', mode='w')
-
def test_csv_read():
pd.read_csv('test.csv', index_col=0)
-
def test_feather_write(df):
df.to_feather('test.feather')
-
def test_feather_read():
pd.read_feather('test.feather')
-
def test_pickle_write(df):
df.to_pickle('test.pkl')
-
def test_pickle_read():
pd.read_pickle('test.pkl')
-
def test_pickle_write_compress(df):
df.to_pickle('test.pkl.compress', compression='xz')
-
def test_pickle_read_compress():
pd.read_pickle('test.pkl.compress', compression='xz')
-When writing, the top-three functions in terms of speed are are
-``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``.
+ def test_parquet_write(df):
+ df.to_parquet('test.parquet')
+
+ def test_parquet_read():
+ pd.read_parquet('test.parquet')
+
+When writing, the top-three functions in terms of speed are ``test_feather_write``, ``test_hdf_fixed_write`` and ``test_hdf_fixed_write_compress``.
.. code-block:: ipython
- In [14]: %timeit test_sql_write(df)
- 2.37 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [4]: %timeit test_sql_write(df)
+ 3.29 s ± 43.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [15]: %timeit test_hdf_fixed_write(df)
- 194 ms ± 65.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [5]: %timeit test_hdf_fixed_write(df)
+ 19.4 ms ± 560 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [26]: %timeit test_hdf_fixed_write_compress(df)
- 119 ms ± 2.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [6]: %timeit test_hdf_fixed_write_compress(df)
+ 19.6 ms ± 308 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [16]: %timeit test_hdf_table_write(df)
- 623 ms ± 125 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [7]: %timeit test_hdf_table_write(df)
+ 449 ms ± 5.61 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [27]: %timeit test_hdf_table_write_compress(df)
- 563 ms ± 23.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [8]: %timeit test_hdf_table_write_compress(df)
+ 448 ms ± 11.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [17]: %timeit test_csv_write(df)
- 3.13 s ± 49.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [9]: %timeit test_csv_write(df)
+ 3.66 s ± 26.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [30]: %timeit test_feather_write(df)
- 103 ms ± 5.88 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [10]: %timeit test_feather_write(df)
+ 9.75 ms ± 117 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- In [31]: %timeit test_pickle_write(df)
- 109 ms ± 3.72 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [11]: %timeit test_pickle_write(df)
+ 30.1 ms ± 229 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [32]: %timeit test_pickle_write_compress(df)
- 3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [12]: %timeit test_pickle_write_compress(df)
+ 4.29 s ± 15.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+
+ In [13]: %timeit test_parquet_write(df)
+ 67.6 ms ± 706 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and
``test_hdf_fixed_read``.
+
.. code-block:: ipython
- In [18]: %timeit test_sql_read()
- 1.35 s ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [14]: %timeit test_sql_read()
+ 1.77 s ± 17.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+
+ In [15]: %timeit test_hdf_fixed_read()
+ 19.4 ms ± 436 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+ In [16]: %timeit test_hdf_fixed_read_compress()
+ 19.5 ms ± 222 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [19]: %timeit test_hdf_fixed_read()
- 14.3 ms ± 438 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ In [17]: %timeit test_hdf_table_read()
+ 38.6 ms ± 857 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [28]: %timeit test_hdf_fixed_read_compress()
- 23.5 ms ± 672 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [18]: %timeit test_hdf_table_read_compress()
+ 38.8 ms ± 1.49 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [20]: %timeit test_hdf_table_read()
- 35.4 ms ± 314 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [19]: %timeit test_csv_read()
+ 452 ms ± 9.04 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [29]: %timeit test_hdf_table_read_compress()
- 42.6 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [20]: %timeit test_feather_read()
+ 12.4 ms ± 99.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- In [22]: %timeit test_csv_read()
- 516 ms ± 27.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+ In [21]: %timeit test_pickle_read()
+ 18.4 ms ± 191 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- In [33]: %timeit test_feather_read()
- 4.06 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ In [22]: %timeit test_pickle_read_compress()
+ 915 ms ± 7.48 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- In [34]: %timeit test_pickle_read()
- 6.5 ms ± 172 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ In [23]: %timeit test_parquet_read()
+ 24.4 ms ± 146 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [35]: %timeit test_pickle_read_compress()
- 588 ms ± 3.57 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+For this test case ``test.pkl.compress``, ``test.parquet`` and ``test.feather`` took the least space on disk.
Space on disk (in bytes)
.. code-block:: none
- 34816000 Aug 21 18:00 test.sql
- 24009240 Aug 21 18:00 test_fixed.hdf
- 7919610 Aug 21 18:00 test_fixed_compress.hdf
- 24458892 Aug 21 18:00 test_table.hdf
- 8657116 Aug 21 18:00 test_table_compress.hdf
- 28520770 Aug 21 18:00 test.csv
- 16000248 Aug 21 18:00 test.feather
- 16000848 Aug 21 18:00 test.pkl
- 7554108 Aug 21 18:00 test.pkl.compress
+ 29519500 Oct 10 06:45 test.csv
+ 16000248 Oct 10 06:45 test.feather
+ 8281983 Oct 10 06:49 test.parquet
+ 16000857 Oct 10 06:47 test.pkl
+ 7552144 Oct 10 06:48 test.pkl.compress
+ 34816000 Oct 10 06:42 test.sql
+ 24009288 Oct 10 06:43 test_fixed.hdf
+ 24009288 Oct 10 06:43 test_fixed_compress.hdf
+ 24458940 Oct 10 06:44 test_table.hdf
+ 24458940 Oct 10 06:44 test_table_compress.hdf
+
+
+
| xref https://github.com/python-sprints/pandas-mentoring/issues/163
| https://api.github.com/repos/pandas-dev/pandas/pulls/28890 | 2019-10-10T07:04:29Z | 2019-11-09T00:59:55Z | 2019-11-09T00:59:55Z | 2019-11-09T01:00:05Z |
DOC: Fixed PR06 error in pandas.io.formats.style.Styler.set_table_attributes | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 6bac3fe426f2d..6b98eaca9dacc 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -780,7 +780,7 @@ def set_table_attributes(self, attributes):
Parameters
----------
- attributes : string
+ attributes : str
Returns
-------
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28887 | 2019-10-10T03:54:24Z | 2019-10-10T15:53:58Z | 2019-10-10T15:53:58Z | 2019-10-10T16:03:57Z |
REF: use fused types for groupby_helper | diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 000689f634545..6b434b6470581 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -12,39 +12,27 @@ _int64_max = np.iinfo(np.int64).max
# group_nth, group_last, group_rank
# ----------------------------------------------------------------------
-{{py:
-
-# name, c_type, nan_val
-dtypes = [('float64', 'float64_t', 'NAN'),
- ('float32', 'float32_t', 'NAN'),
- ('int64', 'int64_t', 'NPY_NAT'),
- ('object', 'object', 'NAN')]
-
-def get_dispatch(dtypes):
-
- for name, c_type, nan_val in dtypes:
-
- yield name, c_type, nan_val
-}}
-
-
-{{for name, c_type, nan_val in get_dispatch(dtypes)}}
+ctypedef fused rank_t:
+ float64_t
+ float32_t
+ int64_t
+ object
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_last_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- Py_ssize_t min_count=-1):
+def group_last(rank_t[:, :] out,
+ int64_t[:] counts,
+ rank_t[:, :] values,
+ const int64_t[:] labels,
+ Py_ssize_t min_count=-1):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{c_type}} val
- ndarray[{{c_type}}, ndim=2] resx
+ rank_t val
+ ndarray[rank_t, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -53,19 +41,15 @@ def group_last_{{name}}({{c_type}}[:, :] out,
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros((<object>out).shape, dtype=np.int64)
- {{if name == 'object'}}
- resx = np.empty((<object>out).shape, dtype=object)
- {{else}}
- resx = np.empty_like(out)
- {{endif}}
+ if rank_t is object:
+ resx = np.empty((<object>out).shape, dtype=object)
+ else:
+ resx = np.empty_like(out)
N, K = (<object>values).shape
- {{if name == "object"}}
- if True: # make templating happy
- {{else}}
- with nogil:
- {{endif}}
+ if rank_t is object:
+ # TODO: De-duplicate once conditional-nogil is available
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -76,36 +60,77 @@ def group_last_{{name}}({{c_type}}[:, :] out,
val = values[i, j]
# not nan
- if (
- {{if not name.startswith("int")}}
- val == val and
- {{endif}}
- val != {{nan_val}}):
- nobs[lab, j] += 1
- resx[lab, j] = val
+ if rank_t is int64_t:
+ # need a special notna check
+ if val != NPY_NAT:
+ nobs[lab, j] += 1
+ resx[lab, j] = val
+ else:
+ if val == val:
+ nobs[lab, j] += 1
+ resx[lab, j] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
- out[i, j] = {{nan_val}}
+ if rank_t is int64_t:
+ out[i, j] = NPY_NAT
+ else:
+ out[i, j] = NAN
else:
out[i, j] = resx[i, j]
+ else:
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if rank_t is int64_t:
+ # need a special notna check
+ if val != NPY_NAT:
+ nobs[lab, j] += 1
+ resx[lab, j] = val
+ else:
+ if val == val:
+ nobs[lab, j] += 1
+ resx[lab, j] = val
+
+ for i in range(ncounts):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ if rank_t is int64_t:
+ out[i, j] = NPY_NAT
+ else:
+ out[i, j] = NAN
+ else:
+ out[i, j] = resx[i, j]
+
+group_last_float64 = group_last["float64_t"]
+group_last_float32 = group_last["float32_t"]
+group_last_int64 = group_last["int64_t"]
+group_last_object = group_last["object"]
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_nth_{{name}}({{c_type}}[:, :] out,
- int64_t[:] counts,
- {{c_type}}[:, :] values,
- const int64_t[:] labels, int64_t rank,
- Py_ssize_t min_count=-1):
+def group_nth(rank_t[:, :] out,
+ int64_t[:] counts,
+ rank_t[:, :] values,
+ const int64_t[:] labels, int64_t rank,
+ Py_ssize_t min_count=-1):
"""
Only aggregates on axis=0
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- {{c_type}} val
- ndarray[{{c_type}}, ndim=2] resx
+ rank_t val
+ ndarray[rank_t, ndim=2] resx
ndarray[int64_t, ndim=2] nobs
assert min_count == -1, "'min_count' only used in add and prod"
@@ -114,19 +139,15 @@ def group_nth_{{name}}({{c_type}}[:, :] out,
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros((<object>out).shape, dtype=np.int64)
- {{if name=='object'}}
- resx = np.empty((<object>out).shape, dtype=object)
- {{else}}
- resx = np.empty_like(out)
- {{endif}}
+ if rank_t is object:
+ resx = np.empty((<object>out).shape, dtype=object)
+ else:
+ resx = np.empty_like(out)
N, K = (<object>values).shape
- {{if name == "object"}}
- if True: # make templating happy
- {{else}}
- with nogil:
- {{endif}}
+ if rank_t is object:
+ # TODO: De-duplicate once conditional-nogil is available
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -137,11 +158,7 @@ def group_nth_{{name}}({{c_type}}[:, :] out,
val = values[i, j]
# not nan
- if (
- {{if not name.startswith("int")}}
- val == val and
- {{endif}}
- val != {{nan_val}}):
+ if val == val:
nobs[lab, j] += 1
if nobs[lab, j] == rank:
resx[lab, j] = val
@@ -149,28 +166,65 @@ def group_nth_{{name}}({{c_type}}[:, :] out,
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
- out[i, j] = {{nan_val}}
+ out[i, j] = NAN
else:
out[i, j] = resx[i, j]
+ else:
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab < 0:
+ continue
+
+ counts[lab] += 1
+ for j in range(K):
+ val = values[i, j]
+
+ # not nan
+ if rank_t is int64_t:
+ # need a special notna check
+ if val != NPY_NAT:
+ nobs[lab, j] += 1
+ if nobs[lab, j] == rank:
+ resx[lab, j] = val
+ else:
+ if val == val:
+ nobs[lab, j] += 1
+ if nobs[lab, j] == rank:
+ resx[lab, j] = val
+
+ for i in range(ncounts):
+ for j in range(K):
+ if nobs[i, j] == 0:
+ if rank_t is int64_t:
+ out[i, j] = NPY_NAT
+ else:
+ out[i, j] = NAN
+ else:
+ out[i, j] = resx[i, j]
+
-{{if name != 'object'}}
+group_nth_float64 = group_nth["float64_t"]
+group_nth_float32 = group_nth["float32_t"]
+group_nth_int64 = group_nth["int64_t"]
+group_nth_object = group_nth["object"]
@cython.boundscheck(False)
@cython.wraparound(False)
-def group_rank_{{name}}(float64_t[:, :] out,
- {{c_type}}[:, :] values,
- const int64_t[:] labels,
- bint is_datetimelike, object ties_method,
- bint ascending, bint pct, object na_option):
+def group_rank(float64_t[:, :] out,
+ rank_t[:, :] values,
+ const int64_t[:] labels,
+ bint is_datetimelike, object ties_method,
+ bint ascending, bint pct, object na_option):
"""
Provides the rank of values within each group.
Parameters
----------
out : array of float64_t values which this method will write its results to
- values : array of {{c_type}} values to be ranked
+ values : array of rank_t values to be ranked
labels : array containing unique label for each group, with its ordering
matching up to the corresponding record in `values`
is_datetimelike : bool, default False
@@ -203,10 +257,13 @@ def group_rank_{{name}}(float64_t[:, :] out,
Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0
ndarray[int64_t] _as
ndarray[float64_t, ndim=2] grp_sizes
- ndarray[{{c_type}}] masked_vals
+ ndarray[rank_t] masked_vals
ndarray[uint8_t] mask
bint keep_na
- {{c_type}} nan_fill_val
+ rank_t nan_fill_val
+
+ if rank_t is object:
+ raise NotImplementedError("Cant do nogil")
tiebreak = tiebreakers[ties_method]
keep_na = na_option == 'keep'
@@ -217,25 +274,23 @@ def group_rank_{{name}}(float64_t[:, :] out,
# with mask, without obfuscating location of missing data
# in values array
masked_vals = np.array(values[:, 0], copy=True)
- {{if name == 'int64'}}
- mask = (masked_vals == {{nan_val}}).astype(np.uint8)
- {{else}}
- mask = np.isnan(masked_vals).astype(np.uint8)
- {{endif}}
+ if rank_t is int64_t:
+ mask = (masked_vals == NPY_NAT).astype(np.uint8)
+ else:
+ mask = np.isnan(masked_vals).astype(np.uint8)
if ascending ^ (na_option == 'top'):
- {{if name == 'int64'}}
- nan_fill_val = np.iinfo(np.int64).max
- {{else}}
- nan_fill_val = np.inf
- {{endif}}
+ if rank_t is int64_t:
+ nan_fill_val = np.iinfo(np.int64).max
+ else:
+ nan_fill_val = np.inf
order = (masked_vals, mask, labels)
else:
- {{if name == 'int64'}}
- nan_fill_val = np.iinfo(np.int64).min
- {{else}}
- nan_fill_val = -np.inf
- {{endif}}
+ if rank_t is int64_t:
+ nan_fill_val = np.iinfo(np.int64).min
+ else:
+ nan_fill_val = -np.inf
+
order = (masked_vals, ~mask, labels)
np.putmask(masked_vals, mask, nan_fill_val)
@@ -337,8 +392,13 @@ def group_rank_{{name}}(float64_t[:, :] out,
out[i, 0] = NAN
elif grp_sizes[i, 0] != 0:
out[i, 0] = out[i, 0] / grp_sizes[i, 0]
-{{endif}}
-{{endfor}}
+
+
+group_rank_float64 = group_rank["float64_t"]
+group_rank_float32 = group_rank["float32_t"]
+group_rank_int64 = group_rank["int64_t"]
+# Note: we do not have a group_rank_object because that would require a
+# not-nogil implementation, see GH#19560
# ----------------------------------------------------------------------
@@ -484,7 +544,8 @@ def group_cummin(groupby_t[:, :] out,
const int64_t[:] labels,
int ngroups,
bint is_datetimelike):
- """Cumulative minimum of columns of `values`, in row groups `labels`.
+ """
+ Cumulative minimum of columns of `values`, in row groups `labels`.
Parameters
----------
@@ -548,9 +609,10 @@ def group_cummin(groupby_t[:, :] out,
def group_cummax(groupby_t[:, :] out,
groupby_t[:, :] values,
const int64_t[:] labels,
- int ngroups,
+ int ngroups,
bint is_datetimelike):
- """Cumulative maximum of columns of `values`, in row groups `labels`.
+ """
+ Cumulative maximum of columns of `values`, in row groups `labels`.
Parameters
----------
| There will be some nice cleanups we can do after we bump cython to 0.30 (which hasnt come out yet).
Also I think there is some na-checking code that we can share between the various fused-types functions after this. | https://api.github.com/repos/pandas-dev/pandas/pulls/28886 | 2019-10-10T03:47:10Z | 2019-10-11T12:01:31Z | 2019-10-11T12:01:31Z | 2019-10-11T15:25:48Z |
DOC: Fixed PR06 errors in pandas.api.extensions.ExtensionArray | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 0778b6726d104..7a16c3f6a35b6 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -177,7 +177,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
- copy : boolean, default False
+ copy : bool, default False
If True, copy the underlying data.
Returns
@@ -200,7 +200,7 @@ def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
- copy : boolean, default False
+ copy : bool, default False
If True, copy the underlying data.
Returns
@@ -769,7 +769,7 @@ def take(
Parameters
----------
- indices : sequence of integers
+ indices : sequence of int
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
| - [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28885 | 2019-10-10T03:45:40Z | 2019-10-10T16:00:24Z | 2019-10-10T16:00:24Z | 2019-10-10T16:04:28Z |
CLN: dont catch Exception in groupby var | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 4e0dd65042196..d477b173b95f0 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -41,6 +41,7 @@ class providing the base-class of operations.
)
from pandas.core.dtypes.missing import isna, notna
+from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
from pandas.core.base import (
@@ -721,6 +722,10 @@ def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
+ elif hasattr(nanops, "nan" + func):
+ # TODO: should we wrap this in to e.g. _is_builtin_func?
+ f = getattr(nanops, "nan" + func)
+
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
@@ -1297,16 +1302,9 @@ def var(self, ddof=1, *args, **kwargs):
"""
nv.validate_groupby_func("var", args, kwargs)
if ddof == 1:
- try:
- return self._cython_agg_general(
- "var",
- alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs),
- **kwargs
- )
- except Exception:
- f = lambda x: x.var(ddof=ddof, **kwargs)
- with _group_selection_context(self):
- return self._python_agg_general(f)
+ return self._cython_agg_general(
+ "var", alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs), **kwargs
+ )
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
| Between this, #28878, and #28873, I'm finding that the outside-in approach to cleaning this up is proving easier the earlier inside-out approach. | https://api.github.com/repos/pandas-dev/pandas/pulls/28883 | 2019-10-09T22:40:33Z | 2019-10-10T01:25:29Z | 2019-10-10T01:25:29Z | 2019-10-10T01:55:47Z |
TST: Fix maybe_promote floating non-boxed tests | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 5b13e13bb20ba..098f42b1a8c5c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -398,6 +398,22 @@ def maybe_promote(dtype, fill_value=np.nan):
dtype = np.dtype(np.float64)
if not isna(fill_value):
fill_value = dtype.type(fill_value)
+
+ elif dtype.kind == "f":
+ if not np.can_cast(fill_value, dtype):
+ # e.g. dtype is float32, need float64
+ dtype = np.min_scalar_type(fill_value)
+
+ elif dtype.kind == "c":
+ if not np.can_cast(fill_value, dtype):
+ if np.can_cast(fill_value, np.dtype("c16")):
+ dtype = np.dtype(np.complex128)
+ else:
+ dtype = np.dtype(np.object_)
+
+ if dtype.kind == "c" and not np.isnan(fill_value):
+ fill_value = dtype.type(fill_value)
+
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -405,7 +421,7 @@ def maybe_promote(dtype, fill_value=np.nan):
fill_value = np.bool_(fill_value)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
- dtype = np.object_
+ dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
@@ -415,11 +431,37 @@ def maybe_promote(dtype, fill_value=np.nan):
# check if we can cast
if _check_lossless_cast(fill_value, dtype):
fill_value = dtype.type(fill_value)
+
+ if dtype.kind in ["c", "f"]:
+ # e.g. if dtype is complex128 and fill_value is 1, we
+ # want np.complex128(1)
+ fill_value = dtype.type(fill_value)
+
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
- dtype = np.object_
+ dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
- dtype = np.complex128
+ c8 = np.dtype(np.complex64)
+ info = np.finfo(dtype) if dtype.kind == "f" else np.iinfo(dtype)
+ if (
+ np.can_cast(fill_value, c8)
+ and np.can_cast(info.min, c8)
+ and np.can_cast(info.max, c8)
+ ):
+ dtype = np.dtype(np.complex64)
+ else:
+ dtype = np.dtype(np.complex128)
+
+ elif dtype.kind == "c":
+ mst = np.min_scalar_type(fill_value)
+ if mst > dtype and mst.kind == "c":
+ # e.g. mst is np.complex128 and dtype is np.complex64
+ dtype = mst
+
+ if dtype.kind == "c":
+ # make sure we have a np.complex and not python complex
+ fill_value = dtype.type(fill_value)
+
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index e4e5a22ea6ca0..e9041a27ab9be 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -408,25 +408,14 @@ def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype, box):
if box_dtype == object:
pytest.xfail("falsely upcasts to object")
- if boxed and is_float_dtype(dtype) and is_complex_dtype(expected_dtype):
+ elif boxed and is_float_dtype(dtype) and is_complex_dtype(expected_dtype):
pytest.xfail("does not upcast to complex")
- if (dtype, expected_dtype) in [
+ elif boxed and (dtype, expected_dtype) in [
("float32", "float64"),
("float32", "complex64"),
("complex64", "complex128"),
]:
pytest.xfail("does not upcast correctly depending on value")
- # this following xfails are "only" a consequence of the - now strictly
- # enforced - principle that maybe_promote_with_scalar always casts
- if not boxed and abs(fill_value) < 2:
- pytest.xfail("wrong return type of fill_value")
- if (
- not boxed
- and dtype == "complex128"
- and expected_dtype == "complex128"
- and is_float_dtype(type(fill_value))
- ):
- pytest.xfail("wrong return type of fill_value")
# output is not a generic float, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
| Like #28864, this involves changing the underlying function. Some of this can be de-duplicated once these are in.
I think that this is the last of the xfails for non-boxed test cases. Really looking forward to having this done with,. | https://api.github.com/repos/pandas-dev/pandas/pulls/28880 | 2019-10-09T20:43:50Z | 2019-10-10T12:50:18Z | 2019-10-10T12:50:18Z | 2019-10-10T13:33:32Z |
CLN: assorted cleanups, remove unicode checks in cython | diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index c0aa661266d29..6b27b2204e75e 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -60,7 +60,7 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
val = arr[i]
if isinstance(val, bytes):
data = <bytes>val
- elif isinstance(val, unicode):
+ elif isinstance(val, str):
data = <bytes>val.encode(encoding)
elif val is None or is_nan(val):
# null, stringify and encode
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 17f1d011af01b..1cbdb0df6233c 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -667,7 +667,7 @@ cdef class StringHashTable(HashTable):
for i in range(n):
val = values[i]
- if isinstance(val, (str, unicode)):
+ if isinstance(val, str):
v = get_c_string(val)
else:
v = get_c_string(self.na_string_sentinel)
@@ -700,7 +700,7 @@ cdef class StringHashTable(HashTable):
for i in range(n):
val = values[i]
- if isinstance(val, (str, unicode)):
+ if isinstance(val, str):
v = get_c_string(val)
else:
v = get_c_string(self.na_string_sentinel)
@@ -774,7 +774,7 @@ cdef class StringHashTable(HashTable):
val = values[i]
if (ignore_na
- and (not isinstance(val, (str, unicode))
+ and (not isinstance(val, str)
or (use_na_value and val == na_value))):
# if missing values do not count as unique values (i.e. if
# ignore_na is True), we can skip the actual value, and
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index f5a42d7aef3ba..3f12ec4c15fc7 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -2249,7 +2249,7 @@ cdef _apply_converter(object f, parser_t *parser, int64_t col,
def _maybe_encode(values):
if values is None:
return []
- return [x.encode('utf-8') if isinstance(x, unicode) else x for x in values]
+ return [x.encode('utf-8') if isinstance(x, str) else x for x in values]
def sanitize_objects(ndarray[object] values, set na_values,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 2ed85595f7e3a..8f5c8d10776df 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -22,7 +22,7 @@ from pandas._libs.tslibs.np_datetime cimport (
from pandas._libs.tslibs.nattype cimport NPY_NAT
-def get_time_micros(ndarray[int64_t] dtindex):
+def get_time_micros(const int64_t[:] dtindex):
"""
Return the number of microseconds in the time component of a
nanosecond timestamp.
@@ -537,7 +537,7 @@ def get_date_field(const int64_t[:] dtindex, object field):
elif field == 'is_leap_year':
return isleapyear_arr(get_date_field(dtindex, 'Y'))
- raise ValueError("Field %s not supported" % field)
+ raise ValueError("Field {field} not supported".format(field=field))
@cython.wraparound(False)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index ca70c8af45f2f..33665484311ba 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -252,9 +252,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
-------
datetime, datetime/dateutil.parser._result, str
"""
- if not isinstance(arg, (str, unicode)):
- # Note: cython recognizes `unicode` in both py2/py3, optimizes
- # this check into a C call.
+ if not isinstance(arg, str):
return arg
if getattr(freq, "_typ", None) == "dateoffset":
@@ -370,7 +368,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
int year, quarter = -1, month, mnum, date_len
# special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
- assert isinstance(date_string, (str, unicode))
+ assert isinstance(date_string, str)
# len(date_string) == 0
# should be NaT???
@@ -517,7 +515,7 @@ cdef dateutil_parse(object timestr, object default, ignoretz=False,
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
- elif isinstance(tzdata, (str, unicode)):
+ elif isinstance(tzdata, str):
tzinfo = _dateutil_tzstr(tzdata)
elif isinstance(tzdata, int):
tzinfo = tzoffset(res.tzname, tzdata)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 32dcc86faa7e8..84a41b8757001 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2448,7 +2448,10 @@ class Period(_Period):
converted = other.asfreq(freq)
ordinal = converted.ordinal
- elif is_null_datetimelike(value) or value in nat_strings:
+ elif is_null_datetimelike(value) or (isinstance(value, str) and
+ value in nat_strings):
+ # explicit str check is necessary to avoid raising incorrectly
+ # if we have a non-hashable value.
ordinal = NPY_NAT
elif isinstance(value, str) or util.is_integer_object(value):
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index bda5f8f4326f1..958650e3842fa 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1148,7 +1148,7 @@ def _addsub_offset_array(self, other, op):
)
# For EA self.astype('O') returns a numpy array, not an Index
- left = lib.values_from_object(self.astype("O"))
+ left = self.astype("O")
res_values = op(left, np.array(other))
kwargs = {}
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index f2d74794eadf5..43208d98abd3c 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -70,7 +70,7 @@ def _period_array_cmp(cls, op):
nat_result = opname == "__ne__"
def wrapper(self, other):
- op = getattr(self.asi8, opname)
+ ordinal_op = getattr(self.asi8, opname)
other = lib.item_from_zerodim(other)
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
@@ -82,11 +82,11 @@ def wrapper(self, other):
if isinstance(other, Period):
self._check_compatible_with(other)
- result = op(other.ordinal)
+ result = ordinal_op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
- result = op(other.asi8)
+ result = ordinal_op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
@@ -98,7 +98,7 @@ def wrapper(self, other):
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
- result = op(other.ordinal)
+ result = ordinal_op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 6c9462ff4fa4d..21e07b5101a64 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -553,7 +553,7 @@ def __mul__(self, other):
# for that instead of ValueError
raise ValueError("Cannot multiply with unequal lengths")
- if is_object_dtype(other):
+ if is_object_dtype(other.dtype):
# this multiplication will succeed only if all elements of other
# are int or float scalars, so we will end up with
# timedelta64[ns]-dtyped result
@@ -601,11 +601,11 @@ def __truediv__(self, other):
if len(other) != len(self):
raise ValueError("Cannot divide vectors with unequal lengths")
- elif is_timedelta64_dtype(other):
+ elif is_timedelta64_dtype(other.dtype):
# let numpy handle it
return self._data / other
- elif is_object_dtype(other):
+ elif is_object_dtype(other.dtype):
# Note: we do not do type inference on the result, so either
# an object array or numeric-dtyped (if numpy does inference)
# will be returned. GH#23829
@@ -649,12 +649,12 @@ def __rtruediv__(self, other):
if len(other) != len(self):
raise ValueError("Cannot divide vectors with unequal lengths")
- elif is_timedelta64_dtype(other):
+ elif is_timedelta64_dtype(other.dtype):
# let numpy handle it
return other / self._data
- elif is_object_dtype(other):
- # Note: unlike in __truediv__, we do not _need_ to do type#
+ elif is_object_dtype(other.dtype):
+ # Note: unlike in __truediv__, we do not _need_ to do type
# inference on the result. It does not raise, a numeric array
# is returned. GH#23829
result = [other[n] / self[n] for n in range(len(self))]
@@ -701,7 +701,7 @@ def __floordiv__(self, other):
if len(other) != len(self):
raise ValueError("Cannot divide with unequal lengths")
- elif is_timedelta64_dtype(other):
+ elif is_timedelta64_dtype(other.dtype):
other = type(self)(other)
# numpy timedelta64 does not natively support floordiv, so operate
@@ -713,7 +713,7 @@ def __floordiv__(self, other):
result[mask] = np.nan
return result
- elif is_object_dtype(other):
+ elif is_object_dtype(other.dtype):
result = [self[n] // other[n] for n in range(len(self))]
result = np.array(result)
if lib.infer_dtype(result, skipna=False) == "timedelta":
@@ -721,7 +721,7 @@ def __floordiv__(self, other):
return type(self)(result)
return result
- elif is_integer_dtype(other) or is_float_dtype(other):
+ elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):
result = self._data // other
return type(self)(result)
@@ -763,7 +763,7 @@ def __rfloordiv__(self, other):
if len(other) != len(self):
raise ValueError("Cannot divide with unequal lengths")
- elif is_timedelta64_dtype(other):
+ elif is_timedelta64_dtype(other.dtype):
other = type(self)(other)
# numpy timedelta64 does not natively support floordiv, so operate
@@ -775,7 +775,7 @@ def __rfloordiv__(self, other):
result[mask] = np.nan
return result
- elif is_object_dtype(other):
+ elif is_object_dtype(other.dtype):
result = [other[n] // self[n] for n in range(len(self))]
result = np.array(result)
return result
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index a225eec93b27e..8c9a4b94446c0 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -161,7 +161,7 @@ def arithmetic_op(
right: Any,
op,
str_rep: str,
- eval_kwargs: Dict[str, str],
+ eval_kwargs: Dict[str, bool],
):
"""
Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 0e1cd42329169..73eddf91325ae 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -400,7 +400,7 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
- def test_combineSeries(
+ def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
@@ -432,6 +432,7 @@ def test_combineSeries(
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
+ # FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 82c197ac054f0..f5f6c9ad6b3da 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -122,7 +122,8 @@ def test_ops(self):
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
- # GH7192
+ # GH7192: Note we need a large number of rows to ensure this
+ # goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
| https://api.github.com/repos/pandas-dev/pandas/pulls/28879 | 2019-10-09T20:10:44Z | 2019-10-10T01:30:55Z | 2019-10-10T01:30:55Z | 2019-10-10T01:43:10Z | |
CLN: dont catch on groupby.mean | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 41a5195008f0c..5200d33c6a1fb 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -971,6 +971,18 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1):
if result is not no_result:
# see if we can cast the block back to the original dtype
result = maybe_downcast_numeric(result, block.dtype)
+
+ if result.ndim == 1 and isinstance(result, np.ndarray):
+ # e.g. block.values was an IntegerArray
+ try:
+ # Cast back if feasible
+ result = type(block.values)._from_sequence(
+ result, dtype=block.values.dtype
+ )
+ except ValueError:
+ # reshape to be valid for non-Extension Block
+ result = result.reshape(1, -1)
+
newb = block.make_block(result)
new_items.append(locs)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 4e0dd65042196..a127e7dc9bada 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1212,16 +1212,9 @@ def mean(self, *args, **kwargs):
Name: B, dtype: float64
"""
nv.validate_groupby_func("mean", args, kwargs, ["numeric_only"])
- try:
- return self._cython_agg_general(
- "mean", alt=lambda x, axis: Series(x).mean(**kwargs), **kwargs
- )
- except GroupByError:
- raise
- except Exception:
- with _group_selection_context(self):
- f = lambda x: x.mean(axis=self.axis, **kwargs)
- return self._python_agg_general(f)
+ return self._cython_agg_general(
+ "mean", alt=lambda x, axis: Series(x).mean(**kwargs), **kwargs
+ )
@Substitution(name="groupby")
@Appender(_common_see_also)
| The new casting in cython_agg_blocks is specific to a single IntegerArray test case. We could pretty reasonably move that into maybe_downcast_numeric. For the moment id rather hold off since I expect other EA cases to show up here.
cc @WillAyd | https://api.github.com/repos/pandas-dev/pandas/pulls/28878 | 2019-10-09T19:58:12Z | 2019-10-10T01:27:38Z | 2019-10-10T01:27:38Z | 2019-10-10T01:51:09Z |
ENH: show numbers on .info() with verbose flag | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 051d64ee87711..8c59ed0dd9388 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -290,13 +290,42 @@ New repr for :class:`~pandas.arrays.IntervalArray`
closed='right',
dtype='interval[int64]')
-
*pandas 1.0.0*
.. ipython:: python
pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)])
+Extended verbose info output for :class:`~pandas.DataFrame`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- :meth:`Dataframe.info` now shows line numbers for the columns summary (:issue:`17304`)
+
+*pandas 0.25.x*
+
+.. code-block:: python
+
+ >>> df = pd.DataFrame({"int_col": [1, 2, 3],
+ ... "text_col": ["a", "b", "c"],
+ ... "float_col": [0.0, 0.1, 0.2]})
+ >>> df.info(verbose=True)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 3 entries, 0 to 2
+ Data columns (total 3 columns):
+ int_col 3 non-null int64
+ text_col 3 non-null object
+ float_col 3 non-null float64
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 152.0+ bytes
+
+*pandas 1.0.0*
+
+.. ipython:: python
+
+ df = pd.DataFrame({"int_col": [1, 2, 3],
+ "text_col": ["a", "b", "c"],
+ "float_col": [0.0, 0.1, 0.2]})
+ df.info(verbose=True)
All :class:`SeriesGroupBy` aggregation methods now respect the ``observed`` keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b69199defbcc4..8bc417acaf7f3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2276,9 +2276,11 @@ def info(
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
- int_col 5 non-null int64
- text_col 5 non-null object
- float_col 5 non-null float64
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 int_col 5 non-null int64
+ 1 text_col 5 non-null object
+ 2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
@@ -2317,9 +2319,11 @@ def info(
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
- column_1 1000000 non-null object
- column_2 1000000 non-null object
- column_3 1000000 non-null object
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
@@ -2327,9 +2331,11 @@ def info(
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
- column_1 1000000 non-null object
- column_2 1000000 non-null object
- column_3 1000000 non-null object
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
@@ -2348,6 +2354,7 @@ def info(
return
cols = self.columns
+ col_count = len(self.columns)
# hack
if max_cols is None:
@@ -2356,36 +2363,76 @@ def info(
max_rows = get_option("display.max_info_rows", len(self) + 1)
if null_counts is None:
- show_counts = (len(self.columns) <= max_cols) and (len(self) < max_rows)
+ show_counts = (col_count <= max_cols) and (len(self) < max_rows)
else:
show_counts = null_counts
- exceeds_info_cols = len(self.columns) > max_cols
+ exceeds_info_cols = col_count > max_cols
def _verbose_repr():
lines.append(f"Data columns (total {len(self.columns)} columns):")
- space = max(len(pprint_thing(k)) for k in self.columns) + 4
+
+ id_head = " # "
+ column_head = "Column"
+ col_space = 2
+
+ max_col = max(len(pprint_thing(k)) for k in cols)
+ len_column = len(pprint_thing(column_head))
+ space = max(max_col, len_column) + col_space
+
+ max_id = len(pprint_thing(col_count))
+ len_id = len(pprint_thing(id_head))
+ space_num = max(max_id, len_id) + col_space
counts = None
- tmpl = "{count}{dtype}"
+ header = _put_str(id_head, space_num) + _put_str(column_head, space)
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
f"Columns must equal counts ({len(cols)} != {len(counts)})"
)
- tmpl = "{count} non-null {dtype}"
+ count_header = "Non-Null Count"
+ len_count = len(count_header)
+ non_null = " non-null"
+ max_count = max(len(pprint_thing(k)) for k in counts) + len(non_null)
+ space_count = max(len_count, max_count) + col_space
+ count_temp = "{count}" + non_null
+ else:
+ count_header = ""
+ space_count = len(count_header)
+ len_count = space_count
+ count_temp = "{count}"
+
+ dtype_header = "Dtype"
+ len_dtype = len(dtype_header)
+ max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes)
+ space_dtype = max(len_dtype, max_dtypes)
+ header += _put_str(count_header, space_count) + _put_str(
+ dtype_header, space_dtype
+ )
+
+ lines.append(header)
+ lines.append(
+ _put_str("-" * len_id, space_num)
+ + _put_str("-" * len_column, space)
+ + _put_str("-" * len_count, space_count)
+ + _put_str("-" * len_dtype, space_dtype)
+ )
- dtypes = self.dtypes
for i, col in enumerate(self.columns):
- dtype = dtypes.iloc[i]
+ dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
+ line_no = _put_str(" {num}".format(num=i), space_num)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(
- _put_str(col, space) + tmpl.format(count=count, dtype=dtype)
+ line_no
+ + _put_str(col, space)
+ + _put_str(count_temp.format(count=count), space_count)
+ + _put_str(dtype, space_dtype)
)
def _non_verbose_repr():
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 60dce36312145..91610102cf0f9 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -205,6 +205,28 @@ def test_info(self, float_frame, datetime_frame):
frame.info()
frame.info(verbose=False)
+ def test_info_verbose(self):
+ buf = StringIO()
+ size = 1001
+ start = 5
+ frame = DataFrame(np.random.randn(3, size))
+ frame.info(verbose=True, buf=buf)
+
+ res = buf.getvalue()
+ header = " # Column Dtype \n--- ------ ----- "
+ assert header in res
+
+ frame.info(verbose=True, buf=buf)
+ buf.seek(0)
+ lines = buf.readlines()
+ assert len(lines) > 0
+
+ for i, line in enumerate(lines):
+ if i >= start and i < start + size:
+ index = i - start
+ line_nr = " {} ".format(index)
+ assert line.startswith(line_nr)
+
def test_info_memory(self):
# https://github.com/pandas-dev/pandas/issues/21056
df = pd.DataFrame({"a": pd.Series([1, 2], dtype="i8")})
@@ -218,7 +240,9 @@ def test_info_memory(self):
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
- a 2 non-null int64
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 a 2 non-null int64
dtypes: int64(1)
memory usage: {} bytes
""".format(
@@ -262,8 +286,8 @@ def test_info_duplicate_columns_shows_correct_dtypes(self):
frame.info(buf=io)
io.seek(0)
lines = io.readlines()
- assert "a 1 non-null int64\n" == lines[3]
- assert "a 1 non-null float64\n" == lines[4]
+ assert " 0 a 1 non-null int64 \n" == lines[5]
+ assert " 1 a 1 non-null float64\n" == lines[6]
def test_info_shows_column_dtypes(self):
dtypes = [
@@ -283,13 +307,20 @@ def test_info_shows_column_dtypes(self):
buf = StringIO()
df.info(buf=buf)
res = buf.getvalue()
+ header = (
+ " # Column Non-Null Count Dtype \n"
+ "--- ------ -------------- ----- "
+ )
+ assert header in res
for i, dtype in enumerate(dtypes):
- name = "{i:d} {n:d} non-null {dtype}".format(i=i, n=n, dtype=dtype)
+ name = " {i:d} {i:d} {n:d} non-null {dtype}".format(
+ i=i, n=n, dtype=dtype
+ )
assert name in res
def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
- for len_, verbose in [(5, None), (5, False), (10, True)]:
+ for len_, verbose in [(5, None), (5, False), (12, True)]:
# For verbose always ^ setting ^ summarize ^ full output
with option_context("max_info_columns", 4):
buf = StringIO()
@@ -297,16 +328,16 @@ def test_info_max_cols(self):
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
- for len_, verbose in [(10, None), (5, False), (10, True)]:
+ for len_, verbose in [(12, None), (5, False), (12, True)]:
- # max_cols no exceeded
+ # max_cols not exceeded
with option_context("max_info_columns", 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
- for len_, max_cols in [(10, 5), (5, 4)]:
+ for len_, max_cols in [(12, 5), (5, 4)]:
# setting truncates
with option_context("max_info_columns", 4):
buf = StringIO()
| - [x] closes #17304
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28876 | 2019-10-09T19:38:52Z | 2020-01-03T01:18:53Z | 2020-01-03T01:18:53Z | 2020-01-03T08:54:26Z |
BUG: Allow all int types for merge (GH28870) | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index fd1c1271a5e37..dd96c6b594cea 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -344,6 +344,7 @@ Reshaping
- Bug :func:`merge_asof` could not use :class:`datetime.timedelta` for ``tolerance`` kwarg (:issue:`28098`)
- Bug in :func:`merge`, did not append suffixes correctly with MultiIndex (:issue:`28518`)
- :func:`qcut` and :func:`cut` now handle boolean input (:issue:`20303`)
+- Fix to ensure all int dtypes can be used in :func:`merge_asof` when using a tolerance value. Previously every non-int64 type would raise an erroneous ``MergeError`` (:issue:`28870`).
Sparse
^^^^^^
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 910c7ea561929..7bfc8153da568 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -28,7 +28,6 @@
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
- is_int64_dtype,
is_integer,
is_integer_dtype,
is_list_like,
@@ -1641,7 +1640,7 @@ def _get_merge_keys(self):
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
- elif is_int64_dtype(lt):
+ elif is_integer_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index caf2539a9e150..2e9ae80323159 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -1287,3 +1287,19 @@ def test_timedelta_tolerance_nearest(self):
)
assert_frame_equal(result, expected)
+
+ def test_int_type_tolerance(self, any_int_dtype):
+ # GH #28870
+
+ left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
+ right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
+ left["a"] = left["a"].astype(any_int_dtype)
+ right["a"] = right["a"].astype(any_int_dtype)
+
+ expected = pd.DataFrame(
+ {"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
+ )
+ expected["a"] = expected["a"].astype(any_int_dtype)
+
+ result = pd.merge_asof(left, right, on="a", tolerance=10)
+ assert_frame_equal(result, expected)
| - [x] closes #28870
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/28875 | 2019-10-09T18:58:32Z | 2019-10-11T12:09:34Z | 2019-10-11T12:09:34Z | 2019-10-12T15:49:35Z |
CLN: No catching needed for groupby median | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 4e0dd65042196..3df99e330fe4a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1236,23 +1236,11 @@ def median(self, **kwargs):
Series or DataFrame
Median of values within each group.
"""
- try:
- return self._cython_agg_general(
- "median",
- alt=lambda x, axis: Series(x).median(axis=axis, **kwargs),
- **kwargs
- )
- except GroupByError:
- raise
- except Exception:
-
- def f(x):
- if isinstance(x, np.ndarray):
- x = Series(x)
- return x.median(axis=self.axis, **kwargs)
-
- with _group_selection_context(self):
- return self._python_agg_general(f)
+ return self._cython_agg_general(
+ "median",
+ alt=lambda x, axis: Series(x).median(axis=axis, **kwargs),
+ **kwargs
+ )
@Substitution(name="groupby")
@Appender(_common_see_also)
| https://api.github.com/repos/pandas-dev/pandas/pulls/28873 | 2019-10-09T16:57:29Z | 2019-10-10T01:50:32Z | 2019-10-10T01:50:32Z | 2019-10-10T04:39:24Z | |
TST: Use fixtures instead of setup_method for index tests | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 793992d311502..b657d8d16df81 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -33,10 +33,6 @@ class Base:
_holder = None
_compat_props = ["shape", "ndim", "size", "nbytes"]
- def setup_indices(self):
- for name, idx in self.indices.items():
- setattr(self, name, idx)
-
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
@@ -205,24 +201,23 @@ def test_reindex_base(self):
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
- def test_get_indexer_consistency(self):
+ def test_get_indexer_consistency(self, indices):
# See GH 16819
- for name, index in self.indices.items():
- if isinstance(index, IntervalIndex):
- continue
-
- if index.is_unique or isinstance(index, CategoricalIndex):
- indexer = index.get_indexer(index[0:2])
- assert isinstance(indexer, np.ndarray)
- assert indexer.dtype == np.intp
- else:
- e = "Reindexing only valid with uniquely valued Index objects"
- with pytest.raises(InvalidIndexError, match=e):
- index.get_indexer(index[0:2])
+ if isinstance(indices, IntervalIndex):
+ return
- indexer, _ = index.get_indexer_non_unique(index[0:2])
+ if indices.is_unique or isinstance(indices, CategoricalIndex):
+ indexer = indices.get_indexer(indices[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
+ else:
+ e = "Reindexing only valid with uniquely valued Index objects"
+ with pytest.raises(InvalidIndexError, match=e):
+ indices.get_indexer(indices[0:2])
+
+ indexer, _ = indices.get_indexer_non_unique(indices[0:2])
+ assert isinstance(indexer, np.ndarray)
+ assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
@@ -258,146 +253,138 @@ def test_repr_max_seq_item_setting(self):
repr(idx)
assert "..." not in str(idx)
- def test_copy_name(self):
+ def test_copy_name(self, indices):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
+ if isinstance(indices, MultiIndex):
+ return
- for name, index in self.indices.items():
- if isinstance(index, MultiIndex):
- continue
-
- first = index.__class__(index, copy=True, name="mario")
- second = first.__class__(first, copy=False)
+ first = indices.__class__(indices, copy=True, name="mario")
+ second = first.__class__(first, copy=False)
- # Even though "copy=False", we want a new object.
- assert first is not second
+ # Even though "copy=False", we want a new object.
+ assert first is not second
- # Not using tm.assert_index_equal() since names differ.
- assert index.equals(first)
+ # Not using tm.assert_index_equal() since names differ.
+ assert indices.equals(first)
- assert first.name == "mario"
- assert second.name == "mario"
+ assert first.name == "mario"
+ assert second.name == "mario"
- s1 = Series(2, index=first)
- s2 = Series(3, index=second[:-1])
+ s1 = Series(2, index=first)
+ s2 = Series(3, index=second[:-1])
- if not isinstance(index, CategoricalIndex):
- # See gh-13365
- s3 = s1 * s2
- assert s3.index.name == "mario"
+ if not isinstance(indices, CategoricalIndex):
+ # See gh-13365
+ s3 = s1 * s2
+ assert s3.index.name == "mario"
- def test_ensure_copied_data(self):
+ def test_ensure_copied_data(self, indices):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
- for name, index in self.indices.items():
- init_kwargs = {}
- if isinstance(index, PeriodIndex):
- # Needs "freq" specification:
- init_kwargs["freq"] = index.freq
- elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
- # RangeIndex cannot be initialized from data
- # MultiIndex and CategoricalIndex are tested separately
- continue
-
- index_type = index.__class__
- result = index_type(index.values, copy=True, **init_kwargs)
- tm.assert_index_equal(index, result)
+ init_kwargs = {}
+ if isinstance(indices, PeriodIndex):
+ # Needs "freq" specification:
+ init_kwargs["freq"] = indices.freq
+ elif isinstance(indices, (RangeIndex, MultiIndex, CategoricalIndex)):
+ # RangeIndex cannot be initialized from data
+ # MultiIndex and CategoricalIndex are tested separately
+ return
+
+ index_type = indices.__class__
+ result = index_type(indices.values, copy=True, **init_kwargs)
+ tm.assert_index_equal(indices, result)
+ tm.assert_numpy_array_equal(
+ indices._ndarray_values, result._ndarray_values, check_same="copy"
+ )
+
+ if isinstance(indices, PeriodIndex):
+ # .values an object array of Period, thus copied
+ result = index_type(ordinal=indices.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(
- index._ndarray_values, result._ndarray_values, check_same="copy"
+ indices._ndarray_values, result._ndarray_values, check_same="same"
+ )
+ elif isinstance(indices, IntervalIndex):
+ # checked in test_interval.py
+ pass
+ else:
+ result = index_type(indices.values, copy=False, **init_kwargs)
+ tm.assert_numpy_array_equal(
+ indices.values, result.values, check_same="same"
+ )
+ tm.assert_numpy_array_equal(
+ indices._ndarray_values, result._ndarray_values, check_same="same"
)
- if isinstance(index, PeriodIndex):
- # .values an object array of Period, thus copied
- result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
- tm.assert_numpy_array_equal(
- index._ndarray_values, result._ndarray_values, check_same="same"
- )
- elif isinstance(index, IntervalIndex):
- # checked in test_interval.py
- pass
- else:
- result = index_type(index.values, copy=False, **init_kwargs)
- tm.assert_numpy_array_equal(
- index.values, result.values, check_same="same"
- )
- tm.assert_numpy_array_equal(
- index._ndarray_values, result._ndarray_values, check_same="same"
- )
-
- def test_memory_usage(self):
- for name, index in self.indices.items():
- result = index.memory_usage()
- if len(index):
- index.get_loc(index[0])
- result2 = index.memory_usage()
- result3 = index.memory_usage(deep=True)
-
- # RangeIndex, IntervalIndex
- # don't have engines
- if not isinstance(index, (RangeIndex, IntervalIndex)):
- assert result2 > result
-
- if index.inferred_type == "object":
- assert result3 > result2
-
- else:
-
- # we report 0 for no-length
- assert result == 0
-
- def test_argsort(self):
- for k, ind in self.indices.items():
-
- # separately tested
- if k in ["catIndex"]:
- continue
-
- result = ind.argsort()
- expected = np.array(ind).argsort()
- tm.assert_numpy_array_equal(result, expected, check_dtype=False)
-
- def test_numpy_argsort(self):
- for k, ind in self.indices.items():
- result = np.argsort(ind)
- expected = ind.argsort()
- tm.assert_numpy_array_equal(result, expected)
-
- # these are the only two types that perform
- # pandas compatibility input validation - the
- # rest already perform separate (or no) such
- # validation via their 'values' attribute as
- # defined in pandas.core.indexes/base.py - they
- # cannot be changed at the moment due to
- # backwards compatibility concerns
- if isinstance(type(ind), (CategoricalIndex, RangeIndex)):
- msg = "the 'axis' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.argsort(ind, axis=1)
-
- msg = "the 'kind' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.argsort(ind, kind="mergesort")
-
- msg = "the 'order' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.argsort(ind, order=("a", "b"))
-
- def test_take(self):
+ def test_memory_usage(self, indices):
+ indices._engine.clear_mapping()
+ result = indices.memory_usage()
+ if indices.empty:
+ # we report 0 for no-length
+ assert result == 0
+ return
+
+ # non-zero length
+ indices.get_loc(indices[0])
+ result2 = indices.memory_usage()
+ result3 = indices.memory_usage(deep=True)
+
+ # RangeIndex, IntervalIndex
+ # don't have engines
+ if not isinstance(indices, (RangeIndex, IntervalIndex)):
+ assert result2 > result
+
+ if indices.inferred_type == "object":
+ assert result3 > result2
+
+ def test_argsort(self, request, indices):
+ # separately tested
+ if isinstance(indices, CategoricalIndex):
+ return
+
+ result = indices.argsort()
+ expected = np.array(indices).argsort()
+ tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+
+ def test_numpy_argsort(self, indices):
+ result = np.argsort(indices)
+ expected = indices.argsort()
+ tm.assert_numpy_array_equal(result, expected)
+
+ # these are the only two types that perform
+ # pandas compatibility input validation - the
+ # rest already perform separate (or no) such
+ # validation via their 'values' attribute as
+ # defined in pandas.core.indexes/base.py - they
+ # cannot be changed at the moment due to
+ # backwards compatibility concerns
+ if isinstance(type(indices), (CategoricalIndex, RangeIndex)):
+ msg = "the 'axis' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ np.argsort(indices, axis=1)
+
+ msg = "the 'kind' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ np.argsort(indices, kind="mergesort")
+
+ msg = "the 'order' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ np.argsort(indices, order=("a", "b"))
+
+ def test_take(self, indices):
indexer = [4, 3, 0, 2]
- for k, ind in self.indices.items():
-
- # separate
- if k in ["boolIndex", "tuples", "empty"]:
- continue
+ if len(indices) < 5:
+ # not enough elements; ignore
+ return
- result = ind.take(indexer)
- expected = ind[indexer]
- assert result.equals(expected)
+ result = indices.take(indexer)
+ expected = indices[indexer]
+ assert result.equals(expected)
- if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
- # GH 10791
- with pytest.raises(AttributeError):
- ind.freq
+ if not isinstance(indices, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # GH 10791
+ with pytest.raises(AttributeError):
+ indices.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
@@ -454,173 +441,152 @@ def test_where(self, klass):
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
- def test_set_ops_error_cases(self, case, method):
- for name, idx in self.indices.items():
- # non-iterable input
+ def test_set_ops_error_cases(self, case, method, indices):
+ # non-iterable input
+ msg = "Input must be Index or array-like"
+ with pytest.raises(TypeError, match=msg):
+ getattr(indices, method)(case)
- msg = "Input must be Index or array-like"
- with pytest.raises(TypeError, match=msg):
- getattr(idx, method)(case)
+ def test_intersection_base(self, indices):
+ if isinstance(indices, CategoricalIndex):
+ return
- def test_intersection_base(self):
- for name, idx in self.indices.items():
- first = idx[:5]
- second = idx[:3]
- intersect = first.intersection(second)
+ first = indices[:5]
+ second = indices[:3]
+ intersect = first.intersection(second)
+ assert tm.equalContents(intersect, second)
- if isinstance(idx, CategoricalIndex):
- pass
- else:
- assert tm.equalContents(intersect, second)
-
- # GH 10149
- cases = [klass(second.values) for klass in [np.array, Series, list]]
- for case in cases:
- if isinstance(idx, CategoricalIndex):
- pass
- else:
- result = first.intersection(case)
- assert tm.equalContents(result, second)
-
- if isinstance(idx, MultiIndex):
- msg = "other must be a MultiIndex or a list of tuples"
- with pytest.raises(TypeError, match=msg):
- first.intersection([1, 2, 3])
-
- def test_union_base(self):
- for name, idx in self.indices.items():
- first = idx[3:]
- second = idx[:5]
- everything = idx
- union = first.union(second)
- assert tm.equalContents(union, everything)
-
- # GH 10149
- cases = [klass(second.values) for klass in [np.array, Series, list]]
- for case in cases:
- if isinstance(idx, CategoricalIndex):
- pass
- else:
- result = first.union(case)
- assert tm.equalContents(result, everything)
-
- if isinstance(idx, MultiIndex):
- msg = "other must be a MultiIndex or a list of tuples"
- with pytest.raises(TypeError, match=msg):
- first.union([1, 2, 3])
+ # GH 10149
+ cases = [klass(second.values) for klass in [np.array, Series, list]]
+ for case in cases:
+ result = first.intersection(case)
+ assert tm.equalContents(result, second)
- @pytest.mark.parametrize("sort", [None, False])
- def test_difference_base(self, sort):
- for name, idx in self.indices.items():
- first = idx[2:]
- second = idx[:4]
- answer = idx[4:]
- result = first.difference(second, sort)
-
- if isinstance(idx, CategoricalIndex):
- pass
- else:
- assert tm.equalContents(result, answer)
+ if isinstance(indices, MultiIndex):
+ msg = "other must be a MultiIndex or a list of tuples"
+ with pytest.raises(TypeError, match=msg):
+ first.intersection([1, 2, 3])
+
+ def test_union_base(self, indices):
+ first = indices[3:]
+ second = indices[:5]
+ everything = indices
+ union = first.union(second)
+ assert tm.equalContents(union, everything)
+
+ # GH 10149
+ cases = [klass(second.values) for klass in [np.array, Series, list]]
+ for case in cases:
+ if not isinstance(indices, CategoricalIndex):
+ result = first.union(case)
+ assert tm.equalContents(result, everything)
+
+ if isinstance(indices, MultiIndex):
+ msg = "other must be a MultiIndex or a list of tuples"
+ with pytest.raises(TypeError, match=msg):
+ first.union([1, 2, 3])
- # GH 10149
- cases = [klass(second.values) for klass in [np.array, Series, list]]
- for case in cases:
- if isinstance(idx, CategoricalIndex):
- pass
- elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
- assert result.__class__ == answer.__class__
- tm.assert_numpy_array_equal(
- result.sort_values().asi8, answer.sort_values().asi8
- )
- else:
- result = first.difference(case, sort)
- assert tm.equalContents(result, answer)
-
- if isinstance(idx, MultiIndex):
- msg = "other must be a MultiIndex or a list of tuples"
- with pytest.raises(TypeError, match=msg):
- first.difference([1, 2, 3], sort)
-
- def test_symmetric_difference(self):
- for name, idx in self.indices.items():
- first = idx[1:]
- second = idx[:-1]
- if isinstance(idx, CategoricalIndex):
- pass
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_difference_base(self, sort, indices):
+ if isinstance(indices, CategoricalIndex):
+ return
+
+ first = indices[2:]
+ second = indices[:4]
+ answer = indices[4:]
+ result = first.difference(second, sort)
+ assert tm.equalContents(result, answer)
+
+ # GH 10149
+ cases = [klass(second.values) for klass in [np.array, Series, list]]
+ for case in cases:
+ if isinstance(indices, (DatetimeIndex, TimedeltaIndex)):
+ assert result.__class__ == answer.__class__
+ tm.assert_numpy_array_equal(
+ result.sort_values().asi8, answer.sort_values().asi8
+ )
else:
- answer = idx[[0, -1]]
- result = first.symmetric_difference(second)
+ result = first.difference(case, sort)
assert tm.equalContents(result, answer)
- # GH 10149
- cases = [klass(second.values) for klass in [np.array, Series, list]]
- for case in cases:
- if isinstance(idx, CategoricalIndex):
- pass
- else:
- result = first.symmetric_difference(case)
- assert tm.equalContents(result, answer)
-
- if isinstance(idx, MultiIndex):
- msg = "other must be a MultiIndex or a list of tuples"
- with pytest.raises(TypeError, match=msg):
- first.symmetric_difference([1, 2, 3])
-
- def test_insert_base(self):
-
- for name, idx in self.indices.items():
- result = idx[1:4]
-
- if not len(idx):
- continue
+ if isinstance(indices, MultiIndex):
+ msg = "other must be a MultiIndex or a list of tuples"
+ with pytest.raises(TypeError, match=msg):
+ first.difference([1, 2, 3], sort)
+
+ def test_symmetric_difference(self, indices):
+ if isinstance(indices, CategoricalIndex):
+ return
+
+ first = indices[1:]
+ second = indices[:-1]
+ answer = indices[[0, -1]]
+ result = first.symmetric_difference(second)
+ assert tm.equalContents(result, answer)
+
+ # GH 10149
+ cases = [klass(second.values) for klass in [np.array, Series, list]]
+ for case in cases:
+ result = first.symmetric_difference(case)
+ assert tm.equalContents(result, answer)
+
+ if isinstance(indices, MultiIndex):
+ msg = "other must be a MultiIndex or a list of tuples"
+ with pytest.raises(TypeError, match=msg):
+ first.symmetric_difference([1, 2, 3])
- # test 0th element
- assert idx[0:4].equals(result.insert(0, idx[0]))
+ def test_insert_base(self, indices):
+ result = indices[1:4]
- def test_delete_base(self):
+ if not len(indices):
+ return
- for name, idx in self.indices.items():
+ # test 0th element
+ assert indices[0:4].equals(result.insert(0, indices[0]))
- if not len(idx):
- continue
+ def test_delete_base(self, indices):
+ if not len(indices):
+ return
- if isinstance(idx, RangeIndex):
- # tested in class
- continue
+ if isinstance(indices, RangeIndex):
+ # tested in class
+ return
- expected = idx[1:]
- result = idx.delete(0)
- assert result.equals(expected)
- assert result.name == expected.name
+ expected = indices[1:]
+ result = indices.delete(0)
+ assert result.equals(expected)
+ assert result.name == expected.name
- expected = idx[:-1]
- result = idx.delete(-1)
- assert result.equals(expected)
- assert result.name == expected.name
+ expected = indices[:-1]
+ result = indices.delete(-1)
+ assert result.equals(expected)
+ assert result.name == expected.name
- with pytest.raises((IndexError, ValueError)):
- # either depending on numpy version
- idx.delete(len(idx))
+ with pytest.raises((IndexError, ValueError)):
+ # either depending on numpy version
+ indices.delete(len(indices))
- def test_equals(self):
+ def test_equals(self, indices):
+ if isinstance(indices, IntervalIndex):
+ # IntervalIndex tested separately
+ return
- for name, idx in self.indices.items():
- assert idx.equals(idx)
- assert idx.equals(idx.copy())
- assert idx.equals(idx.astype(object))
+ assert indices.equals(indices)
+ assert indices.equals(indices.copy())
+ assert indices.equals(indices.astype(object))
- assert not idx.equals(list(idx))
- assert not idx.equals(np.array(idx))
+ assert not indices.equals(list(indices))
+ assert not indices.equals(np.array(indices))
- # Cannot pass in non-int64 dtype to RangeIndex
- if not isinstance(idx, RangeIndex):
- same_values = Index(idx, dtype=object)
- assert idx.equals(same_values)
- assert same_values.equals(idx)
+ # Cannot pass in non-int64 dtype to RangeIndex
+ if not isinstance(indices, RangeIndex):
+ same_values = Index(indices, dtype=object)
+ assert indices.equals(same_values)
+ assert same_values.equals(indices)
- if idx.nlevels == 1:
- # do not test MultiIndex
- assert not idx.equals(pd.Series(idx))
+ if indices.nlevels == 1:
+ # do not test MultiIndex
+ assert not indices.equals(Series(indices))
def test_equals_op(self):
# GH9947, GH10637
@@ -686,107 +652,99 @@ def test_equals_op(self):
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
- def test_hasnans_isnans(self):
+ def test_hasnans_isnans(self, indices):
# GH 11343, added tests for hasnans / isnans
+ if isinstance(indices, MultiIndex):
+ return
+
+ # cases in indices doesn't include NaN
+ idx = indices.copy(deep=True)
+ expected = np.array([False] * len(idx), dtype=bool)
+ tm.assert_numpy_array_equal(idx._isnan, expected)
+ assert idx.hasnans is False
+
+ idx = indices.copy(deep=True)
+ values = np.asarray(idx.values)
+
+ if len(indices) == 0:
+ return
+ elif isinstance(indices, DatetimeIndexOpsMixin):
+ values[1] = iNaT
+ elif isinstance(indices, (Int64Index, UInt64Index)):
+ return
+ else:
+ values[1] = np.nan
- for name, index in self.indices.items():
- if isinstance(index, MultiIndex):
- pass
- else:
- idx = index.copy()
-
- # cases in indices doesn't include NaN
- expected = np.array([False] * len(idx), dtype=bool)
- tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans is False
-
- idx = index.copy()
- values = np.asarray(idx.values)
-
- if len(index) == 0:
- continue
- elif isinstance(index, DatetimeIndexOpsMixin):
- values[1] = iNaT
- elif isinstance(index, (Int64Index, UInt64Index)):
- continue
- else:
- values[1] = np.nan
-
- if isinstance(index, PeriodIndex):
- idx = index.__class__(values, freq=index.freq)
- else:
- idx = index.__class__(values)
-
- expected = np.array([False] * len(idx), dtype=bool)
- expected[1] = True
- tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans is True
-
- def test_fillna(self):
+ if isinstance(indices, PeriodIndex):
+ idx = indices.__class__(values, freq=indices.freq)
+ else:
+ idx = indices.__class__(values)
+
+ expected = np.array([False] * len(idx), dtype=bool)
+ expected[1] = True
+ tm.assert_numpy_array_equal(idx._isnan, expected)
+ assert idx.hasnans is True
+
+ def test_fillna(self, indices):
# GH 11343
- for name, index in self.indices.items():
- if len(index) == 0:
- pass
- elif isinstance(index, MultiIndex):
- idx = index.copy()
- msg = "isna is not defined for MultiIndex"
- with pytest.raises(NotImplementedError, match=msg):
- idx.fillna(idx[0])
+ if len(indices) == 0:
+ pass
+ elif isinstance(indices, MultiIndex):
+ idx = indices.copy(deep=True)
+ msg = "isna is not defined for MultiIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx.fillna(idx[0])
+ else:
+ idx = indices.copy(deep=True)
+ result = idx.fillna(idx[0])
+ tm.assert_index_equal(result, idx)
+ assert result is not idx
+
+ msg = "'value' must be a scalar, passed: "
+ with pytest.raises(TypeError, match=msg):
+ idx.fillna([idx[0]])
+
+ idx = indices.copy(deep=True)
+ values = np.asarray(idx.values)
+
+ if isinstance(indices, DatetimeIndexOpsMixin):
+ values[1] = iNaT
+ elif isinstance(indices, (Int64Index, UInt64Index)):
+ return
else:
- idx = index.copy()
- result = idx.fillna(idx[0])
- tm.assert_index_equal(result, idx)
- assert result is not idx
-
- msg = "'value' must be a scalar, passed: "
- with pytest.raises(TypeError, match=msg):
- idx.fillna([idx[0]])
-
- idx = index.copy()
- values = np.asarray(idx.values)
-
- if isinstance(index, DatetimeIndexOpsMixin):
- values[1] = iNaT
- elif isinstance(index, (Int64Index, UInt64Index)):
- continue
- else:
- values[1] = np.nan
-
- if isinstance(index, PeriodIndex):
- idx = index.__class__(values, freq=index.freq)
- else:
- idx = index.__class__(values)
-
- expected = np.array([False] * len(idx), dtype=bool)
- expected[1] = True
- tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans is True
-
- def test_nulls(self):
- # this is really a smoke test for the methods
- # as these are adequately tested for function elsewhere
+ values[1] = np.nan
- for name, index in self.indices.items():
- if len(index) == 0:
- tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
- elif isinstance(index, MultiIndex):
- idx = index.copy()
- msg = "isna is not defined for MultiIndex"
- with pytest.raises(NotImplementedError, match=msg):
- idx.isna()
+ if isinstance(indices, PeriodIndex):
+ idx = indices.__class__(values, freq=indices.freq)
else:
+ idx = indices.__class__(values)
- if not index.hasnans:
- tm.assert_numpy_array_equal(
- index.isna(), np.zeros(len(index), dtype=bool)
- )
- tm.assert_numpy_array_equal(
- index.notna(), np.ones(len(index), dtype=bool)
- )
- else:
- result = isna(index)
- tm.assert_numpy_array_equal(index.isna(), result)
- tm.assert_numpy_array_equal(index.notna(), ~result)
+ expected = np.array([False] * len(idx), dtype=bool)
+ expected[1] = True
+ tm.assert_numpy_array_equal(idx._isnan, expected)
+ assert idx.hasnans is True
+
+ def test_nulls(self, indices):
+ # this is really a smoke test for the methods
+ # as these are adequately tested for function elsewhere
+ if len(indices) == 0:
+ tm.assert_numpy_array_equal(indices.isna(), np.array([], dtype=bool))
+ elif isinstance(indices, MultiIndex):
+ idx = indices.copy()
+ msg = "isna is not defined for MultiIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx.isna()
+ elif not indices.hasnans:
+ tm.assert_numpy_array_equal(
+ indices.isna(), np.zeros(len(indices), dtype=bool)
+ )
+ tm.assert_numpy_array_equal(
+ indices.notna(), np.ones(len(indices), dtype=bool)
+ )
+ else:
+ result = isna(indices)
+ tm.assert_numpy_array_equal(indices.isna(), result)
+ tm.assert_numpy_array_equal(indices.notna(), ~result)
def test_empty(self):
# GH 15270
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index 12c5fb8339549..2a9a8bf8d824f 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -5,28 +5,29 @@
from pandas.core.indexes.api import Index, MultiIndex
import pandas.util.testing as tm
-indices_list = [
- tm.makeUnicodeIndex(100),
- tm.makeStringIndex(100),
- tm.makeDateIndex(100),
- tm.makePeriodIndex(100),
- tm.makeTimedeltaIndex(100),
- tm.makeIntIndex(100),
- tm.makeUIntIndex(100),
- tm.makeRangeIndex(100),
- tm.makeFloatIndex(100),
- Index([True, False]),
- tm.makeCategoricalIndex(100),
- tm.makeIntervalIndex(100),
- Index([]),
- MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
- Index([0, 0, 1, 1, 2, 2]),
-]
-
-
-@pytest.fixture(params=indices_list, ids=lambda x: type(x).__name__)
+indices_dict = {
+ "unicode": tm.makeUnicodeIndex(100),
+ "string": tm.makeStringIndex(100),
+ "datetime": tm.makeDateIndex(100),
+ "period": tm.makePeriodIndex(100),
+ "timedelta": tm.makeTimedeltaIndex(100),
+ "int": tm.makeIntIndex(100),
+ "uint": tm.makeUIntIndex(100),
+ "range": tm.makeRangeIndex(100),
+ "float": tm.makeFloatIndex(100),
+ "bool": Index([True, False]),
+ "categorical": tm.makeCategoricalIndex(100),
+ "interval": tm.makeIntervalIndex(100),
+ "empty": Index([]),
+ "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
+ "repeats": Index([0, 0, 1, 1, 2, 2]),
+}
+
+
+@pytest.fixture(params=indices_dict.keys())
def indices(request):
- return request.param
+ # copy to avoid mutation, e.g. setting .name
+ return indices_dict[request.param].copy()
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 7523b250ea291..f7cded9f44918 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -58,13 +58,14 @@ def test_view(self):
tm.assert_index_equal(result, i_view)
def test_map_callable(self):
- expected = self.index + self.index.freq
- result = self.index.map(lambda x: x + x.freq)
+ index = self.create_index()
+ expected = index + index.freq
+ result = index.map(lambda x: x + x.freq)
tm.assert_index_equal(result, expected)
# map to NaT
- result = self.index.map(lambda x: pd.NaT if x == self.index[0] else x)
- expected = pd.Index([pd.NaT] + self.index[1:].tolist())
+ result = index.map(lambda x: pd.NaT if x == index[0] else x)
+ expected = pd.Index([pd.NaT] + index[1:].tolist())
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
@@ -75,23 +76,24 @@ def test_map_callable(self):
],
)
def test_map_dictlike(self, mapper):
- expected = self.index + self.index.freq
+ index = self.create_index()
+ expected = index + index.freq
# don't compare the freqs
if isinstance(expected, pd.DatetimeIndex):
expected.freq = None
- result = self.index.map(mapper(expected, self.index))
+ result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
- expected = pd.Index([pd.NaT] + self.index[1:].tolist())
- result = self.index.map(mapper(expected, self.index))
+ expected = pd.Index([pd.NaT] + index[1:].tolist())
+ result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
# empty map; these map to np.nan because we cannot know
# to re-infer things
- expected = pd.Index([np.nan] * len(self.index))
- result = self.index.map(mapper([], []))
+ expected = pd.Index([np.nan] * len(index))
+ result = index.map(mapper([], []))
tm.assert_index_equal(result, expected)
def test_asobject_deprecated(self):
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 0f1d7927ee3b4..8fa87f55f404b 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -1,4 +1,5 @@
""" generic tests from the Datetimelike class """
+import pytest
from pandas import DatetimeIndex, date_range
from pandas.util import testing as tm
@@ -9,12 +10,12 @@
class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
- def setup_method(self, method):
- self.indices = dict(
- index=tm.makeDateIndex(10),
- index_dec=date_range("20130110", periods=10, freq="-1D"),
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[tm.makeDateIndex(10), date_range("20130110", periods=10, freq="-1D")],
+ ids=["index_inc", "index_dec"],
+ )
+ def indices(self, request):
+ return request.param
def create_index(self):
return date_range("20130101", periods=5)
diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py
index b2cb29dafac09..339bdaf79c690 100644
--- a/pandas/tests/indexes/interval/test_base.py
+++ b/pandas/tests/indexes/interval/test_base.py
@@ -14,10 +14,9 @@ class TestBase(Base):
_holder = IntervalIndex
- def setup_method(self, method):
- self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
- self.index_with_nan = IntervalIndex.from_tuples([(0, 1), np.nan, (1, 2)])
- self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
+ @pytest.fixture
+ def indices(self):
+ return tm.makeIntervalIndex(10)
def create_index(self, closed="right"):
return IntervalIndex.from_breaks(range(11), closed=closed)
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index ee37be7ab4c14..1a2c58bdfce37 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -25,12 +25,15 @@
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
- def setup_method(self, method):
- self.indices = dict(
- index=tm.makePeriodIndex(10),
- index_dec=period_range("20130101", periods=10, freq="D")[::-1],
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[
+ tm.makePeriodIndex(10),
+ period_range("20130101", periods=10, freq="D")[::-1],
+ ],
+ ids=["index_inc", "index_dec"],
+ )
+ def indices(self, request):
+ return request.param
def create_index(self):
return period_range("20130101", periods=5, freq="D")
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 82d5ddd1ac358..0dc6d24202c34 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -41,6 +41,7 @@
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.sorting import safe_sort
from pandas.tests.indexes.common import Base
+from pandas.tests.indexes.conftest import indices_dict
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@@ -48,73 +49,57 @@
class TestIndex(Base):
_holder = Index
- def setup_method(self, method):
- self.indices = dict(
- unicodeIndex=tm.makeUnicodeIndex(100),
- strIndex=tm.makeStringIndex(100),
- dateIndex=tm.makeDateIndex(100),
- periodIndex=tm.makePeriodIndex(100),
- tdIndex=tm.makeTimedeltaIndex(100),
- intIndex=tm.makeIntIndex(100),
- uintIndex=tm.makeUIntIndex(100),
- rangeIndex=tm.makeRangeIndex(100),
- floatIndex=tm.makeFloatIndex(100),
- boolIndex=Index([True, False]),
- catIndex=tm.makeCategoricalIndex(100),
- empty=Index([]),
- tuples=MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
- repeats=Index([0, 0, 1, 1, 2, 2]),
- )
- self.setup_indices()
+ @pytest.fixture
+ def index(self, request):
+ """
+ Fixture for selectively parametrizing indices_dict via indirect parametrization
+ (parametrize over indices_dict keys with indirect=True). Defaults to string
+ index if no keys are provided.
+ """
+ key = getattr(request, "param", "string")
+
+ # copy to avoid mutation, e.g. setting .name
+ return indices_dict[key].copy()
def create_index(self):
return Index(list("abcde"))
- def generate_index_types(self, skip_index_keys=[]):
- """
- Return a generator of the various index types, leaving
- out the ones with a key in skip_index_keys
- """
- for key, index in self.indices.items():
- if key not in skip_index_keys:
- yield key, index
-
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
- def test_new_axis(self):
- new_index = self.dateIndex[None, :]
+ @pytest.mark.parametrize("index", ["datetime"], indirect=True)
+ def test_new_axis(self, index):
+ new_index = index[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
- def test_copy_and_deepcopy(self):
- new_copy2 = self.intIndex.copy(dtype=int)
+ @pytest.mark.parametrize("index", ["int", "uint", "float"], indirect=True)
+ def test_copy_and_deepcopy(self, index):
+ new_copy2 = index.copy(dtype=int)
assert new_copy2.dtype.kind == "i"
- @pytest.mark.parametrize("attr", ["strIndex", "dateIndex"])
- def test_constructor_regular(self, attr):
- # regular instance creation
- index = getattr(self, attr)
- tm.assert_contains_all(index, index)
+ def test_constructor_regular(self, indices):
+ tm.assert_contains_all(indices, indices)
- def test_constructor_casting(self):
+ def test_constructor_casting(self, index):
# casting
- arr = np.array(self.strIndex)
- index = Index(arr)
- tm.assert_contains_all(arr, index)
- tm.assert_index_equal(self.strIndex, index)
+ arr = np.array(index)
+ new_index = Index(arr)
+ tm.assert_contains_all(arr, new_index)
+ tm.assert_index_equal(index, new_index)
- def test_constructor_copy(self):
+ def test_constructor_copy(self, index):
# copy
- arr = np.array(self.strIndex)
- index = Index(arr, copy=True, name="name")
- assert isinstance(index, Index)
- assert index.name == "name"
- tm.assert_numpy_array_equal(arr, index.values)
+ # index = self.create_index()
+ arr = np.array(index)
+ new_index = Index(arr, copy=True, name="name")
+ assert isinstance(new_index, Index)
+ assert new_index.name == "name"
+ tm.assert_numpy_array_equal(arr, new_index.values)
arr[0] = "SOMEBIGLONGSTRING"
- assert index[0] != "SOMEBIGLONGSTRING"
+ assert new_index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
@@ -570,37 +555,50 @@ def test_constructor_cast(self):
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
- def test_view_with_args(self):
- restricted = ["unicodeIndex", "strIndex", "catIndex", "boolIndex", "empty"]
- for i in list(set(self.indices.keys()) - set(restricted)):
- ind = self.indices[i]
- ind.view("i8")
+ @pytest.mark.parametrize(
+ "index",
+ [
+ "datetime",
+ "float",
+ "int",
+ "period",
+ "range",
+ "repeats",
+ "timedelta",
+ "tuples",
+ "uint",
+ ],
+ indirect=True,
+ )
+ def test_view_with_args(self, index):
+ index.view("i8")
@pytest.mark.parametrize(
- "index_type",
+ "index",
[
- "unicodeIndex",
- "strIndex",
- pytest.param("catIndex", marks=pytest.mark.xfail(reason="gh-25464")),
- "boolIndex",
+ "unicode",
+ "string",
+ pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")),
+ "bool",
"empty",
],
+ indirect=True,
)
- def test_view_with_args_object_array_raises(self, index_type):
- ind = self.indices[index_type]
+ def test_view_with_args_object_array_raises(self, index):
msg = "Cannot change data-type for object array"
with pytest.raises(TypeError, match=msg):
- ind.view("i8")
+ index.view("i8")
- def test_astype(self):
- casted = self.intIndex.astype("i8")
+ @pytest.mark.parametrize("index", ["int", "range"], indirect=True)
+ def test_astype(self, index):
+ casted = index.astype("i8")
# it works!
casted.get_loc(5)
# pass on name
- self.intIndex.name = "foobar"
- casted = self.intIndex.astype("i8")
+ index.name = "foobar"
+ casted = index.astype("i8")
assert casted.name == "foobar"
def test_equals_object(self):
@@ -700,16 +698,17 @@ def test_is_(self):
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
- def test_asof(self):
- d = self.dateIndex[0]
- assert self.dateIndex.asof(d) == d
- assert isna(self.dateIndex.asof(d - timedelta(1)))
+ @pytest.mark.parametrize("index", ["datetime"], indirect=True)
+ def test_asof(self, index):
+ d = index[0]
+ assert index.asof(d) == d
+ assert isna(index.asof(d - timedelta(1)))
- d = self.dateIndex[-1]
- assert self.dateIndex.asof(d + timedelta(1)) == d
+ d = index[-1]
+ assert index.asof(d + timedelta(1)) == d
- d = self.dateIndex[0].to_pydatetime()
- assert isinstance(self.dateIndex.asof(d), Timestamp)
+ d = index[0].to_pydatetime()
+ assert isinstance(index.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range("2010-01-01", periods=2, freq="m")
@@ -731,40 +730,39 @@ def test_nanosecond_index_access(self):
expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns")
assert first_value == x[Timestamp(expected_ts)]
- def test_booleanindex(self):
- boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
- boolIndex[5:30:2] = False
+ def test_booleanindex(self, index):
+ bool_index = np.repeat(True, len(index)).astype(bool)
+ bool_index[5:30:2] = False
- subIndex = self.strIndex[boolIndex]
+ sub_index = index[bool_index]
- for i, val in enumerate(subIndex):
- assert subIndex.get_loc(val) == i
+ for i, val in enumerate(sub_index):
+ assert sub_index.get_loc(val) == i
- subIndex = self.strIndex[list(boolIndex)]
- for i, val in enumerate(subIndex):
- assert subIndex.get_loc(val) == i
+ sub_index = index[list(bool_index)]
+ for i, val in enumerate(sub_index):
+ assert sub_index.get_loc(val) == i
def test_fancy(self):
- sl = self.strIndex[[1, 2, 3]]
+ index = self.create_index()
+ sl = index[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
- @pytest.mark.parametrize("attr", ["strIndex", "intIndex", "floatIndex"])
+ @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
- def test_empty_fancy(self, attr, dtype):
+ def test_empty_fancy(self, index, dtype):
empty_arr = np.array([], dtype=dtype)
- index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
- @pytest.mark.parametrize("attr", ["strIndex", "intIndex", "floatIndex"])
- def test_empty_fancy_raises(self, attr):
+ @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
+ def test_empty_fancy_raises(self, index):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
- index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
@@ -774,9 +772,9 @@ def test_empty_fancy_raises(self, attr):
index[empty_farr]
@pytest.mark.parametrize("sort", [None, False])
- def test_intersection(self, sort):
- first = self.strIndex[:20]
- second = self.strIndex[:10]
+ def test_intersection(self, index, sort):
+ first = index[:20]
+ second = index[:10]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
@@ -812,10 +810,10 @@ def test_intersection_name_preservation(self, index2, keeps_name, sort):
)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_name_preservation2(
- self, first_name, second_name, expected_name, sort
+ self, index, first_name, second_name, expected_name, sort
):
- first = self.strIndex[5:20]
- second = self.strIndex[:10]
+ first = index[5:20]
+ second = index[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second, sort=sort)
@@ -900,11 +898,10 @@ def test_chained_union(self, sort):
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize("sort", [None, False])
- def test_union(self, sort):
- # TODO: Replace with fixturesult
- first = self.strIndex[5:20]
- second = self.strIndex[:10]
- everything = self.strIndex[:20]
+ def test_union(self, index, sort):
+ first = index[5:20]
+ second = index[:10]
+ everything = index[:20]
union = first.union(second, sort=sort)
if sort is None:
@@ -965,12 +962,11 @@ def test_union_sort_other_incomparable_true(self):
@pytest.mark.parametrize("klass", [np.array, Series, list])
@pytest.mark.parametrize("sort", [None, False])
- def test_union_from_iterables(self, klass, sort):
+ def test_union_from_iterables(self, index, klass, sort):
# GH 10149
- # TODO: Replace with fixturesult
- first = self.strIndex[5:20]
- second = self.strIndex[:10]
- everything = self.strIndex[:20]
+ first = index[5:20]
+ second = index[:10]
+ everything = index[:20]
case = klass(second.values)
result = first.union(case, sort=sort)
@@ -979,9 +975,8 @@ def test_union_from_iterables(self, klass, sort):
assert tm.equalContents(result, everything)
@pytest.mark.parametrize("sort", [None, False])
- def test_union_identity(self, sort):
- # TODO: replace with fixturesult
- first = self.strIndex[5:20]
+ def test_union_identity(self, index, sort):
+ first = index[5:20]
union = first.union(first, sort=sort)
# i.e. identity is not preserved when sort is True
@@ -1021,19 +1016,21 @@ def test_union_name_preservation(
@pytest.mark.parametrize("sort", [None, False])
def test_union_dt_as_obj(self, sort):
# TODO: Replace with fixturesult
- firstCat = self.strIndex.union(self.dateIndex)
- secondCat = self.strIndex.union(self.strIndex)
+ index = self.create_index()
+ date_index = pd.date_range("2019-01-01", periods=10)
+ first_cat = index.union(date_index)
+ second_cat = index.union(index)
- if self.dateIndex.dtype == np.object_:
- appended = np.append(self.strIndex, self.dateIndex)
+ if date_index.dtype == np.object_:
+ appended = np.append(index, date_index)
else:
- appended = np.append(self.strIndex, self.dateIndex.astype("O"))
+ appended = np.append(index, date_index.astype("O"))
- assert tm.equalContents(firstCat, appended)
- assert tm.equalContents(secondCat, self.strIndex)
- tm.assert_contains_all(self.strIndex, firstCat)
- tm.assert_contains_all(self.strIndex, secondCat)
- tm.assert_contains_all(self.dateIndex, firstCat)
+ assert tm.equalContents(first_cat, appended)
+ assert tm.equalContents(second_cat, index)
+ tm.assert_contains_all(index, first_cat)
+ tm.assert_contains_all(index, second_cat)
+ tm.assert_contains_all(date_index, first_cat)
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
@@ -1045,11 +1042,9 @@ def test_setops_disallow_true(self, method):
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
- def test_map_identity_mapping(self):
+ def test_map_identity_mapping(self, indices):
# GH 12766
- # TODO: replace with fixture
- for name, cur_index in self.indices.items():
- tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
+ tm.assert_index_equal(indices, indices.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
@@ -1096,31 +1091,37 @@ def test_map_tseries_indices_accsr_return_index(self):
lambda values, index: pd.Series(values, index),
],
)
- def test_map_dictlike(self, mapper):
+ def test_map_dictlike_simple(self, mapper):
# GH 12756
expected = Index(["foo", "bar", "baz"])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
- # TODO: replace with fixture
- for name in self.indices.keys():
- if name == "catIndex":
- # Tested in test_categorical
- continue
- elif name == "repeats":
- # Cannot map duplicated index
- continue
-
- index = self.indices[name]
- expected = Index(np.arange(len(index), 0, -1))
-
+ @pytest.mark.parametrize(
+ "mapper",
+ [
+ lambda values, index: {i: e for e, i in zip(values, index)},
+ lambda values, index: pd.Series(values, index),
+ ],
+ )
+ def test_map_dictlike(self, indices, mapper):
+ # GH 12756
+ if isinstance(indices, CategoricalIndex):
+ # Tested in test_categorical
+ return
+ elif not indices.is_unique:
+ # Cannot map duplicated index
+ return
+
+ if indices.empty:
# to match proper result coercion for uints
- if name == "empty":
- expected = Index([])
+ expected = Index([])
+ else:
+ expected = Index(np.arange(len(indices), 0, -1))
- result = index.map(mapper(expected, index))
- tm.assert_index_equal(result, expected)
+ result = indices.map(mapper(expected, indices))
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
@@ -1169,11 +1170,10 @@ def test_append_empty_preserve_name(self, name, expected):
@pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")])
@pytest.mark.parametrize("sort", [None, False])
- def test_difference_name_preservation(self, second_name, expected, sort):
- # TODO: replace with fixturesult
- first = self.strIndex[5:20]
- second = self.strIndex[:10]
- answer = self.strIndex[10:20]
+ def test_difference_name_preservation(self, index, second_name, expected, sort):
+ first = index[5:20]
+ second = index[:10]
+ answer = index[10:20]
first.name = "name"
second.name = second_name
@@ -1187,8 +1187,8 @@ def test_difference_name_preservation(self, second_name, expected, sort):
assert result.name == expected
@pytest.mark.parametrize("sort", [None, False])
- def test_difference_empty_arg(self, sort):
- first = self.strIndex[5:20]
+ def test_difference_empty_arg(self, index, sort):
+ first = index[5:20]
first.name == "name"
result = first.difference([], sort)
@@ -1196,8 +1196,8 @@ def test_difference_empty_arg(self, sort):
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
- def test_difference_identity(self, sort):
- first = self.strIndex[5:20]
+ def test_difference_identity(self, index, sort):
+ first = index[5:20]
first.name == "name"
result = first.difference(first, sort)
@@ -1205,12 +1205,12 @@ def test_difference_identity(self, sort):
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
- def test_difference_sort(self, sort):
- first = self.strIndex[5:20]
- second = self.strIndex[:10]
+ def test_difference_sort(self, index, sort):
+ first = index[5:20]
+ second = index[:10]
result = first.difference(second, sort)
- expected = self.strIndex[10:20]
+ expected = index[10:20]
if sort is None:
expected = expected.sort_values()
@@ -1267,7 +1267,7 @@ def test_difference_incomparable_true(self, opname):
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_mi(self, sort):
- index1 = MultiIndex.from_tuples(self.tuples)
+ index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3]))
index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)])
result = index1.symmetric_difference(index2, sort=sort)
expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)])
@@ -1308,73 +1308,78 @@ def test_symmetric_difference_non_index(self, sort):
assert result.name == "new_name"
@pytest.mark.parametrize("sort", [None, False])
- def test_difference_type(self, sort):
+ def test_difference_type(self, indices, sort):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
- skip_index_keys = ["repeats"]
- for key, index in self.generate_index_types(skip_index_keys):
- result = index.difference(index, sort=sort)
- expected = index.drop(index)
- tm.assert_index_equal(result, expected)
+ if not indices.is_unique:
+ return
+ result = indices.difference(indices, sort=sort)
+ expected = indices.drop(indices)
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
- def test_intersection_difference(self, sort):
+ def test_intersection_difference(self, indices, sort):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
- skip_index_keys = ["repeats"]
- for key, index in self.generate_index_types(skip_index_keys):
- inter = index.intersection(index.drop(index))
- diff = index.difference(index, sort=sort)
- tm.assert_index_equal(inter, diff)
+ if not indices.is_unique:
+ return
+ inter = indices.intersection(indices.drop(indices))
+ diff = indices.difference(indices, sort=sort)
+ tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize(
- "attr,expected",
+ "index, expected",
[
- ("strIndex", False),
- ("boolIndex", False),
- ("catIndex", False),
- ("intIndex", True),
- ("dateIndex", False),
- ("floatIndex", True),
+ ("string", False),
+ ("bool", False),
+ ("categorical", False),
+ ("int", True),
+ ("datetime", False),
+ ("float", True),
],
+ indirect=["index"],
)
- def test_is_numeric(self, attr, expected):
- assert getattr(self, attr).is_numeric() == expected
+ def test_is_numeric(self, index, expected):
+ assert index.is_numeric() is expected
@pytest.mark.parametrize(
- "attr,expected",
+ "index, expected",
[
- ("strIndex", True),
- ("boolIndex", True),
- ("catIndex", False),
- ("intIndex", False),
- ("dateIndex", False),
- ("floatIndex", False),
+ ("string", True),
+ ("bool", True),
+ ("categorical", False),
+ ("int", False),
+ ("datetime", False),
+ ("float", False),
],
+ indirect=["index"],
)
- def test_is_object(self, attr, expected):
- assert getattr(self, attr).is_object() == expected
+ def test_is_object(self, index, expected):
+ assert index.is_object() is expected
@pytest.mark.parametrize(
- "attr,expected",
+ "index, expected",
[
- ("strIndex", False),
- ("boolIndex", False),
- ("catIndex", False),
- ("intIndex", False),
- ("dateIndex", True),
- ("floatIndex", False),
+ ("string", False),
+ ("bool", False),
+ ("categorical", False),
+ ("int", False),
+ ("datetime", True),
+ ("float", False),
],
+ indirect=["index"],
)
- def test_is_all_dates(self, attr, expected):
- assert getattr(self, attr).is_all_dates == expected
+ def test_is_all_dates(self, index, expected):
+ assert index.is_all_dates is expected
+
+ def test_summary(self, indices):
+ self._check_method_works(Index._summary, indices)
- def test_summary(self):
- self._check_method_works(Index._summary)
- # GH3869
+ def test_summary_bug(self):
+        # GH3869
ind = Index(["{other}%s", "~:{range}:0"], name="A")
result = ind._summary()
# shouldn't be formatted accidentally.
@@ -1388,9 +1393,10 @@ def test_summary_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
ind.summary()
- def test_format(self):
- self._check_method_works(Index.format)
+ def test_format(self, indices):
+ self._check_method_works(Index.format, indices)
+ def test_format_bug(self):
# GH 14626
# windows has different precision on datetime.datetime.now (it doesn't
# include us since the default for Timestamp shows these but Index
@@ -1402,7 +1408,7 @@ def test_format(self):
expected = [str(index[0])]
assert formatted == expected
- self.strIndex[:0].format()
+ Index([]).format()
@pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]])
def test_format_missing(self, vals, nulls_fixture):
@@ -1419,8 +1425,7 @@ def test_format_missing(self, vals, nulls_fixture):
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
- inc = timedelta(hours=4)
- dates = Index([dt + inc for dt in self.dateIndex], name="something")
+ dates = date_range("2011-01-01 04:00:00", periods=10, name="something")
formatted = dates.format(name=True)
assert formatted[0] == "something"
@@ -1438,15 +1443,8 @@ def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
- def _check_method_works(self, method):
- # TODO: make this a dedicated test with parametrized methods
- method(self.empty)
- method(self.dateIndex)
- method(self.unicodeIndex)
- method(self.strIndex)
- method(self.intIndex)
- method(self.tuples)
- method(self.catIndex)
+ def _check_method_works(self, method, index):
+ method(index)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
@@ -1766,38 +1764,37 @@ def test_slice_locs_negative_step(self, in_slice, expected):
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
- def test_drop_by_str_label(self):
- # TODO: Parametrize these after replacing self.strIndex with fixture
- n = len(self.strIndex)
- drop = self.strIndex[list(range(5, 10))]
- dropped = self.strIndex.drop(drop)
+ @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
+ def test_drop_by_str_label(self, index):
+ n = len(index)
+ drop = index[list(range(5, 10))]
+ dropped = index.drop(drop)
- expected = self.strIndex[list(range(5)) + list(range(10, n))]
+ expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
- dropped = self.strIndex.drop(self.strIndex[0])
- expected = self.strIndex[1:]
+ dropped = index.drop(index[0])
+ expected = index[1:]
tm.assert_index_equal(dropped, expected)
+ @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]])
- def test_drop_by_str_label_raises_missing_keys(self, keys):
+ def test_drop_by_str_label_raises_missing_keys(self, index, keys):
with pytest.raises(KeyError, match=""):
- self.strIndex.drop(keys)
+ index.drop(keys)
- def test_drop_by_str_label_errors_ignore(self):
- # TODO: Parametrize these after replacing self.strIndex with fixture
-
- # errors='ignore'
- n = len(self.strIndex)
- drop = self.strIndex[list(range(5, 10))]
+ @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
+ def test_drop_by_str_label_errors_ignore(self, index):
+ n = len(index)
+ drop = index[list(range(5, 10))]
mixed = drop.tolist() + ["foo"]
- dropped = self.strIndex.drop(mixed, errors="ignore")
+ dropped = index.drop(mixed, errors="ignore")
- expected = self.strIndex[list(range(5)) + list(range(10, n))]
+ expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
- dropped = self.strIndex.drop(["foo", "bar"], errors="ignore")
- expected = self.strIndex[list(range(n))]
+ dropped = index.drop(["foo", "bar"], errors="ignore")
+ expected = index[list(range(n))]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
@@ -1916,12 +1913,15 @@ def test_set_value_deprecated(self):
idx.set_value(arr, idx[1], 80)
assert arr[1] == 80
- def test_get_value(self):
+ @pytest.mark.parametrize(
+ "index", ["string", "int", "datetime", "timedelta"], indirect=True
+ )
+ def test_get_value(self, index):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
- date = self.dateIndex[67]
+ value = index[67]
- assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
+ assert_almost_equal(index.get_value(values, value), values[67])
@pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
@pytest.mark.parametrize(
@@ -2040,8 +2040,8 @@ def test_boolean_cmp(self, values):
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")])
- def test_get_level_values(self, name, level):
- expected = self.strIndex.copy()
+ def test_get_level_values(self, index, name, level):
+ expected = index.copy()
if name:
expected.name = name
@@ -2052,14 +2052,12 @@ def test_slice_keep_name(self):
index = Index(["a", "b"], name="asdf")
assert index.name == index[1:].name
- # instance attributes of the form self.<name>Index
- @pytest.mark.parametrize("index_kind", ["unicode", "str", "date", "int", "float"])
- def test_join_self(self, join_type, index_kind):
-
- res = getattr(self, "{0}Index".format(index_kind))
-
- joined = res.join(res, how=join_type)
- assert res is joined
+ @pytest.mark.parametrize(
+ "index", ["unicode", "string", "datetime", "int", "float"], indirect=True
+ )
+ def test_join_self(self, index, join_type):
+ joined = index.join(index, how=join_type)
+ assert index is joined
@pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"])
def test_str_attribute(self, method):
@@ -2424,10 +2422,11 @@ def test_tab_complete_warning(self, ip):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("idx.", 4))
- def test_deprecated_contains(self):
- for index in self.indices.values():
- with tm.assert_produces_warning(FutureWarning):
- index.contains(1)
+ def test_deprecated_contains(self, indices):
+ # deprecated for all types except IntervalIndex
+ warning = FutureWarning if not isinstance(indices, pd.IntervalIndex) else None
+ with tm.assert_produces_warning(warning):
+ indices.contains(1)
class TestMixedIntIndex(Base):
@@ -2437,12 +2436,12 @@ class TestMixedIntIndex(Base):
_holder = Index
- def setup_method(self, method):
- self.indices = dict(mixedIndex=Index([0, "a", 1, "b", 2, "c"]))
- self.setup_indices()
+ @pytest.fixture(params=[[0, "a", 1, "b", 2, "c"]], ids=["mixedIndex"])
+ def indices(self, request):
+ return Index(request.param)
def create_index(self):
- return self.mixedIndex
+ return Index([0, "a", 1, "b", 2, "c"])
def test_argsort(self):
index = self.create_index()
@@ -2766,13 +2765,12 @@ def test_ensure_index_mixed_closed_intervals(self):
],
)
def test_generated_op_names(opname, indices):
- index = indices
- if isinstance(index, ABCIndex) and opname == "rsub":
+ if isinstance(indices, ABCIndex) and opname == "rsub":
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = "__{name}__".format(name=opname)
- method = getattr(index, opname)
+ method = getattr(indices, opname)
assert method.__name__ == opname
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 67bf9bd20e716..4326c3f8188fc 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -19,9 +19,9 @@
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
- def setup_method(self, method):
- self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
- self.setup_indices()
+ @pytest.fixture
+ def indices(self, request):
+ return tm.makeCategoricalIndex(100)
def create_index(self, categories=None, ordered=False):
if categories is None:
@@ -780,7 +780,7 @@ def test_identical(self):
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
- def test_ensure_copied_data(self):
+ def test_ensure_copied_data(self, indices):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
@@ -788,13 +788,12 @@ def test_ensure_copied_data(self):
# self.value is not an ndarray.
_base = lambda ar: ar if ar.base is None else ar.base
- for index in self.indices.values():
- result = CategoricalIndex(index.values, copy=True)
- tm.assert_index_equal(index, result)
- assert _base(index.values) is not _base(result.values)
+ result = CategoricalIndex(indices.values, copy=True)
+ tm.assert_index_equal(indices, result)
+ assert _base(indices.values) is not _base(result.values)
- result = CategoricalIndex(index.values, copy=False)
- assert _base(index.values) is _base(result.values)
+ result = CategoricalIndex(indices.values, copy=False)
+ assert _base(indices.values) is _base(result.values)
def test_equals_categorical(self):
ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 8bc9783694492..e424b3601a4b2 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta
import re
import numpy as np
@@ -87,32 +87,42 @@ def test_where(self, klass):
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
- def test_insert(self):
+ def test_insert(self, nulls_fixture):
# GH 18295 (test missing)
- expected = Float64Index([0, np.nan, 1, 2, 3, 4])
- for na in (np.nan, pd.NaT, None):
- result = self.create_index().insert(1, na)
- tm.assert_index_equal(result, expected)
+ index = self.create_index()
+ expected = Float64Index([index[0], np.nan] + list(index[1:]))
+ result = index.insert(1, nulls_fixture)
+ tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric):
_holder = Float64Index
- def setup_method(self, method):
- self.indices = dict(
- mixed=Float64Index([1.5, 2, 3, 4, 5]),
- float=Float64Index(np.arange(5) * 2.5),
- mixed_dec=Float64Index([5, 4, 3, 2, 1.5]),
- float_dec=Float64Index(np.arange(4, -1, -1) * 2.5),
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[
+ [1.5, 2, 3, 4, 5],
+ [0.0, 2.5, 5.0, 7.5, 10.0],
+ [5, 4, 3, 2, 1.5],
+ [10.0, 7.5, 5.0, 2.5, 0.0],
+ ],
+ ids=["mixed", "float", "mixed_dec", "float_dec"],
+ )
+ def indices(self, request):
+ return Float64Index(request.param)
+
+ @pytest.fixture
+ def mixed_index(self):
+ return Float64Index([1.5, 2, 3, 4, 5])
+
+ @pytest.fixture
+ def float_index(self):
+ return Float64Index([0.0, 2.5, 5.0, 7.5, 10.0])
def create_index(self):
return Float64Index(np.arange(5, dtype="float64"))
- def test_repr_roundtrip(self):
- for ind in (self.mixed, self.float):
- tm.assert_index_equal(eval(repr(ind)), ind)
+ def test_repr_roundtrip(self, indices):
+ tm.assert_index_equal(eval(repr(indices)), indices)
def check_is_index(self, i):
assert isinstance(i, Index)
@@ -176,30 +186,32 @@ def test_constructor_invalid(self):
with pytest.raises(TypeError, match=msg):
Float64Index([Timestamp("20130101")])
- def test_constructor_coerce(self):
+ def test_constructor_coerce(self, mixed_index, float_index):
- self.check_coerce(self.mixed, Index([1.5, 2, 3, 4, 5]))
- self.check_coerce(self.float, Index(np.arange(5) * 2.5))
- self.check_coerce(self.float, Index(np.array(np.arange(5) * 2.5, dtype=object)))
+ self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5]))
+ self.check_coerce(float_index, Index(np.arange(5) * 2.5))
+ self.check_coerce(
+ float_index, Index(np.array(np.arange(5) * 2.5, dtype=object))
+ )
- def test_constructor_explicit(self):
+ def test_constructor_explicit(self, mixed_index, float_index):
# these don't auto convert
self.check_coerce(
- self.float, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False
+ float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False
)
self.check_coerce(
- self.mixed, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False
+ mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False
)
- def test_astype(self):
+ def test_astype(self, mixed_index, float_index):
- result = self.float.astype(object)
- assert result.equals(self.float)
- assert self.float.equals(result)
+ result = float_index.astype(object)
+ assert result.equals(float_index)
+ assert float_index.equals(result)
self.check_is_index(result)
- i = self.mixed.copy()
+ i = mixed_index.copy()
i.name = "foo"
result = i.astype(object)
assert result.equals(i)
@@ -451,11 +463,12 @@ def test_view(self):
tm.assert_index_equal(i, self._holder(i_view, name="Foo"))
def test_is_monotonic(self):
- assert self.index.is_monotonic is True
- assert self.index.is_monotonic_increasing is True
- assert self.index._is_strictly_monotonic_increasing is True
- assert self.index.is_monotonic_decreasing is False
- assert self.index._is_strictly_monotonic_decreasing is False
+ index = self._holder([1, 2, 3, 4])
+ assert index.is_monotonic is True
+ assert index.is_monotonic_increasing is True
+ assert index._is_strictly_monotonic_increasing is True
+ assert index.is_monotonic_decreasing is False
+ assert index._is_strictly_monotonic_decreasing is False
index = self._holder([4, 3, 2, 1])
assert index.is_monotonic is False
@@ -490,23 +503,22 @@ def test_logical_compat(self):
assert idx.any() == idx.values.any()
def test_identical(self):
- i = Index(self.index.copy())
- assert i.identical(self.index)
+ index = self.create_index()
+ i = Index(index.copy())
+ assert i.identical(index)
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
- i = self.index.copy(dtype=object)
+ i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(i)
- assert not i.identical(self.index)
+ assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
- assert not self.index.copy(dtype=object).identical(
- self.index.copy(dtype=self._dtype)
- )
+ assert not index.copy(dtype=object).identical(index.copy(dtype=self._dtype))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
@@ -522,23 +534,21 @@ def test_join_non_unique(self):
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(ridx, exp_ridx)
- @pytest.mark.parametrize("kind", ["outer", "inner", "left", "right"])
- def test_join_self(self, kind):
- joined = self.index.join(self.index, how=kind)
- assert self.index is joined
+ def test_join_self(self, join_type):
+ index = self.create_index()
+ joined = index.join(index, how=join_type)
+ assert index is joined
def test_union_noncomparable(self):
- from datetime import datetime, timedelta
-
# corner case, non-Int64Index
- now = datetime.now()
- other = Index([now + timedelta(i) for i in range(4)], dtype=object)
- result = self.index.union(other)
- expected = Index(np.concatenate((self.index, other)))
+ index = self.create_index()
+ other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
+ result = index.union(other)
+ expected = Index(np.concatenate((index, other)))
tm.assert_index_equal(result, expected)
- result = other.union(self.index)
- expected = Index(np.concatenate((other, self.index)))
+ result = other.union(index)
+ expected = Index(np.concatenate((other, index)))
tm.assert_index_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
@@ -557,10 +567,12 @@ def test_cant_or_shouldnt_cast(self):
self._holder(data)
def test_view_index(self):
- self.index.view(Index)
+ index = self.create_index()
+ index.view(Index)
def test_prevent_casting(self):
- result = self.index.astype("O")
+ index = self.create_index()
+ result = index.astype("O")
assert result.dtype == np.object_
def test_take_preserve_name(self):
@@ -604,15 +616,15 @@ class TestInt64Index(NumericInt):
_dtype = "int64"
_holder = Int64Index
- def setup_method(self, method):
- self.indices = dict(
- index=Int64Index(np.arange(0, 20, 2)),
- index_dec=Int64Index(np.arange(19, -1, -1)),
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"]
+ )
+ def indices(self, request):
+ return Int64Index(request.param)
def create_index(self):
- return Int64Index(np.arange(5, dtype="int64"))
+ # return Int64Index(np.arange(5, dtype="int64"))
+ return Int64Index(range(0, 20, 2))
def test_constructor(self):
# pass list, coerce fine
@@ -633,9 +645,9 @@ def test_constructor(self):
Int64Index(5)
# copy
- arr = self.index.values
+ arr = index.values
new_index = Int64Index(arr, copy=True)
- tm.assert_index_equal(new_index, self.index)
+ tm.assert_index_equal(new_index, index)
val = arr[0] + 3000
# this should not change index
@@ -691,39 +703,42 @@ def test_coerce_list(self):
assert isinstance(arr, Index)
def test_get_indexer(self):
+ index = self.create_index()
target = Int64Index(np.arange(10))
- indexer = self.index.get_indexer(target)
+ indexer = index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
- indexer = self.index.get_indexer(target, method="pad")
+ indexer = index.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
- indexer = self.index.get_indexer(target, method="backfill")
+ indexer = index.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_intersection(self):
+ index = self.create_index()
other = Index([1, 2, 3, 4, 5])
- result = self.index.intersection(other)
- expected = Index(np.sort(np.intersect1d(self.index.values, other.values)))
+ result = index.intersection(other)
+ expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
- result = other.intersection(self.index)
+ result = other.intersection(index)
expected = Index(
- np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))
+ np.sort(np.asarray(np.intersect1d(index.values, other.values)))
)
tm.assert_index_equal(result, expected)
def test_join_inner(self):
+ index = self.create_index()
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
- res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
@@ -741,9 +756,9 @@ def test_join_inner(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="inner", return_indexers=True)
+ res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True)
- res2 = self.index.intersection(other_mono)
+ res2 = index.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 6], dtype=np.intp)
@@ -754,12 +769,13 @@ def test_join_inner(self):
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
+ index = self.create_index()
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
- res, lidx, ridx = self.index.join(other, how="left", return_indexers=True)
- eres = self.index
+ res, lidx, ridx = index.join(other, how="left", return_indexers=True)
+ eres = index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp)
assert isinstance(res, Int64Index)
@@ -768,7 +784,7 @@ def test_join_left(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="left", return_indexers=True)
+ res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
@@ -787,11 +803,12 @@ def test_join_left(self):
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
+ index = self.create_index()
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
- res, lidx, ridx = self.index.join(other, how="right", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp)
@@ -801,7 +818,7 @@ def test_join_right(self):
assert ridx is None
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="right", return_indexers=True)
+ res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp)
assert isinstance(other, Int64Index)
@@ -821,40 +838,42 @@ def test_join_right(self):
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self):
+ index = self.create_index()
other = Index([3, 6, 7, 8, 10], dtype=object)
- outer = self.index.join(other, how="outer")
- outer2 = other.join(self.index, how="outer")
+ outer = index.join(other, how="outer")
+ outer2 = other.join(index, how="outer")
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
- inner = self.index.join(other, how="inner")
- inner2 = other.join(self.index, how="inner")
+ inner = index.join(other, how="inner")
+ inner2 = other.join(index, how="inner")
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
- left = self.index.join(other, how="left")
- tm.assert_index_equal(left, self.index.astype(object))
+ left = index.join(other, how="left")
+ tm.assert_index_equal(left, index.astype(object))
- left2 = other.join(self.index, how="left")
+ left2 = other.join(index, how="left")
tm.assert_index_equal(left2, other)
- right = self.index.join(other, how="right")
+ right = index.join(other, how="right")
tm.assert_index_equal(right, other)
- right2 = other.join(self.index, how="right")
- tm.assert_index_equal(right2, self.index.astype(object))
+ right2 = other.join(index, how="right")
+ tm.assert_index_equal(right2, index.astype(object))
def test_join_outer(self):
+ index = self.create_index()
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
- res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True)
- noidx_res = self.index.join(other, how="outer")
+ res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
+ noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
@@ -869,8 +888,8 @@ def test_join_outer(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="outer", return_indexers=True)
- noidx_res = self.index.join(other_mono, how="outer")
+ res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True)
+ noidx_res = index.join(other_mono, how="outer")
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp)
@@ -888,14 +907,24 @@ class TestUInt64Index(NumericInt):
_dtype = "uint64"
_holder = UInt64Index
- def setup_method(self, method):
- vals = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
- self.indices = dict(
- index=UInt64Index(vals), index_dec=UInt64Index(reversed(vals))
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[
+ [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25],
+ [2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63],
+ ],
+ ids=["index_inc", "index_dec"],
+ )
+ def indices(self, request):
+ return UInt64Index(request.param)
+
+ @pytest.fixture
+ def index_large(self):
+ # large values used in TestUInt64Index where no compat needed with Int64/Float64
+ large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
+ return UInt64Index(large)
def create_index(self):
+ # compat with shared Int64/Float64 tests; use index_large for UInt64 only tests
return UInt64Index(np.arange(5, dtype="uint64"))
def test_constructor(self):
@@ -915,42 +944,42 @@ def test_constructor(self):
res = Index(np.array([-1, 2 ** 63], dtype=object))
tm.assert_index_equal(res, idx)
- def test_get_indexer(self):
+ def test_get_indexer(self, index_large):
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
- indexer = self.index.get_indexer(target)
+ indexer = index_large.get_indexer(target)
expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
- indexer = self.index.get_indexer(target, method="pad")
+ indexer = index_large.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
- indexer = self.index.get_indexer(target, method="backfill")
+ indexer = index_large.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
- def test_intersection(self):
+ def test_intersection(self, index_large):
other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20])
- result = self.index.intersection(other)
- expected = Index(np.sort(np.intersect1d(self.index.values, other.values)))
+ result = index_large.intersection(other)
+ expected = Index(np.sort(np.intersect1d(index_large.values, other.values)))
tm.assert_index_equal(result, expected)
- result = other.intersection(self.index)
+ result = other.intersection(index_large)
expected = Index(
- np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))
+ np.sort(np.asarray(np.intersect1d(index_large.values, other.values)))
)
tm.assert_index_equal(result, expected)
- def test_join_inner(self):
+ def test_join_inner(self, index_large):
other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
- res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True)
+ res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
@@ -968,9 +997,11 @@ def test_join_inner(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="inner", return_indexers=True)
+ res, lidx, ridx = index_large.join(
+ other_mono, how="inner", return_indexers=True
+ )
- res2 = self.index.intersection(other_mono)
+ res2 = index_large.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 4], dtype=np.intp)
@@ -981,15 +1012,15 @@ def test_join_inner(self):
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
- def test_join_left(self):
+ def test_join_left(self, index_large):
other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
- res, lidx, ridx = self.index.join(other, how="left", return_indexers=True)
- eres = self.index
+ res, lidx, ridx = index_large.join(other, how="left", return_indexers=True)
+ eres = index_large
eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
@@ -998,7 +1029,7 @@ def test_join_left(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="left", return_indexers=True)
+ res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True)
eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp)
assert isinstance(res, UInt64Index)
@@ -1020,14 +1051,14 @@ def test_join_left(self):
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
- def test_join_right(self):
+ def test_join_right(self, index_large):
other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
- res, lidx, ridx = self.index.join(other, how="right", return_indexers=True)
+ res, lidx, ridx = index_large.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp)
@@ -1037,7 +1068,9 @@ def test_join_right(self):
assert ridx is None
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="right", return_indexers=True)
+ res, lidx, ridx = index_large.join(
+ other_mono, how="right", return_indexers=True
+ )
eres = other_mono
elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp)
@@ -1060,38 +1093,38 @@ def test_join_right(self):
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
- def test_join_non_int_index(self):
+ def test_join_non_int_index(self, index_large):
other = Index(
2 ** 63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object
)
- outer = self.index.join(other, how="outer")
- outer2 = other.join(self.index, how="outer")
+ outer = index_large.join(other, how="outer")
+ outer2 = other.join(index_large, how="outer")
expected = Index(
2 ** 63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64")
)
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
- inner = self.index.join(other, how="inner")
- inner2 = other.join(self.index, how="inner")
+ inner = index_large.join(other, how="inner")
+ inner2 = other.join(index_large, how="inner")
expected = Index(2 ** 63 + np.array([10, 20], dtype="uint64"))
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
- left = self.index.join(other, how="left")
- tm.assert_index_equal(left, self.index.astype(object))
+ left = index_large.join(other, how="left")
+ tm.assert_index_equal(left, index_large.astype(object))
- left2 = other.join(self.index, how="left")
+ left2 = other.join(index_large, how="left")
tm.assert_index_equal(left2, other)
- right = self.index.join(other, how="right")
+ right = index_large.join(other, how="right")
tm.assert_index_equal(right, other)
- right2 = other.join(self.index, how="right")
- tm.assert_index_equal(right2, self.index.astype(object))
+ right2 = other.join(index_large, how="right")
+ tm.assert_index_equal(right2, index_large.astype(object))
- def test_join_outer(self):
+ def test_join_outer(self, index_large):
other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
@@ -1099,8 +1132,8 @@ def test_join_outer(self):
# not monotonic
# guarantee of sortedness
- res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True)
- noidx_res = self.index.join(other, how="outer")
+ res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True)
+ noidx_res = index_large.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = UInt64Index(
@@ -1115,8 +1148,10 @@ def test_join_outer(self):
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
- res, lidx, ridx = self.index.join(other_mono, how="outer", return_indexers=True)
- noidx_res = self.index.join(other_mono, how="outer")
+ res, lidx, ridx = index_large.join(
+ other_mono, how="outer", return_indexers=True
+ )
+ noidx_res = index_large.join(other_mono, how="outer")
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 627c5cc56e010..fa64e1bacb2e5 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta
import numpy as np
import pytest
@@ -22,15 +22,18 @@ class TestRangeIndex(Numeric):
_holder = RangeIndex
_compat_props = ["shape", "ndim", "size"]
- def setup_method(self, method):
- self.indices = dict(
- index=RangeIndex(0, 20, 2, name="foo"),
- index_dec=RangeIndex(18, -1, -2, name="bar"),
- )
- self.setup_indices()
+ @pytest.fixture(
+ params=[
+ RangeIndex(start=0, stop=20, step=2, name="foo"),
+ RangeIndex(start=18, stop=-1, step=-2, name="bar"),
+ ],
+ ids=["index_inc", "index_dec"],
+ )
+ def indices(self, request):
+ return request.param
def create_index(self):
- return RangeIndex(5)
+ return RangeIndex(start=0, stop=20, step=2)
def test_can_hold_identifiers(self):
idx = self.create_index()
@@ -38,8 +41,9 @@ def test_can_hold_identifiers(self):
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self):
+ index = self.create_index()
with pytest.raises(ValueError, match="^Length"):
- self.index.names = ["roger", "harold"]
+ index.names = ["roger", "harold"]
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize(
@@ -267,7 +271,8 @@ def test_view(self):
tm.assert_index_equal(i, i_view)
def test_dtype(self):
- assert self.index.dtype == np.int64
+ index = self.create_index()
+ assert index.dtype == np.int64
def test_cached_data(self):
# GH 26565, GH26617
@@ -326,11 +331,12 @@ def test_cached_data(self):
assert isinstance(idx._cached_data, np.ndarray)
def test_is_monotonic(self):
- assert self.index.is_monotonic is True
- assert self.index.is_monotonic_increasing is True
- assert self.index.is_monotonic_decreasing is False
- assert self.index._is_strictly_monotonic_increasing is True
- assert self.index._is_strictly_monotonic_decreasing is False
+ index = RangeIndex(0, 20, 2)
+ assert index.is_monotonic is True
+ assert index.is_monotonic_increasing is True
+ assert index.is_monotonic_decreasing is False
+ assert index._is_strictly_monotonic_increasing is True
+ assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
@@ -376,43 +382,45 @@ def test_logical_compat(self):
assert idx.any() == idx.values.any()
def test_identical(self):
- i = Index(self.index.copy())
- assert i.identical(self.index)
+ index = self.create_index()
+ i = Index(index.copy())
+ assert i.identical(index)
# we don't allow object dtype for RangeIndex
- if isinstance(self.index, RangeIndex):
+ if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
- i = self.index.copy(dtype=object)
+ i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
- assert same_values.identical(self.index.copy(dtype=object))
+ assert same_values.identical(index.copy(dtype=object))
- assert not i.identical(self.index)
+ assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
- assert not self.index.copy(dtype=object).identical(
- self.index.copy(dtype="int64")
- )
+ assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_get_indexer(self):
+ index = self.create_index()
target = RangeIndex(10)
- indexer = self.index.get_indexer(target)
+ indexer = index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
+ index = self.create_index()
target = RangeIndex(10)
- indexer = self.index.get_indexer(target, method="pad")
+ indexer = index.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
+ index = self.create_index()
target = RangeIndex(10)
- indexer = self.index.get_indexer(target, method="backfill")
+ indexer = index.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
@@ -434,10 +442,11 @@ def test_get_indexer_decreasing(self, stop):
def test_join_outer(self):
# join with Int64Index
+ index = self.create_index()
other = Int64Index(np.arange(25, 14, -1))
- res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True)
- noidx_res = self.index.join(other, how="outer")
+ res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
+ noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = Int64Index(
@@ -461,8 +470,8 @@ def test_join_outer(self):
# join with RangeIndex
other = RangeIndex(25, 14, -1)
- res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True)
- noidx_res = self.index.join(other, how="outer")
+ res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
+ noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
assert isinstance(res, Int64Index)
@@ -473,9 +482,10 @@ def test_join_outer(self):
def test_join_inner(self):
# Join with non-RangeIndex
+ index = self.create_index()
other = Int64Index(np.arange(25, 14, -1))
- res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
@@ -495,7 +505,7 @@ def test_join_inner(self):
# Join two RangeIndex
other = RangeIndex(25, 14, -1)
- res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
@@ -504,10 +514,11 @@ def test_join_inner(self):
def test_join_left(self):
# Join with Int64Index
+ index = self.create_index()
other = Int64Index(np.arange(25, 14, -1))
- res, lidx, ridx = self.index.join(other, how="left", return_indexers=True)
- eres = self.index
+ res, lidx, ridx = index.join(other, how="left", return_indexers=True)
+ eres = index
eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp)
assert isinstance(res, RangeIndex)
@@ -518,7 +529,7 @@ def test_join_left(self):
# Join with RangeIndex
other = Int64Index(np.arange(25, 14, -1))
- res, lidx, ridx = self.index.join(other, how="left", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="left", return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
@@ -527,9 +538,10 @@ def test_join_left(self):
def test_join_right(self):
# Join with Int64Index
+ index = self.create_index()
other = Int64Index(np.arange(25, 14, -1))
- res, lidx, ridx = self.index.join(other, how="right", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp)
@@ -541,7 +553,7 @@ def test_join_right(self):
# Join with RangeIndex
other = RangeIndex(25, 14, -1)
- res, lidx, ridx = self.index.join(other, how="right", return_indexers=True)
+ res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
assert isinstance(other, RangeIndex)
@@ -550,36 +562,38 @@ def test_join_right(self):
assert ridx is None
def test_join_non_int_index(self):
+ index = self.create_index()
other = Index([3, 6, 7, 8, 10], dtype=object)
- outer = self.index.join(other, how="outer")
- outer2 = other.join(self.index, how="outer")
+ outer = index.join(other, how="outer")
+ outer2 = other.join(index, how="outer")
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
- inner = self.index.join(other, how="inner")
- inner2 = other.join(self.index, how="inner")
+ inner = index.join(other, how="inner")
+ inner2 = other.join(index, how="inner")
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
- left = self.index.join(other, how="left")
- tm.assert_index_equal(left, self.index.astype(object))
+ left = index.join(other, how="left")
+ tm.assert_index_equal(left, index.astype(object))
- left2 = other.join(self.index, how="left")
+ left2 = other.join(index, how="left")
tm.assert_index_equal(left2, other)
- right = self.index.join(other, how="right")
+ right = index.join(other, how="right")
tm.assert_index_equal(right, other)
- right2 = other.join(self.index, how="right")
- tm.assert_index_equal(right2, self.index.astype(object))
+ right2 = other.join(index, how="right")
+ tm.assert_index_equal(right2, index.astype(object))
def test_join_non_unique(self):
+ index = self.create_index()
other = Index([4, 4, 3, 3])
- res, lidx, ridx = self.index.join(other, return_indexers=True)
+ res, lidx, ridx = index.join(other, return_indexers=True)
eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
@@ -589,40 +603,40 @@ def test_join_non_unique(self):
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
- def test_join_self(self):
- kinds = "outer", "inner", "left", "right"
- for kind in kinds:
- joined = self.index.join(self.index, how=kind)
- assert self.index is joined
+ def test_join_self(self, join_type):
+ index = self.create_index()
+ joined = index.join(index, how=join_type)
+ assert index is joined
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, sort):
# intersect with Int64Index
+ index = self.create_index()
other = Index(np.arange(1, 6))
- result = self.index.intersection(other, sort=sort)
- expected = Index(np.sort(np.intersect1d(self.index.values, other.values)))
+ result = index.intersection(other, sort=sort)
+ expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
- result = other.intersection(self.index, sort=sort)
+ result = other.intersection(index, sort=sort)
expected = Index(
- np.sort(np.asarray(np.intersect1d(self.index.values, other.values)))
+ np.sort(np.asarray(np.intersect1d(index.values, other.values)))
)
tm.assert_index_equal(result, expected)
# intersect with increasing RangeIndex
other = RangeIndex(1, 6)
- result = self.index.intersection(other, sort=sort)
- expected = Index(np.sort(np.intersect1d(self.index.values, other.values)))
+ result = index.intersection(other, sort=sort)
+ expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
# intersect with decreasing RangeIndex
other = RangeIndex(5, 0, -1)
- result = self.index.intersection(other, sort=sort)
- expected = Index(np.sort(np.intersect1d(self.index.values, other.values)))
+ result = index.intersection(other, sort=sort)
+ expected = Index(np.sort(np.intersect1d(index.values, other.values)))
tm.assert_index_equal(result, expected)
# reversed (GH 17296)
- result = other.intersection(self.index, sort=sort)
+ result = other.intersection(index, sort=sort)
tm.assert_index_equal(result, expected)
# GH 17296: intersect two decreasing RangeIndexes
@@ -667,17 +681,15 @@ def test_intersection(self, sort):
@pytest.mark.parametrize("sort", [False, None])
def test_union_noncomparable(self, sort):
- from datetime import datetime, timedelta
-
# corner case, non-Int64Index
- now = datetime.now()
- other = Index([now + timedelta(i) for i in range(4)], dtype=object)
- result = self.index.union(other, sort=sort)
- expected = Index(np.concatenate((self.index, other)))
+ index = self.create_index()
+ other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
+ result = index.union(other, sort=sort)
+ expected = Index(np.concatenate((index, other)))
tm.assert_index_equal(result, expected)
- result = other.union(self.index, sort=sort)
- expected = Index(np.concatenate((other, self.index)))
+ result = other.union(index, sort=sort)
+ expected = Index(np.concatenate((other, index)))
tm.assert_index_equal(result, expected)
@pytest.fixture(
@@ -785,11 +797,13 @@ def test_cant_or_shouldnt_cast(self):
with pytest.raises(TypeError):
RangeIndex("0", "1", "2")
- def test_view_Index(self):
- self.index.view(Index)
+ def test_view_index(self):
+ index = self.create_index()
+ index.view(Index)
def test_prevent_casting(self):
- result = self.index.astype("O")
+ index = self.create_index()
+ result = index.astype("O")
assert result.dtype == np.object_
def test_take_preserve_name(self):
@@ -828,7 +842,8 @@ def test_print_unicode_columns(self):
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_roundtrip(self):
- tm.assert_index_equal(eval(repr(self.index)), self.index)
+ index = self.create_index()
+ tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
@@ -859,20 +874,17 @@ def test_explicit_conversions(self):
result = a - fidx
tm.assert_index_equal(result, expected)
- def test_has_duplicates(self):
- for ind in self.indices:
- if not len(ind):
- continue
- idx = self.indices[ind]
- assert idx.is_unique
- assert not idx.has_duplicates
+ def test_has_duplicates(self, indices):
+ assert indices.is_unique
+ assert not indices.has_duplicates
def test_extended_gcd(self):
- result = self.index._extended_gcd(6, 10)
+ index = self.create_index()
+ result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
- result = self.index._extended_gcd(10, 6)
+ result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
@@ -917,80 +929,71 @@ def test_pickle_compat_construction(self):
pass
def test_slice_specialised(self):
+ index = self.create_index()
+ index.name = "foo"
# scalar indexing
- res = self.index[1]
+ res = index[1]
expected = 2
assert res == expected
- res = self.index[-1]
+ res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
- index = self.index[:]
- expected = self.index
- tm.assert_index_equal(index, expected)
+ index_slice = index[:]
+ expected = index
+ tm.assert_index_equal(index_slice, expected)
# positive slice values
- index = self.index[7:10:2]
+ index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
- tm.assert_index_equal(index, expected)
+ tm.assert_index_equal(index_slice, expected)
# negative slice values
- index = self.index[-1:-5:-2]
+ index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
- tm.assert_index_equal(index, expected)
+ tm.assert_index_equal(index_slice, expected)
# stop overshoot
- index = self.index[2:100:4]
+ index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
- tm.assert_index_equal(index, expected)
+ tm.assert_index_equal(index_slice, expected)
# reverse
- index = self.index[::-1]
- expected = Index(self.index.values[::-1], name="foo")
- tm.assert_index_equal(index, expected)
+ index_slice = index[::-1]
+ expected = Index(index.values[::-1], name="foo")
+ tm.assert_index_equal(index_slice, expected)
- index = self.index[-8::-1]
+ index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
- tm.assert_index_equal(index, expected)
+ tm.assert_index_equal(index_slice, expected)
- index = self.index[-40::-1]
+ index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
- tm.assert_index_equal(index, expected)
-
- index = self.index[40::-1]
- expected = Index(self.index.values[40::-1], name="foo")
- tm.assert_index_equal(index, expected)
-
- index = self.index[10::-1]
- expected = Index(self.index.values[::-1], name="foo")
- tm.assert_index_equal(index, expected)
-
- def test_len_specialised(self):
-
- # make sure that our len is the same as
- # np.arange calc
-
- for step in np.arange(1, 6, 1):
+ tm.assert_index_equal(index_slice, expected)
- arr = np.arange(0, 5, step)
- i = RangeIndex(0, 5, step)
- assert len(i) == len(arr)
+ index_slice = index[40::-1]
+ expected = Index(index.values[40::-1], name="foo")
+ tm.assert_index_equal(index_slice, expected)
- i = RangeIndex(5, 0, step)
- assert len(i) == 0
+ index_slice = index[10::-1]
+ expected = Index(index.values[::-1], name="foo")
+ tm.assert_index_equal(index_slice, expected)
- for step in np.arange(-6, -1, 1):
+ @pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
+ def test_len_specialised(self, step):
+ # make sure that our len is the same as np.arange calc
+ start, stop = (0, 5) if step > 0 else (5, 0)
- arr = np.arange(5, 0, step)
- i = RangeIndex(5, 0, step)
- assert len(i) == len(arr)
+ arr = np.arange(start, stop, step)
+ index = RangeIndex(start, stop, step)
+ assert len(index) == len(arr)
- i = RangeIndex(0, 5, step)
- assert len(i) == 0
+ index = RangeIndex(stop, start, step)
+ assert len(index) == 0
@pytest.fixture(
params=[
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index b3850f7a4e09e..d5b23653e8a72 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -13,7 +13,7 @@
import pandas as pd
from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index
from pandas.api.types import pandas_dtype
-from pandas.tests.indexes.conftest import indices_list
+from pandas.tests.indexes.conftest import indices_dict
import pandas.util.testing as tm
COMPATIBLE_INCONSISTENT_PAIRS = OrderedDict(
@@ -26,15 +26,12 @@
)
-@pytest.fixture(
- params=list(it.combinations(indices_list, 2)),
- ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__,
-)
+@pytest.fixture(params=it.combinations(indices_dict, 2), ids="-".join)
def index_pair(request):
"""
Create all combinations of 2 index types.
"""
- return request.param
+ return indices_dict[request.param[0]], indices_dict[request.param[1]]
def test_union_same_types(indices):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index e790a913fcac2..2ef86ddf8c8bf 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -30,9 +30,9 @@
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
- def setup_method(self, method):
- self.indices = dict(index=tm.makeTimedeltaIndex(10))
- self.setup_indices()
+ @pytest.fixture
+ def indices(self):
+ return tm.makeTimedeltaIndex(10)
def create_index(self):
return pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
| The common index tests use `setup_method` to create a `dict` of indexes to test against:
https://github.com/pandas-dev/pandas/blob/df2e0813e053cc5bc924b2292ea8918a6b27f0e2/pandas/tests/indexes/test_range.py#L25-L30
This `dict` of indexes is then iterated over within the tests:
https://github.com/pandas-dev/pandas/blob/df2e0813e053cc5bc924b2292ea8918a6b27f0e2/pandas/tests/indexes/common.py#L359-L363
The bulk of this PR involves converting `self.indices` into a parametrized fixture of indexes, and adjusting the tests to support this (largely just unindenting). I had to do this conversion for all indexes at once since common test code for all index classes utilizes this pattern, so the diff is fairly large, but it should be relatively simple changes.
I also had to make some changes to references to specific indexes as well (e.g. `self.index`, `self.strIndex`, etc.) since the `setup_method` code also directly set each index in the `dict` as a class attribute. | https://api.github.com/repos/pandas-dev/pandas/pulls/28865 | 2019-10-09T04:50:28Z | 2019-10-11T15:14:07Z | 2019-10-11T15:14:07Z | 2019-12-20T01:03:29Z |
TST: Fix xfails for non-box maybe_promote on integer dtypes | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 5b13e13bb20ba..a7fdd6759ba95 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -408,9 +408,58 @@ def maybe_promote(dtype, fill_value=np.nan):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
- arr = np.asarray(fill_value)
- if arr != arr.astype(dtype):
- dtype = arr.dtype
+ mst = np.min_scalar_type(fill_value)
+ if mst > dtype:
+ # np.dtype ordering considers:
+ # int[n] < int[2*n]
+ # uint[n] < uint[2*n]
+ # u?int[n] < object_
+ dtype = mst
+
+ elif np.can_cast(fill_value, dtype):
+ pass
+
+ elif dtype.kind == "u" and mst.kind == "i":
+ dtype = np.promote_types(dtype, mst)
+ if dtype.kind == "f":
+ # Case where we disagree with numpy
+ dtype = np.dtype(np.object_)
+
+ elif dtype.kind == "i" and mst.kind == "u":
+
+ if fill_value > np.iinfo(np.int64).max:
+ # object is the only way to represent fill_value and keep
+ # the range allowed by the given dtype
+ dtype = np.dtype(np.object_)
+
+ elif mst.itemsize < dtype.itemsize:
+ pass
+
+ elif dtype.itemsize == mst.itemsize:
+ # We never cast signed to unsigned because that loses
+ # parts of the original range, so find the smallest signed
+ # integer that can hold all of `mst`.
+ ndt = {
+ np.int64: np.object_,
+ np.int32: np.int64,
+ np.int16: np.int32,
+ np.int8: np.int16,
+ }[dtype.type]
+ dtype = np.dtype(ndt)
+
+ else:
+ # bump to signed integer dtype that holds all of `mst` range
+ # Note: we have to use itemsize because some (windows)
+ # builds don't satisfiy e.g. np.uint32 == np.uint32
+ ndt = {
+ 4: np.int64,
+ 2: np.int32,
+ 1: np.int16, # TODO: Test for this case
+ }[mst.itemsize]
+ dtype = np.dtype(ndt)
+
+ fill_value = dtype.type(fill_value)
+
elif issubclass(dtype.type, np.floating):
# check if we can cast
if _check_lossless_cast(fill_value, dtype):
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index e4e5a22ea6ca0..8d10ed26a80fa 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -151,7 +151,17 @@ def _assert_match(result_fill_value, expected_fill_value):
# GH#23982/25425 require the same type in addition to equality/NA-ness
res_type = type(result_fill_value)
ex_type = type(expected_fill_value)
- assert res_type == ex_type
+ if res_type.__name__ == "uint64":
+ # No idea why, but these (sometimes) do not compare as equal
+ assert ex_type.__name__ == "uint64"
+ elif res_type.__name__ == "ulonglong":
+ # On some builds we get this instead of np.uint64
+ # Note: cant check res_type.dtype.itemsize directly on numpy 1.18
+ assert res_type(0).itemsize == 8
+ assert ex_type == res_type or ex_type == np.uint64
+ else:
+ # On some builds, type comparison fails, e.g. np.int32 != np.int32
+ assert res_type == ex_type or res_type.__name__ == ex_type.__name__
match_value = result_fill_value == expected_fill_value
@@ -275,26 +285,6 @@ def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype, box):
expected_dtype = np.dtype(expected_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if not boxed:
- if expected_dtype == object:
- pytest.xfail("overflow error")
- if expected_dtype == "int32":
- pytest.xfail("always upcasts to platform int")
- if dtype == "int8" and expected_dtype == "int16":
- pytest.xfail("casts to int32 instead of int16")
- if (
- issubclass(dtype.type, np.unsignedinteger)
- and np.iinfo(dtype).max < fill_value <= np.iinfo("int64").max
- ):
- pytest.xfail("falsely casts to signed")
- if (dtype, expected_dtype) in [
- ("uint8", "int16"),
- ("uint32", "int64"),
- ] and fill_value != np.iinfo("int32").min - 1:
- pytest.xfail("casts to int32 instead of int8/int16")
- # this following xfail is "only" a consequence of the - now strictly
- # enforced - principle that maybe_promote_with_scalar always casts
- pytest.xfail("wrong return type of fill_value")
if boxed:
if expected_dtype != object:
pytest.xfail("falsely casts to object")
| Orthogonal to other outstanding maybe_promote PRs.
This one required pretty significant changes to the code. Using `np.min_scalar_type` and `np.can_cast` cleans this up a bit, but it is still pretty verbose. AFAICT there is no clear way to make it shorter without significantly sacrificing clarity.
In a follow-up I think L410-459 can be refactored out to a helper function. Waiting on that until I figure out the boxed=True cases, which are still troublesome. | https://api.github.com/repos/pandas-dev/pandas/pulls/28864 | 2019-10-09T03:35:04Z | 2019-10-10T12:51:18Z | 2019-10-10T12:51:18Z | 2019-10-10T13:34:20Z |
TST: Fix 36 maybe_promote xfails wanting np.bytes_ instead of np.object_ | diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index e4e5a22ea6ca0..b498d589119d2 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -506,25 +506,13 @@ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced, box)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if issubclass(fill_dtype.type, np.bytes_):
- if not boxed or box_dtype == object:
- pytest.xfail("falsely upcasts to object")
- # takes the opinion that bool dtype has no missing value marker
- else:
- pytest.xfail("wrong missing value marker")
- else:
- if boxed and box_dtype is None:
- pytest.xfail("does not upcast to object")
-
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
- # filling bytes with anything but bytes casts to object
- expected_dtype = (
- dtype if issubclass(fill_dtype.type, np.bytes_) else np.dtype(object)
- )
+ # we never use bytes dtype internally, always promote to object
+ expected_dtype = np.dtype(np.object_)
exp_val_for_scalar = fill_value
- exp_val_for_array = None if issubclass(fill_dtype.type, np.bytes_) else np.nan
+ exp_val_for_array = np.nan
_check_promote(
dtype,
@@ -542,13 +530,7 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box)
fill_dtype = np.dtype(bytes_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if issubclass(dtype.type, np.bytes_):
- if not boxed or box_dtype == object:
- pytest.xfail("falsely upcasts to object")
- # takes the opinion that bool dtype has no missing value marker
- else:
- pytest.xfail("wrong missing value marker")
- else:
+ if not issubclass(dtype.type, np.bytes_):
if (
boxed
and (box_dtype == "bytes" or box_dtype is None)
@@ -562,11 +544,11 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box)
# special case for box_dtype (cannot use fixture in parametrization)
box_dtype = fill_dtype if box_dtype == "bytes" else box_dtype
- # filling bytes with anything but bytes casts to object
- expected_dtype = dtype if issubclass(dtype.type, np.bytes_) else np.dtype(object)
+ # we never use bytes dtype internally, always promote to object
+ expected_dtype = np.dtype(np.object_)
# output is not a generic bytes, but corresponds to expected_dtype
exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
- exp_val_for_array = None if issubclass(dtype.type, np.bytes_) else np.nan
+ exp_val_for_array = np.nan
_check_promote(
dtype,
| @jreback less trivial than some of the others. These changes are based on my understanding that maybe_promote should _never_ be returning np.bytes_ dtype; those cases all become np.object_. That is what maybe_promote currently does, and this updates the tests to expect that behavior. | https://api.github.com/repos/pandas-dev/pandas/pulls/28861 | 2019-10-08T22:57:01Z | 2019-10-11T12:24:27Z | 2019-10-11T12:24:27Z | 2019-10-11T15:30:07Z |
DOC: Fix missing periods and non capitalized summary beginnings (#27977) | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 697e97e518b13..32dcc86faa7e8 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1710,7 +1710,7 @@ cdef class _Period:
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
- interval
+ interval.
Parameters
----------
@@ -1777,7 +1777,7 @@ cdef class _Period:
def to_timestamp(self, freq=None, how='start', tz=None):
"""
Return the Timestamp representation of the Period at the target
- frequency at the specified end (how) of the Period
+ frequency at the specified end (how) of the Period.
Parameters
----------
@@ -2380,7 +2380,7 @@ cdef class _Period:
class Period(_Period):
"""
- Represents a period of time
+ Represents a period of time.
Parameters
----------
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index f9cb35eb79ae3..3d267b0114695 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1344,7 +1344,7 @@ class Timedelta(_Timedelta):
def floor(self, freq):
"""
- return a new Timedelta floored to this resolution.
+ Return a new Timedelta floored to this resolution.
Parameters
----------
@@ -1355,7 +1355,7 @@ class Timedelta(_Timedelta):
def ceil(self, freq):
"""
- return a new Timedelta ceiled to this resolution.
+ Return a new Timedelta ceiled to this resolution.
Parameters
----------
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 6dd0b116b3b0d..4039cc91fb554 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -975,7 +975,7 @@ def length(self):
@property
def mid(self):
"""
- Return the midpoint of each Interval in the IntervalArray as an Index
+ Return the midpoint of each Interval in the IntervalArray as an Index.
"""
try:
return 0.5 * (self.left + self.right)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 9c4746f4d68e3..6bac3fe426f2d 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -849,7 +849,7 @@ def set_uuid(self, uuid):
def set_caption(self, caption):
"""
- Set the caption on a Styler
+ Set the caption on a Styler.
Parameters
----------
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 74ce60c6116a9..426ca9632af29 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -8,7 +8,7 @@
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
- Helper function to convert DataFrame and Series to matplotlib.table
+ Helper function to convert DataFrame and Series to matplotlib.table.
Parameters
----------
@@ -32,7 +32,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
def register(explicit=True):
"""
- Register Pandas Formatters and Converters with matplotlib
+ Register Pandas Formatters and Converters with matplotlib.
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
@@ -54,7 +54,7 @@ def register(explicit=True):
def deregister():
"""
- Remove pandas' formatters and converters
+ Remove pandas' formatters and converters.
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
| - [ ] closes #27977
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Fixed for the following functions
`pandas.io.formats.style.Styler.set_caption
pandas.plotting.table
pandas.plotting.register_matplotlib_converters
pandas.plotting.deregister_matplotlib_converters
pandas.arrays.IntervalArray.mid
pandas.Timedelta.ceil
pandas.Timedelta.floor
pandas.Period
pandas.Period.asfreq
pandas.Period.to_timestamp`
| https://api.github.com/repos/pandas-dev/pandas/pulls/28858 | 2019-10-08T21:26:02Z | 2019-10-09T07:37:50Z | 2019-10-09T07:37:50Z | 2019-10-09T07:38:05Z |
CLN: assorted cleanups, mostly post-black fixups | diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index ec3dd7a48a89f..a9e45cad22d27 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -67,7 +67,7 @@ class SeriesConstructors:
def setup(self, data_fmt, with_index, dtype):
if data_fmt in (gen_of_str, gen_of_tuples) and with_index:
raise NotImplementedError(
- "Series constructors do not support " "using generators with indexes"
+ "Series constructors do not support using generators with indexes"
)
N = 10 ** 4
if dtype == "float":
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 06a181875aaa8..cbab9fdc9c0ba 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -27,7 +27,7 @@ def time_add(self, engine, threads):
def time_and(self, engine, threads):
pd.eval(
- "(self.df > 0) & (self.df2 > 0) & " "(self.df3 > 0) & (self.df4 > 0)",
+ "(self.df > 0) & (self.df2 > 0) & (self.df3 > 0) & (self.df4 > 0)",
engine=engine,
)
diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py
index 8ec04a2087f1b..b78dc63d17130 100644
--- a/asv_bench/benchmarks/io/hdf.py
+++ b/asv_bench/benchmarks/io/hdf.py
@@ -88,11 +88,11 @@ def time_write_store_table_dc(self):
def time_query_store_table_wide(self):
self.store.select(
- "table_wide", where="index > self.start_wide and " "index < self.stop_wide"
+ "table_wide", where="index > self.start_wide and index < self.stop_wide"
)
def time_query_store_table(self):
- self.store.select("table", where="index > self.start and " "index < self.stop")
+ self.store.select("table", where="index > self.start and index < self.stop")
def time_store_repr(self):
repr(self.store)
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 5e2a2db20b53c..34faf183db1c2 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -628,11 +628,11 @@ def linkcode_resolve(domain, info):
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if "+" in pandas.__version__:
- return "http://github.com/pandas-dev/pandas/blob/master/pandas/" "{}{}".format(
+ return "http://github.com/pandas-dev/pandas/blob/master/pandas/{}{}".format(
fn, linespec
)
else:
- return "http://github.com/pandas-dev/pandas/blob/" "v{}/pandas/{}{}".format(
+ return "http://github.com/pandas-dev/pandas/blob/v{}/pandas/{}{}".format(
pandas.__version__, fn, linespec
)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index da75e2c49ae10..ea52736cb11a7 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -256,7 +256,7 @@ Timezones
Numeric
^^^^^^^
- Bug in :meth:`DataFrame.quantile` with zero-column :class:`DataFrame` incorrectly raising (:issue:`23925`)
-- :class:`DataFrame` inequality comparisons with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`)
+- :class:`DataFrame` flex inequality comparisons methods (:meth:`DataFrame.lt`, :meth:`DataFrame.le`, :meth:`DataFrame.gt`, :meth: `DataFrame.ge`) with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`)
- Bug in :class:`DataFrame` logical operations (`&`, `|`, `^`) not matching :class:`Series` behavior by filling NA values (:issue:`28741`)
-
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index a7d6d19bbc80d..34eb9412451c5 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -170,9 +170,9 @@ cdef class Reducer:
PyArray_SETITEM(result, PyArray_ITER_DATA(it), res)
chunk.data = chunk.data + self.increment
PyArray_ITER_NEXT(it)
- except Exception, e:
- if hasattr(e, 'args'):
- e.args = e.args + (i,)
+ except Exception as err:
+ if hasattr(err, 'args'):
+ err.args = err.args + (i,)
raise
finally:
# so we don't free the wrong memory
diff --git a/pandas/_version.py b/pandas/_version.py
index 4f5bdf59a99d5..0cdedf3da3ea7 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -249,7 +249,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
- fmt = "tag '{full_tag}' doesn't start with prefix " "'{tag_prefix}'"
+ fmt = "tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
msg = fmt.format(full_tag=full_tag, tag_prefix=tag_prefix)
if verbose:
print(msg)
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 714423de34222..605d179e7c652 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -341,13 +341,15 @@ def apply_series_generator(self):
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
- except Exception as e:
- if hasattr(e, "args"):
+ except Exception as err:
+ if hasattr(err, "args"):
# make sure i is defined
if i is not None:
k = res_index[i]
- e.args = e.args + ("occurred at index %s" % pprint_thing(k),)
+ err.args = err.args + (
+ "occurred at index %s" % pprint_thing(k),
+ )
raise
self.results = results
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 7cc9dc11a8ccc..eb57d703cd4d5 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -672,7 +672,7 @@ def _read_next_page(self):
return True
elif len(self._cached_page) != self._page_length:
self.close()
- msg = "failed to read complete page from file " "(read {:d} of {:d} bytes)"
+ msg = "failed to read complete page from file (read {:d} of {:d} bytes)"
raise ValueError(msg.format(len(self._cached_page), self._page_length))
self._read_page_header()
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 966a18e11a620..d7b0839ec62ea 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -685,7 +685,7 @@ def _get_call_args(backend_name, data, args, kwargs):
else:
raise TypeError(
(
- "Called plot accessor for type {}, expected " "Series or DataFrame"
+ "Called plot accessor for type {}, expected Series or DataFrame"
).format(type(data).__name__)
)
@@ -740,7 +740,7 @@ def __call__(self, *args, **kwargs):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(
- ("plot kind {} can only be used for " "data frames").format(kind)
+ ("plot kind {} can only be used for data frames").format(kind)
)
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index 99035013092cc..eed328131da92 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -331,9 +331,7 @@ def plot_group(keys, values, ax):
if return_type is None:
return_type = "axes"
if layout is not None:
- raise ValueError(
- "The 'layout' keyword is not supported when " "'by' is None"
- )
+ raise ValueError("The 'layout' keyword is not supported when 'by' is None")
if ax is None:
rc = {"figure.figsize": figsize} if figsize is not None else {}
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 82c5ba7f0317d..a729951b3d7db 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -230,7 +230,7 @@ def _validate_color_args(self):
"color" in self.kwds or "colors" in self.kwds
) and self.colormap is not None:
warnings.warn(
- "'color' and 'colormap' cannot be used " "simultaneously. Using 'color'"
+ "'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index 5213e09f14067..f95ff2578d882 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -184,7 +184,7 @@ def _grouped_plot(
if figsize == "default":
# allowed to specify mpl default with 'default'
warnings.warn(
- "figsize='default' is deprecated. Specify figure " "size by tuple instead",
+ "figsize='default' is deprecated. Specify figure size by tuple instead",
FutureWarning,
stacklevel=5,
)
@@ -298,9 +298,7 @@ def hist_series(
if by is None:
if kwds.get("layout", None) is not None:
- raise ValueError(
- "The 'layout' keyword is not supported when " "'by' is None"
- )
+ raise ValueError("The 'layout' keyword is not supported when 'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop(
"figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)
@@ -394,7 +392,7 @@ def hist_frame(
naxes = len(data.columns)
if naxes == 0:
- raise ValueError("hist method requires numerical columns, " "nothing to plot.")
+ raise ValueError("hist method requires numerical columns, nothing to plot.")
fig, axes = _subplots(
naxes=naxes,
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index e1bba5856e271..927b9cf4e392a 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -25,7 +25,7 @@ def _get_standard_colors(
elif color is not None:
if colormap is not None:
warnings.warn(
- "'color' and 'colormap' cannot be used " "simultaneously. Using 'color'"
+ "'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
colors = list(color) if is_list_like(color) else color
else:
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index eddc9b4cd21bd..caa0167c06389 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -188,8 +188,7 @@ def _subplots(
ax = _flatten(ax)
if layout is not None:
warnings.warn(
- "When passing multiple axes, layout keyword is " "ignored",
- UserWarning,
+ "When passing multiple axes, layout keyword is ignored", UserWarning
)
if sharex or sharey:
warnings.warn(
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 1045b72f0aa6e..f35707de189dc 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -1233,8 +1233,8 @@ class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
- except Exception as e:
- self.err = e
+ except Exception as err:
+ self.err = err
else:
self.err = None
diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py
index 5692404205012..f42c507e51511 100644
--- a/pandas/tests/window/test_window.py
+++ b/pandas/tests/window/test_window.py
@@ -65,7 +65,7 @@ def test_agg_function_support(self, arg):
df = pd.DataFrame({"A": np.arange(5)})
roll = df.rolling(2, win_type="triang")
- msg = "'{arg}' is not a valid function for " "'Window' object".format(arg=arg)
+ msg = "'{arg}' is not a valid function for 'Window' object".format(arg=arg)
with pytest.raises(AttributeError, match=msg):
roll.agg(arg)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 81d8869dd7ba0..84b00d7f4907f 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -658,9 +658,7 @@ def __init__(self, start="09:00", end="17:00", offset=timedelta(0)):
# Validation of input
if len(start) != len(end):
- raise ValueError(
- "number of starting time and ending time " "must be the same"
- )
+ raise ValueError("number of starting time and ending time must be the same")
num_openings = len(start)
# sort starting and ending time by starting time
@@ -2242,7 +2240,7 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
variation = "last"
else:
raise ValueError(
- "Unable to parse varion_code: " "{code}".format(code=varion_code)
+ "Unable to parse varion_code: {code}".format(code=varion_code)
)
startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code]
@@ -2557,7 +2555,7 @@ def __init__(self, n=1, normalize=False):
BaseOffset.__init__(self, n, normalize)
if normalize:
raise ValueError(
- "Tick offset with `normalize=True` are not " "allowed."
+ "Tick offset with `normalize=True` are not allowed."
) # GH#21427
__gt__ = _tick_comp(operator.gt)
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 8a25e511b5fc4..ebc015c820c14 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -171,7 +171,7 @@ def deprecate_kwarg(
if mapping is not None and not hasattr(mapping, "get") and not callable(mapping):
raise TypeError(
- "mapping from old to new argument values " "must be dict or callable!"
+ "mapping from old to new argument values must be dict or callable!"
)
def _deprecate_kwarg(func: F) -> F:
@@ -214,7 +214,7 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]:
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name) is not None:
msg = (
- "Can only specify '{old_name}' or '{new_name}', " "not both"
+ "Can only specify '{old_name}' or '{new_name}', not both"
).format(old_name=old_arg_name, new_name=new_arg_name)
raise TypeError(msg)
else:
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 953c8a43a21b8..4f2cbd4314b8e 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -6,11 +6,11 @@ def rewrite_exception(old_name, new_name):
"""Rewrite the message of an exception."""
try:
yield
- except Exception as e:
- msg = e.args[0]
+ except Exception as err:
+ msg = err.args[0]
msg = msg.replace(old_name, new_name)
args = (msg,)
- if len(e.args) > 1:
- args = args + e.args[1:]
- e.args = args
+ if len(err.args) > 1:
+ args = args + err.args[1:]
+ err.args = args
raise
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 21d09c06940ca..25795859d8018 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -139,7 +139,7 @@ def main():
"--json",
metavar="FILE",
nargs=1,
- help="Save output as JSON into file, pass in " "'-' to output to stdout",
+ help="Save output as JSON into file, pass in '-' to output to stdout",
)
(options, args) = parser.parse_args()
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index c9fd426f68b48..b516c3d78a11e 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -185,7 +185,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable:
)
skip_if_not_us_locale = pytest.mark.skipif(
_skip_if_not_us_locale(),
- reason="Specific locale is set " "{lang}".format(lang=locale.getlocale()[0]),
+ reason="Specific locale is set {lang}".format(lang=locale.getlocale()[0]),
)
skip_if_no_scipy = pytest.mark.skipif(
_skip_if_no_scipy(), reason="Missing SciPy requirement"
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index f5a472596f58f..0eaf46d563163 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -289,7 +289,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
- msg = "{} got multiple values for argument " "'{}'".format(
+ msg = "{} got multiple values for argument '{}'".format(
method_name, arg_name
)
raise TypeError(msg)
@@ -318,7 +318,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
elif len(args) == 2:
if "axis" in kwargs:
# Unambiguously wrong
- msg = "Cannot specify both 'axis' and any of 'index' " "or 'columns'"
+ msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
raise TypeError(msg)
msg = (
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index a34fdee227afc..c8b41a87baa9d 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1175,7 +1175,7 @@ def assert_series_equal(
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
- "[datetimelike_compat=True] {left} is not equal to " "{right}."
+ "[datetimelike_compat=True] {left} is not equal to {right}."
).format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
@@ -2363,26 +2363,26 @@ def wrapper(*args, **kwargs):
skip()
try:
return t(*args, **kwargs)
- except Exception as e:
- errno = getattr(e, "errno", None)
+ except Exception as err:
+ errno = getattr(err, "errno", None)
if not errno and hasattr(errno, "reason"):
- errno = getattr(e.reason, "errno", None)
+ errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(
"Skipping test due to known errno"
- " and error {error}".format(error=e)
+ " and error {error}".format(error=err)
)
- e_str = str(e)
+ e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
"Skipping test because exception "
- "message is known and error {error}".format(error=e)
+ "message is known and error {error}".format(error=err)
)
- if not isinstance(e, error_classes):
+ if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
@@ -2390,7 +2390,7 @@ def wrapper(*args, **kwargs):
else:
skip(
"Skipping test due to lack of connectivity"
- " and error {error}".format(error=e)
+ " and error {error}".format(error=err)
)
return wrapper
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index 95a892b822cff..5e1a169dbfc3f 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -46,14 +46,14 @@
"--dir-masks",
metavar="d_re(,d_re)*",
default=[],
- help="comma separated list of regexes to match base " "path against",
+ help="comma separated list of regexes to match base path against",
)
argparser.add_argument(
"-p",
"--path-masks",
metavar="p_re(,p_re)*",
default=[],
- help="comma separated list of regexes to match full " "file path against",
+ help="comma separated list of regexes to match full file path against",
)
argparser.add_argument(
"-y",
@@ -195,7 +195,7 @@ def sorter(i):
return hits[i].path, d
print(
- ("\nThese commits touched the %s method in these files " "on these dates:\n")
+ ("\nThese commits touched the %s method in these files on these dates:\n")
% args.funcname
)
for i in sorted(range(len(hits)), key=sorter):
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 85e5bf239cbfa..f1b1d9d8678bb 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -1029,7 +1029,7 @@ def test_bad_generic_functions(self, capsys, func):
(
"BadReturns",
"no_capitalization",
- ("Return value description should start with a capital " "letter",),
+ ("Return value description should start with a capital letter",),
),
(
"BadReturns",
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 401eaf8ff5ed5..d363e7108fff3 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -91,7 +91,7 @@
"whitespace only",
"GL06": 'Found unknown section "{section}". Allowed sections are: '
"{allowed_sections}",
- "GL07": "Sections are in the wrong order. Correct order is: " "{correct_sections}",
+ "GL07": "Sections are in the wrong order. Correct order is: {correct_sections}",
"GL08": "The object does not have a docstring",
"GL09": "Deprecation warning should precede extended summary",
"GL10": "reST directives {directives} must be followed by two colons",
diff --git a/setup.py b/setup.py
index 7040147c2b741..04aedcb101e25 100755
--- a/setup.py
+++ b/setup.py
@@ -79,7 +79,7 @@ def is_platform_mac():
except ImportError:
import tempita
except ImportError:
- raise ImportError("Building pandas requires Tempita: " "pip install Tempita")
+ raise ImportError("Building pandas requires Tempita: pip install Tempita")
_pxi_dep_template = {
@@ -142,9 +142,7 @@ def build_extensions(self):
_build_ext.build_extensions(self)
-DESCRIPTION = (
- "Powerful data structures for data analysis, time series, " "and statistics"
-)
+DESCRIPTION = "Powerful data structures for data analysis, time series, and statistics"
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
| Mostly fixing extra " " introduced by black (will take a look at the issue tracker there to see if that can be fixed once and for all).
Also change `except Foo as e:` to `except Foo as err` and remove one `except Exception` in a docs file. Clarified a whatsnew note that @jorisvandenbossche asked for a while back. | https://api.github.com/repos/pandas-dev/pandas/pulls/28857 | 2019-10-08T20:40:03Z | 2019-10-11T18:15:35Z | 2019-10-11T18:15:35Z | 2019-10-11T18:35:24Z |
TST: un-xfail 22 maybe_promote tests | diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index da2b4c28a02a5..5fd1ceb7b0027 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -515,14 +515,6 @@ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced, box)
else:
if boxed and box_dtype is None:
pytest.xfail("does not upcast to object")
- if (
- is_integer_dtype(fill_dtype)
- or is_float_dtype(fill_dtype)
- or is_complex_dtype(fill_dtype)
- or is_object_dtype(fill_dtype)
- or is_timedelta64_dtype(fill_dtype)
- ) and not boxed:
- pytest.xfail("does not upcast to object")
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
@@ -557,15 +549,12 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box)
else:
pytest.xfail("wrong missing value marker")
else:
- pass
if (
boxed
and (box_dtype == "bytes" or box_dtype is None)
and not (is_string_dtype(dtype) or dtype == bool)
):
pytest.xfail("does not upcast to object")
- if not boxed and is_datetime_or_timedelta_dtype(dtype):
- pytest.xfail("raises error")
# create array of given dtype
fill_value = b"abc"
| orthogonal to other outstanding PRs in this file | https://api.github.com/repos/pandas-dev/pandas/pulls/28856 | 2019-10-08T18:48:11Z | 2019-10-08T20:58:00Z | 2019-10-08T20:58:00Z | 2019-10-08T21:07:44Z |
remove doc note about apply applying a function to the first element … | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1a19910a0957c..5200ad0ba0d23 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6729,14 +6729,6 @@ def apply(
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
- Notes
- -----
- In the current implementation apply calls `func` twice on the
- first column/row to decide whether it can take a fast or slow
- code path. This can lead to unexpected behavior if `func` has
- side-effects, as they will take effect twice for the first
- column/row.
-
Examples
--------
| …twice
- [x] closes #28827
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/28854 | 2019-10-08T18:35:16Z | 2019-10-08T20:56:32Z | 2019-10-08T20:56:32Z | 2019-10-08T20:56:40Z |
CLN: dont catch Exception on reindex_multi | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a135f567fe6f4..f77d543193e74 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4555,10 +4555,7 @@ def reindex(self, *args, **kwargs):
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
- try:
- return self._reindex_multi(axes, copy, fill_value)
- except Exception:
- pass
+ return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
@@ -9065,7 +9062,6 @@ def _where(
# try to not change dtype at first (if try_quick)
if try_quick:
-
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
| @toobaz any guesses what this might have been intended to catch? | https://api.github.com/repos/pandas-dev/pandas/pulls/28853 | 2019-10-08T18:20:00Z | 2019-10-11T15:50:18Z | 2019-10-11T15:50:18Z | 2019-10-11T15:54:38Z |
TST: Fix not-boxed maybe_promote test | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 4435b2518e90b..b439a3e2dfbc8 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -359,6 +359,8 @@ def maybe_promote(dtype, fill_value=np.nan):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
+ elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
+ dtype = np.dtype(np.object_)
else:
try:
fill_value = tslibs.Timestamp(fill_value).to_datetime64()
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index da2b4c28a02a5..2252e6c7b3dc9 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -603,8 +603,6 @@ def test_maybe_promote_datetime64_with_any(
else:
if boxed and box_dtype is None:
pytest.xfail("does not upcast to object")
- if not boxed:
- pytest.xfail("does not upcast to object or raises")
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
| Along with #28833 this gets close to finishing off the not-boxed cases | https://api.github.com/repos/pandas-dev/pandas/pulls/28852 | 2019-10-08T17:56:04Z | 2019-10-08T21:21:13Z | 2019-10-08T21:21:13Z | 2019-10-08T21:33:31Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.