title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
REGR: Fixed handling of Categorical in cython ops | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index f9c756b2518af..ac366337a3c59 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -19,6 +19,7 @@ Fixed regressions
- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
+- Fixed regression in ``.groupby()`` aggregations with categorical dtype using Cythonized reduction functions (e.g. ``first``) (:issue:`31450`)
- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index b52d1bb4db360..0245b9f74d944 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1394,7 +1394,9 @@ def func(self, numeric_only=numeric_only, min_count=min_count):
except DataError:
pass
except NotImplementedError as err:
- if "function is not implemented for this dtype" in str(err):
+ if "function is not implemented for this dtype" in str(
+ err
+ ) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 2d31996a8a964..934d0224a4d9a 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -377,6 +377,22 @@ def test_agg_index_has_complex_internals(index):
tm.assert_frame_equal(result, expected)
+def test_agg_cython_category_not_implemented_fallback():
+ # https://github.com/pandas-dev/pandas/issues/31450
+ df = pd.DataFrame({"col_num": [1, 1, 2, 3]})
+ df["col_cat"] = df["col_num"].astype("category")
+
+ result = df.groupby("col_num").col_cat.first()
+ expected = pd.Series(
+ [1, 2, 3], index=pd.Index([1, 2, 3], name="col_num"), name="col_cat"
+ )
+ tm.assert_series_equal(result, expected)
+
+ result = df.groupby("col_num").agg({"col_cat": "first"})
+ expected = expected.to_frame()
+ tm.assert_frame_equal(result, expected)
+
+
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = pd.Series([1, 2, 3, 4])
| Fixed by falling back to the wrapped Python version when the cython
version raises NotImplementedError.
Closes https://github.com/pandas-dev/pandas/issues/31450 | https://api.github.com/repos/pandas-dev/pandas/pulls/31668 | 2020-02-04T22:19:19Z | 2020-02-05T00:31:26Z | 2020-02-05T00:31:25Z | 2020-02-05T13:23:23Z |
BUG: fixes bug when using sep=None and comment keyword for read_csv | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 40abb8f83de2f..5962469f24843 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -185,8 +185,10 @@ I/O
- Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
``coerce_timestamps``; following pyarrow's default allows writing nanosecond
timestamps with ``version="2.0"`` (:issue:`31652`).
+- Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`)
- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
+
Plotting
^^^^^^^^
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8bc8470ae7658..ceeeb27741641 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2381,19 +2381,21 @@ class MyDialect(csv.Dialect):
dia = MyDialect
- sniff_sep = True
-
if sep is not None:
- sniff_sep = False
dia.delimiter = sep
- # attempt to sniff the delimiter
- if sniff_sep:
+ else:
+ # attempt to sniff the delimiter from the first valid line,
+ # i.e. no comment line and not in skiprows
line = f.readline()
- while self.skipfunc(self.pos):
+ lines = self._check_comments([[line]])[0]
+ while self.skipfunc(self.pos) or not lines:
self.pos += 1
line = f.readline()
+ lines = self._check_comments([[line]])[0]
- line = self._check_comments([line])[0]
+ # since `line` was a string, lines will be a list containing
+ # only a single string
+ line = lines[0]
self.pos += 1
self.line_pos += 1
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index 7367b19b40dc3..4d933fa02d36f 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -66,6 +66,24 @@ def test_sniff_delimiter(python_parser_only, kwargs):
tm.assert_frame_equal(result, expected)
+def test_sniff_delimiter_comment(python_parser_only):
+ data = """# comment line
+index|A|B|C
+# comment line
+foo|1|2|3 # ignore | this
+bar|4|5|6
+baz|7|8|9
+"""
+ parser = python_parser_only
+ result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
+ expected = DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=["A", "B", "C"],
+ index=Index(["foo", "bar", "baz"], name="index"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
parser = python_parser_only
| - [x] closes #31396
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry - added
| https://api.github.com/repos/pandas-dev/pandas/pulls/31667 | 2020-02-04T22:07:59Z | 2020-03-03T03:02:24Z | 2020-03-03T03:02:23Z | 2020-03-03T03:02:28Z |
BUG: Block.iget not wrapping timedelta64/datetime64 | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 20cfcfbde389c..5ce9b01c266f8 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
- Fixed regression in :class:`Categorical` construction with ``numpy.str_`` categories (:issue:`31499`)
+- Fixed regression in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` when selecting a row containing a single ``datetime64`` or ``timedelta64`` column (:issue:`31649`)
- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9e31ccebd0f1b..cb03fbe1770b3 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -7,7 +7,7 @@
import numpy as np
-from pandas._libs import NaT, algos as libalgos, lib, tslib, writers
+from pandas._libs import NaT, Timestamp, algos as libalgos, lib, tslib, writers
from pandas._libs.index import convert_scalar
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
@@ -2158,6 +2158,16 @@ def internal_values(self):
# Override to return DatetimeArray and TimedeltaArray
return self.array_values()
+ def iget(self, key):
+ # GH#31649 we need to wrap scalars in Timestamp/Timedelta
+ # TODO: this can be removed if we ever have 2D EA
+ result = super().iget(key)
+ if isinstance(result, np.datetime64):
+ result = Timestamp(result)
+ elif isinstance(result, np.timedelta64):
+ result = Timedelta(result)
+ return result
+
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9a01ee18928c0..ca4d1ff067f3d 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -2165,3 +2165,41 @@ def test_set_reset(self):
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
+
+
+def test_object_casting_indexing_wraps_datetimelike():
+ # GH#31649, check the indexing methods all the way down the stack
+ df = pd.DataFrame(
+ {
+ "A": [1, 2],
+ "B": pd.date_range("2000", periods=2),
+ "C": pd.timedelta_range("1 Day", periods=2),
+ }
+ )
+
+ ser = df.loc[0]
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ ser = df.iloc[0]
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ ser = df.xs(0, axis=0)
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ mgr = df._data
+ arr = mgr.fast_xs(0)
+ assert isinstance(arr[1], pd.Timestamp)
+ assert isinstance(arr[2], pd.Timedelta)
+
+ blk = mgr.blocks[mgr._blknos[1]]
+ assert blk.dtype == "M8[ns]" # we got the right block
+ val = blk.iget((0, 0))
+ assert isinstance(val, pd.Timestamp)
+
+ blk = mgr.blocks[mgr._blknos[2]]
+ assert blk.dtype == "m8[ns]" # we got the right block
+ val = blk.iget((0, 0))
+ assert isinstance(val, pd.Timedelta)
| - [x] closes #31649
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31666 | 2020-02-04T20:26:39Z | 2020-02-05T00:22:03Z | 2020-02-05T00:22:03Z | 2020-02-05T08:44:26Z |
whatsnew for MultiIndex levels caching | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 92cd2038aabe6..f9c756b2518af 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -27,6 +27,7 @@ Fixed regressions
- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
+- Fixed performance regression when indexing a ``DataFrame`` or ``Series`` with a :class:`MultiIndex` for the index using a list of labels (:issue:`31648`)
.. ---------------------------------------------------------------------------
| (cherry picked from commit 103840e974a4c8548fccab86cabc6a3161bd94e8)
Included in https://github.com/pandas-dev/pandas/pull/31664. So this won't need to be backported. | https://api.github.com/repos/pandas-dev/pandas/pulls/31665 | 2020-02-04T20:08:16Z | 2020-02-04T21:24:57Z | 2020-02-04T21:24:57Z | 2020-02-04T21:25:04Z |
Backport PR #31651: PERF: Cache MultiIndex.levels | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 92cd2038aabe6..f9c756b2518af 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -27,6 +27,7 @@ Fixed regressions
- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
+- Fixed performance regression when indexing a ``DataFrame`` or ``Series`` with a :class:`MultiIndex` for the index using a list of labels (:issue:`31648`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 75b96666080aa..6c125e17e5549 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -620,16 +620,6 @@ def from_frame(cls, df, sortorder=None, names=None):
# --------------------------------------------------------------------
- @property
- def levels(self):
- result = [
- x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
- ]
- for level in result:
- # disallow midx.levels[0].name = "foo"
- level._no_setting_name = True
- return FrozenList(result)
-
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
@@ -659,6 +649,22 @@ def array(self):
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
+ # --------------------------------------------------------------------
+ # Levels Methods
+
+ @cache_readonly
+ def levels(self):
+ # Use cache_readonly to ensure that self.get_locs doesn't repeatedly
+ # create new IndexEngine
+ # https://github.com/pandas-dev/pandas/issues/31648
+ result = [
+ x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
+ ]
+ for level in result:
+ # disallow midx.levels[0].name = "foo"
+ level._no_setting_name = True
+ return FrozenList(result)
+
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
@@ -1227,6 +1233,9 @@ def _set_names(self, names, level=None, validate=True):
)
self._names[lev] = name
+ # If .levels has been accessed, the names in our cache will be stale.
+ self._reset_cache()
+
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 074072ae581b2..57d16a739c213 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -159,7 +159,7 @@ def test_set_levels_codes_directly(idx):
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
- msg = "can't set attribute"
+ msg = "[Cc]an't set attribute"
with pytest.raises(AttributeError, match=msg):
idx.levels = new_levels
with pytest.raises(AttributeError, match=msg):
| Manual backport for https://github.com/pandas-dev/pandas/pull/31651. | https://api.github.com/repos/pandas-dev/pandas/pulls/31664 | 2020-02-04T20:04:26Z | 2020-02-04T21:24:44Z | 2020-02-04T21:24:44Z | 2020-02-04T21:25:00Z |
CLN: MultiIndex.get_value is a hive of scum and villainy | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index ea29e458fe10e..79b4cbbea7815 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,4 +1,3 @@
-import datetime
from sys import getsizeof
from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
import warnings
@@ -7,7 +6,7 @@
from pandas._config import get_option
-from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs
+from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
@@ -2321,13 +2320,20 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
def get_value(self, series, key):
# Label-based
- s = com.values_from_object(series)
- k = com.values_from_object(key)
+ if not is_hashable(key) or is_iterator(key):
+ # We allow tuples if they are hashable, whereas other Index
+ # subclasses require scalar.
+ # We have to explicitly exclude generators, as these are hashable.
+ raise InvalidIndexError(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
+
new_values = series._values[loc]
+ if is_scalar(loc):
+ return new_values
+
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(
@@ -2335,51 +2341,12 @@ def _try_mi(k):
).__finalize__(self)
try:
- return self._engine.get_value(s, k)
- except KeyError as e1:
- try:
- return _try_mi(key)
- except KeyError:
- pass
-
- try:
- return libindex.get_value_at(s, k)
- except IndexError:
+ return _try_mi(key)
+ except KeyError:
+ if is_integer(key):
+ return series._values[key]
+ else:
raise
- except TypeError:
- # generator/iterator-like
- if is_iterator(key):
- raise InvalidIndexError(key)
- else:
- raise e1
- except Exception: # pragma: no cover
- raise e1
- except TypeError:
-
- # a Timestamp will raise a TypeError in a multi-index
- # rather than a KeyError, try it here
- # note that a string that 'looks' like a Timestamp will raise
- # a KeyError! (GH5725)
- if isinstance(key, (datetime.datetime, np.datetime64, str)):
- try:
- return _try_mi(key)
- except KeyError:
- raise
- except (IndexError, ValueError, TypeError):
- pass
-
- try:
- return _try_mi(Timestamp(key))
- except (
- KeyError,
- TypeError,
- IndexError,
- ValueError,
- tslibs.OutOfBoundsDatetime,
- ):
- pass
-
- raise InvalidIndexError(key)
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py
index c15fa34283f21..7e75b5324445e 100644
--- a/pandas/tests/indexing/multiindex/test_getitem.py
+++ b/pandas/tests/indexing/multiindex/test_getitem.py
@@ -87,8 +87,8 @@ def test_series_getitem_returns_scalar(
(lambda s: s[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
(lambda s: s.loc[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
(lambda s: s.loc[(2000, 3, 4, 5)], IndexingError, "Too many indexers"),
- (lambda s: s.__getitem__(len(s)), IndexError, "index out of bounds"),
- (lambda s: s[len(s)], IndexError, "index out of bounds"),
+ (lambda s: s.__getitem__(len(s)), IndexError, "is out of bounds"),
+ (lambda s: s[len(s)], IndexError, "is out of bounds"),
(
lambda s: s.iloc[len(s)],
IndexError,
| This will in turn allow for simplification for other code that has built up kludges around MultiIndex.get_value's behavior | https://api.github.com/repos/pandas-dev/pandas/pulls/31662 | 2020-02-04T19:02:45Z | 2020-02-05T00:59:33Z | 2020-02-05T00:59:33Z | 2020-02-05T01:07:52Z |
Replaced .format with f- strings | diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py
index b6069c446160d..beaefe9109e91 100644
--- a/pandas/tests/tseries/frequencies/test_to_offset.py
+++ b/pandas/tests/tseries/frequencies/test_to_offset.py
@@ -86,7 +86,7 @@ def test_to_offset_invalid(freqstr):
# We escape string because some of our
# inputs contain regex special characters.
- msg = re.escape("Invalid frequency: {freqstr}".format(freqstr=freqstr))
+ msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
frequencies.to_offset(freqstr)
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 2801a2bf9c371..fdfa436ce6536 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -43,7 +43,8 @@ def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
("python-bits", struct.calcsize("P") * 8),
("OS", f"{sysname}"),
("OS-release", f"{release}"),
- # ("Version", "{version}".format(version=version)),
+ # FIXME: dont leave commented-out
+ # ("Version", f"{version}"),
("machine", f"{machine}"),
("processor", f"{processor}"),
("byteorder", f"{sys.byteorder}"),
@@ -114,14 +115,13 @@ def show_versions(as_json=False):
else:
maxlen = max(len(x) for x in deps)
- tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
print("")
for k, stat in deps_blob:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
def main() -> int:
| xref #29547
for
- [ ] pandas/util/_print_versions.py
- [ ] pandas/tests/tseries/frequencies/test_to_offset.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/31660 | 2020-02-04T18:39:33Z | 2020-02-05T01:00:29Z | 2020-02-05T01:00:29Z | 2020-02-05T20:22:31Z |
check first and last points' labels are correct | diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index dbe728f5238c7..979b89a87d843 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -45,12 +45,17 @@ def teardown_method(self, method):
@pytest.mark.slow
def test_ts_plot_with_tz(self, tz_aware_fixture):
- # GH2877, GH17173, GH31205
+ # GH2877, GH17173, GH31205, GH31580
tz = tz_aware_fixture
index = date_range("1/1/2011", periods=2, freq="H", tz=tz)
ts = Series([188.5, 328.25], index=index)
with tm.assert_produces_warning(None):
_check_plot_works(ts.plot)
+ ax = ts.plot()
+ xdata = list(ax.get_lines())[0].get_xdata()
+ # Check first and last points' labels are correct
+ assert (xdata[0].hour, xdata[0].minute) == (0, 0)
+ assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)
def test_fontsize_set_correctly(self):
# For issue #8765
| - [ ] closes #31580
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31659 | 2020-02-04T18:11:52Z | 2020-02-05T01:02:58Z | 2020-02-05T01:02:58Z | 2020-02-25T17:12:16Z |
API: allow step!=1 slice with IntervalIndex | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 44deab25db695..28d94243bea4b 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -67,6 +67,7 @@ Other enhancements
- When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`)
- `OptionError` is now exposed in `pandas.errors` (:issue:`27553`)
- :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`)
+- Positional slicing on a :class:`IntervalIndex` now supports slices with ``step > 1`` (:issue:`31658`)
-
.. ---------------------------------------------------------------------------
@@ -246,7 +247,6 @@ Strings
Interval
^^^^^^^^
-
-
-
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index 3858e750326b4..71fd5b6aab821 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -11,6 +11,7 @@
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
+ is_integer,
is_integer_dtype,
is_list_like,
)
@@ -20,6 +21,34 @@
# Indexer Identification
+def is_valid_positional_slice(slc: slice) -> bool:
+ """
+ Check if a slice object can be interpreted as a positional indexer.
+
+ Parameters
+ ----------
+ slc : slice
+
+ Returns
+ -------
+ bool
+
+ Notes
+ -----
+ A valid positional slice may also be interpreted as a label-based slice
+ depending on the index being sliced.
+ """
+
+ def is_int_or_none(val):
+ return val is None or is_integer(val)
+
+ return (
+ is_int_or_none(slc.start)
+ and is_int_or_none(slc.stop)
+ and is_int_or_none(slc.step)
+ )
+
+
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 6968837fb13e6..efdaf5a331f5f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -39,6 +39,7 @@
from pandas.core.algorithms import take_1d
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
+from pandas.core.indexers import is_valid_positional_slice
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
@@ -866,7 +867,16 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
def _convert_slice_indexer(self, key: slice, kind: str):
if not (key.step is None or key.step == 1):
- raise ValueError("cannot support not-default step in a slice")
+ # GH#31658 if label-based, we require step == 1,
+ # if positional, we disallow float start/stop
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
+ if kind == "loc":
+ raise ValueError(msg)
+ elif kind == "getitem":
+ if not is_valid_positional_slice(key):
+ # i.e. this cannot be interpreted as a positional slice
+ raise ValueError(msg)
+
return super()._convert_slice_indexer(key, kind)
@Appender(Index.where.__doc__)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 0c4a790646a81..73af98c16afae 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2598,7 +2598,8 @@ def test_convert_almost_null_slice(indices):
key = slice(None, None, "foo")
if isinstance(idx, pd.IntervalIndex):
- with pytest.raises(ValueError, match="cannot support not-default step"):
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
+ with pytest.raises(ValueError, match=msg):
idx._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py
index 43036fbbd9844..03c3034772bc6 100644
--- a/pandas/tests/indexing/interval/test_interval_new.py
+++ b/pandas/tests/indexing/interval/test_interval_new.py
@@ -128,9 +128,6 @@ def test_loc_with_slices(self):
with pytest.raises(NotImplementedError, match=msg):
s[Interval(3, 4, closed="left") :]
- # TODO with non-existing intervals ?
- # s.loc[Interval(-1, 0):Interval(2, 3)]
-
# slice of scalar
expected = s.iloc[:3]
@@ -143,9 +140,32 @@ def test_loc_with_slices(self):
tm.assert_series_equal(expected, s[:2.5])
tm.assert_series_equal(expected, s[0.1:2.5])
- # slice of scalar with step != 1
- with pytest.raises(ValueError):
- s[0:4:2]
+ def test_slice_step_ne1(self):
+ # GH#31658 slice of scalar with step != 1
+ s = self.s
+ expected = s.iloc[0:4:2]
+
+ result = s[0:4:2]
+ tm.assert_series_equal(result, expected)
+
+ result2 = s[0:4][::2]
+ tm.assert_series_equal(result2, expected)
+
+ def test_slice_float_start_stop(self):
+ # GH#31658 slicing with integers is positional, with floats is not
+ # supported
+ ser = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
+
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
+ with pytest.raises(ValueError, match=msg):
+ ser[1.5:9.5:2]
+
+ def test_slice_interval_step(self):
+ # GH#31658 allows for integer step!=1, not Interval step
+ s = self.s
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
+ with pytest.raises(ValueError, match=msg):
+ s[0 : 4 : Interval(0, 1)]
def test_loc_with_overlap(self):
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
See discussion https://github.com/pandas-dev/pandas/pull/16386#discussion_r374807689
cc @jorisvandenbossche @jschendel | https://api.github.com/repos/pandas-dev/pandas/pulls/31658 | 2020-02-04T18:08:44Z | 2020-03-08T16:11:22Z | 2020-03-08T16:11:22Z | 2020-03-09T18:05:33Z |
Backport PR #31502 on branch 1.0.x (BUG: Fixed IntervalArray[int].shift) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index e0182e4e3c1f2..f41eca9d6b846 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -56,6 +56,9 @@ Bug fixes
- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
+**Interval**
+
+- Bug in :meth:`Series.shift` with ``interval`` dtype raising a ``TypeError`` when shifting an interval array of integers or datetimes (:issue:`34195`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 23cf5f317ac7d..cc41ac1dc19a9 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -27,6 +27,7 @@
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
+ ABCExtensionArray,
ABCIndexClass,
ABCInterval,
ABCIntervalIndex,
@@ -789,6 +790,33 @@ def size(self) -> int:
# Avoid materializing self.values
return self.left.size
+ def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArray:
+ if not len(self) or periods == 0:
+ return self.copy()
+
+ if isna(fill_value):
+ fill_value = self.dtype.na_value
+
+ # ExtensionArray.shift doesn't work for two reasons
+ # 1. IntervalArray.dtype.na_value may not be correct for the dtype.
+ # 2. IntervalArray._from_sequence only accepts NaN for missing values,
+ # not other values like NaT
+
+ empty_len = min(abs(periods), len(self))
+ if isna(fill_value):
+ fill_value = self.left._na_value
+ empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
+ else:
+ empty = self._from_sequence([fill_value] * empty_len)
+
+ if periods > 0:
+ a = empty
+ b = self[:-periods]
+ else:
+ a = self[abs(periods) :]
+ b = empty
+ return self._concat_same_type([a, b])
+
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index e046d87780bb4..a43ea7e40a16a 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -81,6 +81,24 @@ def test_where_raises(self, other):
with pytest.raises(ValueError, match=match):
ser.where([True, False, True], other=other)
+ def test_shift(self):
+ # https://github.com/pandas-dev/pandas/issues/31495
+ a = IntervalArray.from_breaks([1, 2, 3])
+ result = a.shift()
+ # int -> float
+ expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
+ tm.assert_interval_array_equal(result, expected)
+
+ def test_shift_datetime(self):
+ a = IntervalArray.from_breaks(pd.date_range("2000", periods=4))
+ result = a.shift(2)
+ expected = a.take([-1, -1, 0], allow_fill=True)
+ tm.assert_interval_array_equal(result, expected)
+
+ result = a.shift(-1)
+ expected = a.take([1, 2, -1], allow_fill=True)
+ tm.assert_interval_array_equal(result, expected)
+
class TestSetitem:
def test_set_na(self, left_right_dtypes):
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 24ab7fe3fc845..6ed8b782deffa 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -280,6 +280,13 @@ def test_shift_empty_array(self, data, periods):
expected = empty
self.assert_extension_array_equal(result, expected)
+ def test_shift_zero_copies(self, data):
+ result = data.shift(0)
+ assert result is not data
+
+ result = data[:0].shift(2)
+ assert result is not data
+
def test_shift_fill_value(self, data):
arr = data[:4]
fill_value = data[0]
| Backport PR #31502: BUG: Fixed IntervalArray[int].shift | https://api.github.com/repos/pandas-dev/pandas/pulls/31656 | 2020-02-04T16:56:19Z | 2020-02-04T19:25:30Z | 2020-02-04T19:25:30Z | 2020-02-04T19:25:31Z |
REF: call _convert_scalar_indexer upfront in Series.__getitem__ | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2ffb22d2d272f..5e29df3560761 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -852,6 +852,8 @@ def __getitem__(self, key):
return self
key_is_scalar = is_scalar(key)
+ if key_is_scalar:
+ key = self.index._convert_scalar_indexer(key, kind="getitem")
if key_is_scalar or isinstance(self.index, MultiIndex):
# Otherwise index.get_value will raise InvalidIndexError
@@ -866,11 +868,6 @@ def __getitem__(self, key):
# kludge
pass
else:
-
- # we can try to coerce the indexer (or this will raise)
- new_key = self.index._convert_scalar_indexer(key, kind="getitem")
- if type(new_key) != type(key):
- return self.__getitem__(new_key)
raise
if not key_is_scalar:
| This simplifies a try/except block and moves us towards being able to tighten what we accept in _convert_scalar_indexer among others. I think we'll soon be able to get rid of the kludge on L866/868. | https://api.github.com/repos/pandas-dev/pandas/pulls/31655 | 2020-02-04T16:30:51Z | 2020-02-05T02:44:44Z | 2020-02-05T02:44:44Z | 2020-02-05T02:47:04Z |
Backport PR #31528 on branch 1.0.x (REGR: Categorical with np.str_ categories) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index e0182e4e3c1f2..561d7a1d25cca 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -22,6 +22,7 @@ Fixed regressions
- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
+- Fixed regression in :class:`Categorical` construction with ``numpy.str_`` categories (:issue:`31499`)
- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 7d57c67e70b58..6671375f628e7 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -670,7 +670,9 @@ cdef class StringHashTable(HashTable):
val = values[i]
if isinstance(val, str):
- v = get_c_string(val)
+ # GH#31499 if we have a np.str_ get_c_string wont recognize
+ # it as a str, even though isinstance does.
+ v = get_c_string(<str>val)
else:
v = get_c_string(self.na_string_sentinel)
vecs[i] = v
@@ -703,7 +705,9 @@ cdef class StringHashTable(HashTable):
val = values[i]
if isinstance(val, str):
- v = get_c_string(val)
+ # GH#31499 if we have a np.str_ get_c_string wont recognize
+ # it as a str, even though isinstance does.
+ v = get_c_string(<str>val)
else:
v = get_c_string(self.na_string_sentinel)
vecs[i] = v
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index cfba3da354d44..70e1421c8dcf4 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -408,6 +408,11 @@ def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
+ def test_constructor_np_strs(self):
+ # GH#31499 Hastable.map_locations needs to work on np.str_ objects
+ cat = pd.Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
+ assert all(isinstance(x, np.str_) for x in cat.categories)
+
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
| Backport PR #31528: REGR: Categorical with np.str_ categories | https://api.github.com/repos/pandas-dev/pandas/pulls/31654 | 2020-02-04T16:24:38Z | 2020-02-04T19:25:21Z | 2020-02-04T19:25:21Z | 2020-02-04T19:25:21Z |
ENH: Timestamp constructor now raises more explanatory error message | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 705c335acfb48..cbc7159f55b8f 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -114,6 +114,7 @@ Datetimelike
- :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`)
- Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`)
- Bug in :meth:`Period.to_timestamp`, :meth:`Period.start_time` with microsecond frequency returning a timestamp one nanosecond earlier than the correct time (:issue:`31475`)
+- :class:`Timestamp` raising confusing error message when year, month or day is missing (:issue:`31200`)
Timedelta
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index b8c462abe35f1..9f3b4a8a554b5 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -411,10 +411,25 @@ class Timestamp(_Timestamp):
)
elif ts_input is _no_input:
- # User passed keyword arguments.
- ts_input = datetime(year, month, day, hour or 0,
- minute or 0, second or 0,
- microsecond or 0)
+ # GH 31200
+ # When year, month or day is not given, we call the datetime
+ # constructor to make sure we get the same error message
+ # since Timestamp inherits datetime
+ datetime_kwargs = {
+ "hour": hour or 0,
+ "minute": minute or 0,
+ "second": second or 0,
+ "microsecond": microsecond or 0
+ }
+ if year is not None:
+ datetime_kwargs["year"] = year
+ if month is not None:
+ datetime_kwargs["month"] = month
+ if day is not None:
+ datetime_kwargs["day"] = day
+
+ ts_input = datetime(**datetime_kwargs)
+
elif is_integer_object(freq):
# User passed positional arguments:
# Timestamp(year, month, day[, hour[, minute[, second[,
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index b4a7173da84d0..4c75d1ebcd377 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -548,3 +548,16 @@ def test_timestamp_constructor_identity():
expected = Timestamp("2017-01-01T12")
result = Timestamp(expected)
assert result is expected
+
+
+@pytest.mark.parametrize("kwargs", [{}, {"year": 2020}, {"year": 2020, "month": 1}])
+def test_constructor_missing_keyword(kwargs):
+ # GH 31200
+
+ # The exact error message of datetime() depends on its version
+ msg1 = r"function missing required argument '(year|month|day)' \(pos [123]\)"
+ msg2 = r"Required argument '(year|month|day)' \(pos [123]\) not found"
+ msg = "|".join([msg1, msg2])
+
+ with pytest.raises(TypeError, match=msg):
+ Timestamp(**kwargs)
| Timestamp constructor now raises explanatory error message when year, month or day is missing
- [x] closes #31200
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31653 | 2020-02-04T15:33:34Z | 2020-02-26T02:19:00Z | 2020-02-26T02:19:00Z | 2020-05-29T10:48:00Z |
BUG: avoid specifying default coerce_timestamps in to_parquet | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index dc7edd8db662e..31e641e4a08bf 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -180,7 +180,9 @@ I/O
- Bug in :meth:`read_json` where integer overflow was occuring when json contains big number strings. (:issue:`30320`)
- `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`)
- Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`)
--
+- Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
+ ``coerce_timestamps``; following pyarrow's default allows writing nanosecond
+ timestamps with ``version="2.0"`` (:issue:`31652`).
Plotting
^^^^^^^^
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 98f2eb3929b59..926635062d853 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -85,7 +85,6 @@ def write(
df: DataFrame,
path,
compression="snappy",
- coerce_timestamps="ms",
index: Optional[bool] = None,
partition_cols=None,
**kwargs,
@@ -103,17 +102,12 @@ def write(
table,
path,
compression=compression,
- coerce_timestamps=coerce_timestamps,
partition_cols=partition_cols,
**kwargs,
)
else:
self.api.parquet.write_table(
- table,
- path,
- compression=compression,
- coerce_timestamps=coerce_timestamps,
- **kwargs,
+ table, path, compression=compression, **kwargs,
)
def read(self, path, columns=None, **kwargs):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index d51c712ed5abd..7ed8d8f22764c 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -564,6 +564,13 @@ def test_additional_extension_types(self, pa):
)
check_round_trip(df, pa)
+ @td.skip_if_no("pyarrow", min_version="0.14")
+ def test_timestamp_nanoseconds(self, pa):
+ # with version 2.0, pyarrow defaults to writing the nanoseconds, so
+ # this should work without error
+ df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
+ check_round_trip(df, pa, write_kwargs={"version": "2.0"})
+
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.3.2")
| Looking into the usage question of https://github.com/pandas-dev/pandas/issues/31572, I noticed that specifying the version to allow writing nanoseconds to parquet worked in plain pyarrow code, but not with pandas' `to_parquet`.
This is because we hardcode `coerce_timestamps="ms"` while the default is `None`, which has version-dependent behaviour (eg if version="2.0", actually write the nanosecond data) | https://api.github.com/repos/pandas-dev/pandas/pulls/31652 | 2020-02-04T15:09:16Z | 2020-02-05T01:09:00Z | 2020-02-05T01:09:00Z | 2020-10-03T05:07:49Z |
PERF: Cache MultiIndex.levels | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index c560d81ba95f6..ea29e458fe10e 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -677,8 +677,11 @@ def __len__(self) -> int:
# --------------------------------------------------------------------
# Levels Methods
- @property
+ @cache_readonly
def levels(self):
+ # Use cache_readonly to ensure that self.get_locs doesn't repeatedly
+ # create new IndexEngine
+ # https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
@@ -1302,6 +1305,9 @@ def _set_names(self, names, level=None, validate=True):
)
self._names[lev] = name
+ # If .levels has been accessed, the names in our cache will be stale.
+ self._reset_cache()
+
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 074072ae581b2..57d16a739c213 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -159,7 +159,7 @@ def test_set_levels_codes_directly(idx):
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
- msg = "can't set attribute"
+ msg = "[Cc]an't set attribute"
with pytest.raises(AttributeError, match=msg):
idx.levels = new_levels
with pytest.raises(AttributeError, match=msg):
| Closes https://github.com/pandas-dev/pandas/issues/31648.
This should have been caught by the benchmark MultiIdnex.time_index_slice. Confirming that now.. | https://api.github.com/repos/pandas-dev/pandas/pulls/31651 | 2020-02-04T15:00:02Z | 2020-02-04T19:26:00Z | 2020-02-04T19:25:59Z | 2020-02-04T20:08:40Z |
DOC: Fix typo in Getting Started docs | diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index 4fef5efbd1551..277080006cb3c 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -1973,7 +1973,7 @@ Pandas has two ways to store strings.
1. ``object`` dtype, which can hold any Python object, including strings.
2. :class:`StringDtype`, which is dedicated to strings.
-Generally, we recommend using :class:`StringDtype`. See :ref:`text.types` fore more.
+Generally, we recommend using :class:`StringDtype`. See :ref:`text.types` for more.
Finally, arbitrary objects may be stored using the ``object`` dtype, but should
be avoided to the extent possible (for performance and interoperability with
| https://api.github.com/repos/pandas-dev/pandas/pulls/31642 | 2020-02-04T04:55:42Z | 2020-02-04T11:20:00Z | 2020-02-04T11:20:00Z | 2020-02-04T11:20:10Z | |
DOC: Removed numeric_only parameter from pd.DataFrame.mad docs | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3bb584d4d34e8..b809bc1a1c496 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9960,7 +9960,7 @@ def _add_numeric_operations(cls):
see_also="",
examples="",
)
- @Appender(_num_doc)
+ @Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
@@ -10329,6 +10329,26 @@ def _doc_parms(cls):
%(examples)s
"""
+_num_doc_mad = """
+%(desc)s
+
+Parameters
+----------
+axis : %(axis_descr)s
+ Axis for the function to be applied on.
+skipna : bool, default None
+ Exclude NA/null values when computing the result.
+level : int or level name, default None
+ If the axis is a MultiIndex (hierarchical), count along a
+ particular level, collapsing into a %(name1)s.
+
+Returns
+-------
+%(name1)s or %(name2)s (if level specified)\
+%(see_also)s\
+%(examples)s
+"""
+
_num_ddof_doc = """
%(desc)s
| - closes #29079
| https://api.github.com/repos/pandas-dev/pandas/pulls/31641 | 2020-02-04T02:45:15Z | 2020-02-09T17:19:45Z | 2020-02-09T17:19:45Z | 2020-05-15T08:47:48Z |
CLN: prelims for MultiIndex.get_value | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 79b4cbbea7815..68062c413cdb3 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,5 +1,15 @@
from sys import getsizeof
-from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Hashable,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
import warnings
import numpy as np
@@ -27,7 +37,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
-from pandas.core.dtypes.generic import ABCDataFrame
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
@@ -56,6 +66,9 @@
pprint_thing,
)
+if TYPE_CHECKING:
+ from pandas import Series # noqa:F401
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
@@ -2318,8 +2331,10 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
# --------------------------------------------------------------------
# Indexing Methods
- def get_value(self, series, key):
+ def get_value(self, series: "Series", key):
# Label-based
+ assert isinstance(series, ABCSeries)
+
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
@@ -2338,7 +2353,7 @@ def _try_mi(k):
new_index = maybe_droplevels(new_index, k)
return series._constructor(
new_values, index=new_index, name=series.name
- ).__finalize__(self)
+ ).__finalize__(series)
try:
return _try_mi(key)
| Unravelling MultiIndex.get_value and get_loc is shaping up to be a PITA. This separates out the easiest parts of the get_value cleanup.
@TomAugspurger can you confirm that the change made here to the `__finalize__` call is correct? If so, suggest a test?
| https://api.github.com/repos/pandas-dev/pandas/pulls/31640 | 2020-02-04T02:08:12Z | 2020-02-06T17:22:52Z | null | 2020-02-06T17:22:57Z |
CLN: Replace format with f-strings in test_repr_info | diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 49e6fe4940e18..a7e01d8f1fd6d 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -223,8 +223,7 @@ def test_info_verbose(self):
for i, line in enumerate(lines):
if i >= start and i < start + size:
- index = i - start
- line_nr = " {} ".format(index)
+ line_nr = f" {i - start} "
assert line.startswith(line_nr)
def test_info_memory(self):
@@ -236,7 +235,7 @@ def test_info_memory(self):
bytes = float(df.memory_usage().sum())
expected = textwrap.dedent(
- """\
+ f"""\
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
@@ -244,10 +243,8 @@ def test_info_memory(self):
--- ------ -------------- -----
0 a 2 non-null int64
dtypes: int64(1)
- memory usage: {} bytes
- """.format(
- bytes
- )
+ memory usage: {bytes} bytes
+ """
)
assert result == expected
@@ -313,9 +310,7 @@ def test_info_shows_column_dtypes(self):
)
assert header in res
for i, dtype in enumerate(dtypes):
- name = " {i:d} {i:d} {n:d} non-null {dtype}".format(
- i=i, n=n, dtype=dtype
- )
+ name = f" {i:d} {i:d} {n:d} non-null {dtype}"
assert name in res
def test_info_max_cols(self):
| Updated `pandas/tests/frame/test_repr_info.py` to use f-strings
ref: https://github.com/pandas-dev/pandas/issues/29547
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/31639 | 2020-02-04T02:02:09Z | 2020-02-05T01:12:09Z | 2020-02-05T01:12:09Z | 2020-02-05T01:12:15Z |
REF: call _maybe_cast_indexer upfront, better exception messages | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6a7551391f2a8..4f8136c073587 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2888,10 +2888,11 @@ def get_loc(self, key, method=None, tolerance=None):
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
+ casted_key = self._maybe_cast_indexer(key)
try:
- return self._engine.get_loc(key)
+ return self._engine.get_loc(casted_key)
except KeyError:
- return self._engine.get_loc(self._maybe_cast_indexer(key))
+ raise KeyError(key)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index b08280a712642..21a4773fa3683 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -392,7 +392,7 @@ def test_get_loc_missing_nan():
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
- with pytest.raises(KeyError, match=r"^3\.0$"):
+ with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match=r"^nan$"):
idx.get_loc(np.nan)
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 992a91ad8a528..1b504ce99604d 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -385,7 +385,7 @@ def test_get_loc_missing_nan(self):
# GH 8569
idx = Float64Index([1, 2])
assert idx.get_loc(1) == 0
- with pytest.raises(KeyError, match=r"^3\.0$"):
+ with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 5530896a90941..89647e87445fc 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -107,7 +107,7 @@ def test_scalar_non_numeric(self):
"mixed",
}:
error = KeyError
- msg = r"^3$"
+ msg = r"^3\.0$"
else:
error = TypeError
msg = (
@@ -187,7 +187,7 @@ def test_scalar_with_mixed(self):
with pytest.raises(TypeError, match=msg):
idxr(s2)[1.0]
- with pytest.raises(KeyError, match=r"^1$"):
+ with pytest.raises(KeyError, match=r"^1\.0$"):
s2.loc[1.0]
result = s2.loc["b"]
@@ -213,7 +213,7 @@ def test_scalar_with_mixed(self):
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s3.iloc[1.0]
- with pytest.raises(KeyError, match=r"^1$"):
+ with pytest.raises(KeyError, match=r"^1\.0$"):
s3.loc[1.0]
result = s3.loc[1.5]
@@ -666,11 +666,11 @@ def test_floating_misc(self):
# value not found (and no fallbacking at all)
# scalar integers
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s.loc[4]
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s.loc[4]
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s[4]
# fancy floats/integers create the correct entry (as nan)
| This will actually allow us to remove e.g. CategoricalIndex.get_loc by moving the 2 relevant lines into CategoricalIndex._maybe_cast_indexer. Large parts of DTI/TDI/PI.get_loc will also be simplifiable.
We've got both Index._maybe_cast_indexer and Index._convert_scalar_indexer which hopefully we'll only need one of. | https://api.github.com/repos/pandas-dev/pandas/pulls/31638 | 2020-02-04T01:58:07Z | 2020-02-05T01:13:11Z | 2020-02-05T01:13:11Z | 2020-02-05T01:18:18Z |
Backport PR #31631 on branch 1.0.x (REGR: Fixed setitem with MultiIndex) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 180411afb117d..e0182e4e3c1f2 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5624bb5799104..35909e1693b3f 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -926,7 +926,8 @@ def _setitem_with_indexer(self, indexer, value):
# we can directly set the series here
# as we select a slice indexer on the mi
- idx = index._convert_slice_indexer(idx)
+ if isinstance(idx, slice):
+ idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index aebd1ad2573ed..1e641760f7e8d 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -414,6 +414,16 @@ def test_astype_assignment_with_dups(self):
df["A"] = df["A"].astype(np.float64)
tm.assert_index_equal(df.index, index)
+ def test_setitem_nonmonotonic(self):
+ # https://github.com/pandas-dev/pandas/issues/31449
+ index = pd.MultiIndex.from_tuples(
+ [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"]
+ )
+ df = pd.DataFrame(data=[0, 1, 2], index=index, columns=["e"])
+ df.loc["a", "e"] = np.arange(99, 101, dtype="int64")
+ expected = pd.DataFrame({"e": [99, 1, 100]}, index=index)
+ tm.assert_frame_equal(df, expected)
+
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
| Backport PR #31631: REGR: Fixed setitem with MultiIndex | https://api.github.com/repos/pandas-dev/pandas/pulls/31637 | 2020-02-03T23:20:38Z | 2020-02-04T07:28:01Z | 2020-02-04T07:28:01Z | 2020-02-04T07:28:01Z |
CLN: Unreachable branch in Loc._getitem_iterable | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index e0b11522c7be4..91e86112f5efd 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1477,14 +1477,12 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
def _getitem_iterable(self, key, axis: int):
"""
- Index current object with an an iterable key.
-
- The iterable key can be a boolean indexer or a collection of keys.
+ Index current object with an an iterable collection of keys.
Parameters
----------
key : iterable
- Targeted labels or boolean indexer.
+ Targeted labels.
axis: int
Dimension on which the indexing is being made.
@@ -1493,30 +1491,20 @@ def _getitem_iterable(self, key, axis: int):
KeyError
If no key was found. Will change in the future to raise if not all
keys were found.
- IndexingError
- If the boolean indexer is unalignable with the object being
- indexed.
Returns
-------
scalar, DataFrame, or Series: indexed value(s).
"""
- # caller is responsible for ensuring non-None axis
+ # we assume that not com.is_bool_indexer(key), as that is
+ # handled before we get here.
self._validate_key(key, axis)
- labels = self.obj._get_axis(axis)
-
- if com.is_bool_indexer(key):
- # A boolean indexer
- key = check_bool_indexer(labels, key)
- (inds,) = key.nonzero()
- return self.obj._take_with_is_copy(inds, axis=axis)
- else:
- # A collection of keys
- keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
- return self.obj._reindex_with_indexers(
- {axis: [keyarr, indexer]}, copy=True, allow_dups=True
- )
+ # A collection of keys
+ keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
+ return self.obj._reindex_with_indexers(
+ {axis: [keyarr, indexer]}, copy=True, allow_dups=True
+ )
def _validate_read_indexer(
self, key, indexer, axis: int, raise_missing: bool = False
@@ -2055,11 +2043,8 @@ def _convert_to_indexer(self, key, axis: int):
elif is_float(key):
return self._convert_scalar_indexer(key, axis)
- try:
- self._validate_key(key, axis)
- return key
- except ValueError:
- raise ValueError(f"Can only index by location with a [{self._valid_types}]")
+ self._validate_key(key, axis)
+ return key
class _ScalarAccessIndexer(_NDFrameIndexerBase):
| https://api.github.com/repos/pandas-dev/pandas/pulls/31636 | 2020-02-03T23:07:09Z | 2020-02-04T07:37:37Z | 2020-02-04T07:37:37Z | 2020-02-04T16:58:18Z | |
REF: implement ExtensionIndex._concat_same_dtype, use for IntervalIndex | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index b143ff0aa9c02..2f2fc453f02b8 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -80,16 +80,7 @@ def wrapper(left, right):
cache=True,
)
@inherit_names(
- [
- "__iter__",
- "mean",
- "freq",
- "freqstr",
- "_ndarray_values",
- "asi8",
- "_box_values",
- "_box_func",
- ],
+ ["mean", "freq", "freqstr", "asi8", "_box_values", "_box_func"],
DatetimeLikeArrayMixin,
)
class DatetimeIndexOpsMixin(ExtensionIndex):
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 66b551f654bf1..04b4b275bf90a 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -196,6 +196,9 @@ class ExtensionIndex(Index):
Index subclass for indexes backed by ExtensionArray.
"""
+ # The base class already passes through to _data:
+ # size, __len__, dtype
+
_data: ExtensionArray
__eq__ = _make_wrapped_comparison_op("__eq__")
@@ -205,6 +208,9 @@ class ExtensionIndex(Index):
__le__ = _make_wrapped_comparison_op("__le__")
__ge__ = _make_wrapped_comparison_op("__ge__")
+ # ---------------------------------------------------------------------
+ # NDarray-Like Methods
+
def __getitem__(self, key):
result = self._data[key]
if isinstance(result, type(self._data)):
@@ -217,6 +223,8 @@ def __getitem__(self, key):
def __iter__(self):
return self._data.__iter__()
+ # ---------------------------------------------------------------------
+
@property
def _ndarray_values(self) -> np.ndarray:
return self._data._ndarray_values
@@ -235,6 +243,10 @@ def repeat(self, repeats, axis=None):
result = self._data.repeat(repeats, axis=axis)
return self._shallow_copy(result)
+ def _concat_same_dtype(self, to_concat, name):
+ arr = type(self._data)._concat_same_type(to_concat)
+ return type(self)._simple_new(arr, name=name)
+
@Appender(Index.take.__doc__)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 03fb8db2e1e1e..f77785a29b262 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -183,23 +183,10 @@ def func(intvidx_self, other, sort=False):
)
@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
@inherit_names(
- [
- "__len__",
- "__array__",
- "overlaps",
- "contains",
- "size",
- "dtype",
- "left",
- "right",
- "length",
- ],
- IntervalArray,
+ ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray,
)
@inherit_names(
- ["is_non_overlapping_monotonic", "mid", "_ndarray_values", "closed"],
- IntervalArray,
- cache=True,
+ ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True,
)
class IntervalIndex(IntervalMixin, ExtensionIndex):
_typ = "intervalindex"
@@ -943,18 +930,6 @@ def insert(self, loc, item):
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
- def _concat_same_dtype(self, to_concat, name):
- """
- assert that we all have the same .closed
- we allow a 0-len index here as well
- """
- if not len({i.closed for i in to_concat if len(i)}) == 1:
- raise ValueError(
- "can only append two IntervalIndex objects "
- "that are closed on the same side"
- )
- return super()._concat_same_dtype(to_concat, name)
-
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
result = self._data.take(
@@ -962,14 +937,6 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
)
return self._shallow_copy(result)
- def __getitem__(self, value):
- result = self._data[value]
- if isinstance(result, IntervalArray):
- return self._shallow_copy(result)
- else:
- # scalar
- return result
-
# --------------------------------------------------------------------
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 47a0ba7fe0f21..d010060880703 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -673,10 +673,7 @@ def test_append(self, closed):
)
tm.assert_index_equal(result, expected)
- msg = (
- "can only append two IntervalIndex objects that are closed "
- "on the same side"
- )
+ msg = "Intervals must all be closed on the same side"
for other_closed in {"left", "right", "both", "neither"} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed
| https://api.github.com/repos/pandas-dev/pandas/pulls/31635 | 2020-02-03T23:00:25Z | 2020-02-10T12:52:42Z | 2020-02-10T12:52:42Z | 2020-02-10T16:40:26Z | |
REGR: Fixed setitem with MultiIndex | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 180411afb117d..e0182e4e3c1f2 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 77003719360d9..e0b11522c7be4 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -893,7 +893,8 @@ def _setitem_with_indexer(self, indexer, value):
# we can directly set the series here
# as we select a slice indexer on the mi
- idx = index._convert_slice_indexer(idx)
+ if isinstance(idx, slice):
+ idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index aebd1ad2573ed..1e641760f7e8d 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -414,6 +414,16 @@ def test_astype_assignment_with_dups(self):
df["A"] = df["A"].astype(np.float64)
tm.assert_index_equal(df.index, index)
+ def test_setitem_nonmonotonic(self):
+ # https://github.com/pandas-dev/pandas/issues/31449
+ index = pd.MultiIndex.from_tuples(
+ [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"]
+ )
+ df = pd.DataFrame(data=[0, 1, 2], index=index, columns=["e"])
+ df.loc["a", "e"] = np.arange(99, 101, dtype="int64")
+ expected = pd.DataFrame({"e": [99, 1, 100]}, index=index)
+ tm.assert_frame_equal(df, expected)
+
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
| Closes https://github.com/pandas-dev/pandas/issues/31449
| https://api.github.com/repos/pandas-dev/pandas/pulls/31631 | 2020-02-03T20:37:44Z | 2020-02-03T23:20:27Z | 2020-02-03T23:20:27Z | 2020-02-03T23:26:28Z |
BUG: Series.xs boxing datetime64 incorrectly | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 27b28c1c08e23..ee9eed69195ac 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -155,6 +155,7 @@ Indexing
- Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`)
- Bug in :meth:`PeriodIndex.is_monotonic` incorrectly returning ``True`` when containing leading ``NaT`` entries (:issue:`31437`)
- Bug in :meth:`DatetimeIndex.get_loc` raising ``KeyError`` with converted-integer key instead of the user-passed key (:issue:`31425`)
+- Bug in :meth:`Series.xs` incorrectly returning ``Timestamp`` instead of ``datetime64`` in some object-dtype cases (:issue:`31630`)
Missing
^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3bb584d4d34e8..62a4878f1f12e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3442,15 +3442,14 @@ class animal locomotion
new_index = self.index[loc]
if is_scalar(loc):
- new_values = self._data.fast_xs(loc)
+ # In this case loc should be an integer
+ if self.ndim == 1:
+ # if we encounter an array-like and we only have 1 dim
+ # that means that their are list/ndarrays inside the Series!
+ # so just return them (GH 6394)
+ return self._values[loc]
- # may need to box a datelike-scalar
- #
- # if we encounter an array-like and we only have 1 dim
- # that means that their are list/ndarrays inside the Series!
- # so just return them (GH 6394)
- if not is_list_like(new_values) or self.ndim == 1:
- return com.maybe_box_datetimelike(new_values)
+ new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 526863d2e5ec3..08ae0b02169d4 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1565,7 +1565,7 @@ def fast_xs(self, loc):
fast path for getting a cross-section
return a view of the data
"""
- return self._block.values[loc]
+ raise NotImplementedError("Use series._values[loc] instead")
def concat(self, to_concat, new_axis) -> "SingleBlockManager":
"""
diff --git a/pandas/tests/series/indexing/test_xs.py b/pandas/tests/series/indexing/test_xs.py
new file mode 100644
index 0000000000000..43458ca2ebeb2
--- /dev/null
+++ b/pandas/tests/series/indexing/test_xs.py
@@ -0,0 +1,17 @@
+import numpy as np
+
+import pandas as pd
+
+
+def test_xs_datetimelike_wrapping():
+ # GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
+ arr = pd.date_range("2016-01-01", periods=3)._data._data
+
+ ser = pd.Series(arr, dtype=object)
+ for i in range(len(ser)):
+ ser.iloc[i] = arr[i]
+ assert ser.dtype == object
+ assert isinstance(ser[0], np.datetime64)
+
+ result = ser.xs(0)
+ assert isinstance(result, np.datetime64)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31630 | 2020-02-03T20:30:21Z | 2020-02-05T01:18:29Z | 2020-02-05T01:18:29Z | 2020-02-05T01:19:16Z |
Backport PR #31622 on branch 1.0.x (DOC: add redirect for moved json_normalize docstring) | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 0a71f037d23c3..3a990b09e7f7d 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -46,7 +46,10 @@ developer,development/developer
extending,development/extending
internals,development/internals
-# api
+# api moved function
+reference/api/pandas.io.json.json_normalize,pandas.json_normalize
+
+# api rename
api,reference/index
generated/pandas.api.extensions.ExtensionArray.argsort,../reference/api/pandas.api.extensions.ExtensionArray.argsort
generated/pandas.api.extensions.ExtensionArray.astype,../reference/api/pandas.api.extensions.ExtensionArray.astype
| Backport PR #31622: DOC: add redirect for moved json_normalize docstring | https://api.github.com/repos/pandas-dev/pandas/pulls/31629 | 2020-02-03T19:39:53Z | 2020-02-04T07:27:29Z | 2020-02-04T07:27:29Z | 2020-02-04T07:27:29Z |
replacing .format with f-strings | diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 04fd4835469a9..78b630bb5ada1 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -60,9 +60,7 @@ def compare_element(result, expected, typ, version=None):
assert result == expected
assert result.freq == expected.freq
else:
- comparator = getattr(
- tm, "assert_{typ}_equal".format(typ=typ), tm.assert_almost_equal
- )
+ comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal)
comparator(result, expected)
@@ -77,7 +75,7 @@ def compare(data, vf, version):
# use a specific comparator
# if available
- comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
+ comparator = f"compare_{typ}_{dt}"
comparator = m.get(comparator, m["compare_element"])
comparator(result, expected, typ, version)
@@ -234,7 +232,7 @@ def test_legacy_sparse_warning(datapath):
@pytest.fixture
def get_random_path():
- return "__{}__.pickle".format(tm.rands(10))
+ return f"__{tm.rands(10)}__.pickle"
class TestCompression:
@@ -262,7 +260,7 @@ def compress_file(self, src_path, dest_path, compression):
elif compression == "xz":
f = _get_lzma_file(lzma)(dest_path, "w")
else:
- msg = "Unrecognized compression type: {}".format(compression)
+ msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
if compression != "zip":
| fixing some styling (using f-strings instead of .format)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31628 | 2020-02-03T17:56:20Z | 2020-02-03T23:46:29Z | 2020-02-03T23:46:29Z | 2020-02-03T23:46:37Z |
Replace .format with f-strings | diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index c452d5b12ce01..660b1a6e295ed 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -42,9 +42,9 @@ def test_parse_time_quarter_with_dash(dashed, normal):
@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])
def test_parse_time_quarter_with_dash_error(dashed):
- msg = "Unknown datetime string format, unable to parse: {dashed}"
+ msg = f"Unknown datetime string format, unable to parse: {dashed}"
- with pytest.raises(parsing.DateParseError, match=msg.format(dashed=dashed)):
+ with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@@ -118,9 +118,9 @@ def test_parsers_quarter_invalid(date_str):
"must be between 1 and 4: {date_str}"
)
else:
- msg = "Unknown datetime string format, unable to parse: {date_str}"
+ msg = f"Unknown datetime string format, unable to parse: {date_str}"
- with pytest.raises(ValueError, match=msg.format(date_str=date_str)):
+ with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 2801a2bf9c371..1268e8a42a255 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -43,7 +43,7 @@ def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
("python-bits", struct.calcsize("P") * 8),
("OS", f"{sysname}"),
("OS-release", f"{release}"),
- # ("Version", "{version}".format(version=version)),
+ # ("Version", f"{version}"),
("machine", f"{machine}"),
("processor", f"{processor}"),
("byteorder", f"{sys.byteorder}"),
@@ -114,14 +114,13 @@ def show_versions(as_json=False):
else:
maxlen = max(len(x) for x in deps)
- tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
print("")
for k, stat in deps_blob:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
def main() -> int:
| - [ ] solves few cases of #29547
| https://api.github.com/repos/pandas-dev/pandas/pulls/31627 | 2020-02-03T17:47:03Z | 2020-02-04T18:33:15Z | null | 2020-02-04T18:33:15Z |
BUG: qcut can fail for highly discontinuous data distributions | diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 00a7645d0c7a5..324c28e212e9b 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -345,6 +345,7 @@ def qcut(
else:
quantiles = q
bins = algos.quantile(x, quantiles)
+
fac, bins = _bins_to_cuts(
x,
bins,
@@ -388,7 +389,19 @@ def _bins_to_cuts(
f"You can drop duplicate edges by setting the 'duplicates' kwarg"
)
else:
- bins = unique_bins
+ if len(unique_bins) == 1:
+ raise ValueError(
+ f"Bin edges must be unique: {repr(bins)}.\n"
+ )
+ bins[0] = bins[0] - 1
+ for i in range(1, len(bins)):
+ if i - 2 < 0:
+ bins[i] = np.nextafter(bins[i], bins[i] - 1)
+ else:
+ bins[i - 1] = (bins[i - 2] + bins[i]) / 2
+ unique_bins = algos.unique(bins)
+ if len(unique_bins) < len(bins) and len(bins) != 2:
+ bins = unique_bins
side = "left" if right else "right"
ids = ensure_int64(bins.searchsorted(x, side=side))
| Fixes #15069. Needs refactoring
- [x] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31626 | 2020-02-03T17:23:26Z | 2020-06-14T15:44:48Z | null | 2020-06-14T15:44:48Z |
CLN: inline indexing 1-liners | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 62a4878f1f12e..313d40b575629 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3498,7 +3498,9 @@ def _iget_item_cache(self, item):
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
- def _slice(self: FrameOrSeries, slobj: slice, axis=0, kind=None) -> FrameOrSeries:
+ def _slice(
+ self: FrameOrSeries, slobj: slice, axis=0, kind: str = "getitem"
+ ) -> FrameOrSeries:
"""
Construct a slice of this container.
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 63e4679a85ade..3e468fb567a3a 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -591,12 +591,6 @@ def _get_label(self, label, axis: int):
return self.obj._xs(label, axis=axis)
- def _get_loc(self, key: int, axis: int):
- return self.obj._ixs(key, axis=axis)
-
- def _slice(self, obj, axis: int, kind=None):
- return self.obj._slice(obj, axis=axis, kind=kind)
-
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
@@ -702,17 +696,6 @@ def _convert_tuple(self, key, is_setter: bool = False):
keyidx.append(idx)
return tuple(keyidx)
- def _convert_scalar_indexer(self, key, axis: int):
- # if we are accessing via lowered dim, use the last dim
- ax = self.obj._get_axis(min(axis, self.ndim - 1))
- # a scalar
- return ax._convert_scalar_indexer(key, kind=self.name)
-
- def _convert_slice_indexer(self, key: slice, axis: int):
- # if we are accessing via lowered dim, use the last dim
- ax = self.obj._get_axis(min(axis, self.ndim - 1))
- return ax._convert_slice_indexer(key, kind=self.name)
-
def _has_valid_setitem_indexer(self, indexer) -> bool:
return True
@@ -1627,7 +1610,8 @@ def _validate_key(self, key, axis: int):
return
if not is_list_like_indexer(key):
- self._convert_scalar_indexer(key, axis)
+ labels = self.obj._get_axis(axis)
+ labels._convert_scalar_indexer(key, kind="loc")
def _is_scalar_access(self, key: Tuple) -> bool:
"""
@@ -1772,7 +1756,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
)
if isinstance(indexer, slice):
- return self._slice(indexer, axis=axis, kind="iloc")
+ return self.obj._slice(indexer, axis=axis, kind="iloc")
else:
# DatetimeIndex overrides Index.slice_indexer and may
# return a DatetimeIndex instead of a slice object.
@@ -1796,12 +1780,12 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
- return self._convert_slice_indexer(key, axis)
+ return labels._convert_slice_indexer(key, kind="loc")
if is_scalar(key):
# try to find out correct indexer, if not type correct raise
try:
- key = self._convert_scalar_indexer(key, axis)
+ key = labels._convert_scalar_indexer(key, kind="loc")
except TypeError:
# but we will allow setting
if not is_setter:
@@ -2025,7 +2009,7 @@ def _getitem_axis(self, key, axis: int):
# validate the location
self._validate_integer(key, axis)
- return self._get_loc(key, axis=axis)
+ return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
# caller is responsible for ensuring non-None axis
@@ -2034,19 +2018,22 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
if not need_slice(slice_obj):
return obj.copy(deep=False)
- indexer = self._convert_slice_indexer(slice_obj, axis)
- return self._slice(indexer, axis=axis, kind="iloc")
+ labels = obj._get_axis(axis)
+ indexer = labels._convert_slice_indexer(slice_obj, kind="iloc")
+ return self.obj._slice(indexer, axis=axis, kind="iloc")
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Much simpler as we only have to deal with our valid types.
"""
+ labels = self.obj._get_axis(axis)
+
# make need to convert a float key
if isinstance(key, slice):
- return self._convert_slice_indexer(key, axis)
+ return labels._convert_slice_indexer(key, kind="iloc")
elif is_float(key):
- return self._convert_scalar_indexer(key, axis)
+ return labels._convert_scalar_indexer(key, kind="iloc")
self._validate_key(key, axis)
return key
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2a627aa879c7c..0786674daf874 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -840,8 +840,9 @@ def _ixs(self, i: int, axis: int = 0):
"""
return self._values[i]
- def _slice(self, slobj: slice, axis: int = 0, kind=None) -> "Series":
- slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
+ def _slice(self, slobj: slice, axis: int = 0, kind: str = "getitem") -> "Series":
+ assert kind in ["getitem", "iloc"]
+ slobj = self.index._convert_slice_indexer(slobj, kind=kind)
return self._get_values(slobj)
def __getitem__(self, key):
| The only 2-liners here are _convert_scalar_indexer and _convert_slice_indexer, which are effectively `axis = min(axis, self.ndim-1)`, but that is unnecessary since all places where these are called have axis < self.ndim. | https://api.github.com/repos/pandas-dev/pandas/pulls/31625 | 2020-02-03T17:07:27Z | 2020-02-06T23:43:04Z | 2020-02-06T23:43:04Z | 2020-02-06T23:45:59Z |
DOC: link on nullables in indexing | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index a8cdf4a61073d..146f88c53e690 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -920,6 +920,10 @@ and :ref:`Advanced Indexing <advanced>` you may select along more than one axis
df2.loc[criterion & (df2['b'] == 'x'), 'b':'c']
+Note- pandas does not allow indexing with NA values. Attempting to do so
+will raise a ``ValueError``.
+For more information refer to Nullable Boolean data type.
+
.. _indexing.basics.indexing_isin:
Indexing with isin
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 2e68a0598bb71..d5686a8d7a086 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -809,6 +809,10 @@ a DataFrame or Series, or when reading in data), so you need to specify
the dtype explicitly. An easy way to convert to those dtypes is explained
:ref:`here <missing_data.NA.conversion>`.
+Note - Pandas does not allow indexing with NA values. Attempting to do so will
+raise a ``ValueError``.
+For more information refer to Nullable boolean data type.
+
Propagation in arithmetic and comparison operations
---------------------------------------------------
| DOC: Mention that boolean indexing is impossible for new nullable integer/boolean and string data types when they contain missing values
closes #31537 | https://api.github.com/repos/pandas-dev/pandas/pulls/31624 | 2020-02-03T16:40:44Z | 2020-02-26T20:23:10Z | null | 2020-02-26T20:23:11Z |
DOC: add redirect for moved json_normalize docstring | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 0a71f037d23c3..3a990b09e7f7d 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -46,7 +46,10 @@ developer,development/developer
extending,development/extending
internals,development/internals
-# api
+# api moved function
+reference/api/pandas.io.json.json_normalize,pandas.json_normalize
+
+# api rename
api,reference/index
generated/pandas.api.extensions.ExtensionArray.argsort,../reference/api/pandas.api.extensions.ExtensionArray.argsort
generated/pandas.api.extensions.ExtensionArray.astype,../reference/api/pandas.api.extensions.ExtensionArray.astype
| Closes https://github.com/pandas-dev/pandas/issues/31514 | https://api.github.com/repos/pandas-dev/pandas/pulls/31622 | 2020-02-03T15:07:27Z | 2020-02-03T19:39:23Z | 2020-02-03T19:39:23Z | 2020-02-03T19:39:29Z |
TST: add regression test for apply case from GH-31605 | diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9c2b045079622..41ec70468aaeb 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -851,3 +851,17 @@ def test_apply_function_returns_non_pandas_non_scalar(function, expected_values)
result = df.groupby("groups").apply(function)
expected = pd.Series(expected_values, index=pd.Index(["A", "B"], name="groups"))
tm.assert_series_equal(result, expected)
+
+
+def test_apply_function_returns_numpy_array():
+ # GH 31605
+ def fct(group):
+ return group["B"].values.flatten()
+
+ df = pd.DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})
+
+ result = df.groupby("A").apply(fct)
+ expected = pd.Series(
+ [[1.0, 2.0], [3.0], [np.nan]], index=pd.Index(["a", "b", "none"], name="A")
+ )
+ tm.assert_series_equal(result, expected)
| @jorisvandenbossche you wanted to put more tests in for this issue. I put the user reported code example into a test. Did you have anything else in mind? I didn't merge it with the above one since the above acted on an empty Dataframe.
- [x] closes #31605
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31621 | 2020-02-03T15:03:58Z | 2020-02-05T01:26:33Z | 2020-02-05T01:26:33Z | 2020-02-05T06:28:31Z |
Backport PR #31521: REGR: Fixed slicing DatetimeIndex with date | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 2d209b375840e..0dc659dc93fa1 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -77,6 +77,9 @@ Interval
Indexing
^^^^^^^^
+
+- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
-
-
- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 2241921e94694..292f3dd8e3aaf 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1,4 +1,4 @@
-from datetime import datetime, time, timedelta, tzinfo
+from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
@@ -804,6 +804,13 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
if isinstance(start, time) or isinstance(end, time):
raise KeyError("Cannot mix time and non-time slice keys")
+ # Pandas supports slicing with dates, treated as datetimes at midnight.
+ # https://github.com/pandas-dev/pandas/issues/31501
+ if isinstance(start, date) and not isinstance(start, datetime):
+ start = datetime.combine(start, time(0, 0))
+ if isinstance(end, date) and not isinstance(end, datetime):
+ end = datetime.combine(end, time(0, 0))
+
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 42f992339f036..c8c2d1ed587cf 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
@@ -350,3 +350,23 @@ def test_loc_label_slicing(self):
expected = ser.iloc[:-1]
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "slice_, positions",
+ [
+ [slice(date(2018, 1, 1), None), [0, 1, 2]],
+ [slice(date(2019, 1, 2), None), [2]],
+ [slice(date(2020, 1, 1), None), []],
+ [slice(None, date(2020, 1, 1)), [0, 1, 2]],
+ [slice(None, date(2019, 1, 1)), [0]],
+ ],
+ )
+ def test_getitem_slice_date(self, slice_, positions):
+ # https://github.com/pandas-dev/pandas/issues/31501
+ s = pd.Series(
+ [0, 1, 2],
+ pd.DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
+ )
+ result = s[slice_]
+ expected = s.take(positions)
+ tm.assert_series_equal(result, expected)
| backport https://github.com/pandas-dev/pandas/pull/31521 | https://api.github.com/repos/pandas-dev/pandas/pulls/31619 | 2020-02-03T14:41:32Z | 2020-02-03T15:25:54Z | 2020-02-03T15:25:54Z | 2020-02-03T15:39:45Z |
Backport PR #31515 on branch 1.0.x (REGR: DataFrame.__setitem__(slice, val) is positional ) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 0dc659dc93fa1..6d5a2a777be9e 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -77,7 +77,6 @@ Interval
Indexing
^^^^^^^^
-
- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cfd37ac961413..b680234cb0afd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2938,8 +2938,11 @@ def __setitem__(self, key, value):
self._set_item(key, value)
def _setitem_slice(self, key, value):
+ # NB: we can't just use self.loc[key] = value because that
+ # operates on labels and we need to operate positional for
+ # backwards-compat, xref GH#31469
self._check_setitem_copy()
- self.loc[key] = value
+ self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 33c0e92845484..46be2f7d8fe89 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -860,6 +860,15 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
assert (float_frame["C"] == 4).all()
+ def test_setitem_slice_position(self):
+ # GH#31469
+ df = pd.DataFrame(np.zeros((100, 1)))
+ df[-4:] = 1
+ arr = np.zeros((100, 1))
+ arr[-4:] = 1
+ expected = pd.DataFrame(arr)
+ tm.assert_frame_equal(df, expected)
+
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
| Backport PR #31515: REGR: DataFrame.__setitem__(slice, val) is positional | https://api.github.com/repos/pandas-dev/pandas/pulls/31618 | 2020-02-03T14:38:48Z | 2020-02-03T15:39:57Z | 2020-02-03T15:39:57Z | 2020-02-03T15:39:57Z |
Backport PR #31607: DOC: combine regressions in section in v1.0.1 whatsnew | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 6d5a2a777be9e..180411afb117d 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -10,6 +10,24 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_101.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
+- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
+- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
+- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
+- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
+- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
+- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
+- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
+- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_101.deprecations:
Deprecations
@@ -23,123 +41,20 @@ Deprecations
Bug fixes
~~~~~~~~~
-- Bug in :meth:`GroupBy.apply` was raising ``TypeError`` if called with function which returned a non-pandas non-scalar object (e.g. a list) (:issue:`31441`)
-Categorical
-^^^^^^^^^^^
+**Datetimelike**
--
--
-
-Datetimelike
-^^^^^^^^^^^^
-- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
- Fixed bug in :meth:`to_datetime` raising when ``cache=True`` and out-of-bound values are present (:issue:`31491`)
-Timedelta
-^^^^^^^^^
+**Numeric**
--
--
-
-Timezones
-^^^^^^^^^
-
--
--
-
-
-Numeric
-^^^^^^^
- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
-- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
--
-
-Conversion
-^^^^^^^^^^
-
--
--
-
-Strings
-^^^^^^^
-
--
--
-
-
-Interval
-^^^^^^^^
-
--
--
+ and for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
-Indexing
-^^^^^^^^
-
-- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
-- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
--
--
-- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
-
-Missing
-^^^^^^^
-
--
--
-
-MultiIndex
-^^^^^^^^^^
-
--
--
-
-I/O
-^^^
-
-- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
--
--
-
-Plotting
-^^^^^^^^
+**Plotting**
- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
--
-
-Groupby/resample/rolling
-^^^^^^^^^^^^^^^^^^^^^^^^
-
--
--
-
-
-Reshaping
-^^^^^^^^^
-
--
--
-
-Sparse
-^^^^^^
-
--
--
-
-ExtensionArray
-^^^^^^^^^^^^^^
-
-- Bug in dtype being lost in ``__invert__`` (``~`` operator) for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
-- Bug where :meth:`qcut` would raise when passed a nullable integer. (:issue:`31389`)
--
-
-Other
-^^^^^
-- Regression fixed in objTOJSON.c fix return-type warning (:issue:`31463`)
-- Fixed a regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
--
.. ---------------------------------------------------------------------------
| Backport https://github.com/pandas-dev/pandas/pull/31607 | https://api.github.com/repos/pandas-dev/pandas/pulls/31617 | 2020-02-03T14:31:50Z | 2020-02-03T17:31:54Z | 2020-02-03T17:31:54Z | 2020-02-03T19:39:37Z |
REGR: Fixed AssertionError in groupby | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index c82a58e5d3c45..041158e682bf9 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -19,6 +19,7 @@ Fixed regressions
- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
+- Fixed regression in ``.groupby().agg()`` raising an ``AssertionError`` for some reductions like ``min`` on object-dtype columns (:issue:`31522`)
- Fixed regression in ``.groupby()`` aggregations with categorical dtype using Cythonized reduction functions (e.g. ``first``) (:issue:`31450`)
- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`DataFrame.groupby` whereby taking the minimum or maximum of a column with period dtype would raise a ``TypeError``. (:issue:`31471`)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 27dd6e953c219..f194c774cf329 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1022,6 +1022,10 @@ def _cython_agg_blocks(
agg_blocks: List[Block] = []
new_items: List[np.ndarray] = []
deleted_items: List[np.ndarray] = []
+ # Some object-dtype blocks might be split into List[Block[T], Block[U]]
+ split_items: List[np.ndarray] = []
+ split_frames: List[DataFrame] = []
+
no_result = object()
for block in data.blocks:
# Avoid inheriting result from earlier in the loop
@@ -1061,40 +1065,56 @@ def _cython_agg_blocks(
else:
result = cast(DataFrame, result)
# unwrap DataFrame to get array
+ if len(result._data.blocks) != 1:
+ # We've split an object block! Everything we've assumed
+ # about a single block input returning a single block output
+ # is a lie. To keep the code-path for the typical non-split case
+ # clean, we choose to clean up this mess later on.
+ split_items.append(locs)
+ split_frames.append(result)
+ continue
+
assert len(result._data.blocks) == 1
result = result._data.blocks[0].values
if isinstance(result, np.ndarray) and result.ndim == 1:
result = result.reshape(1, -1)
- finally:
- assert not isinstance(result, DataFrame)
-
- if result is not no_result:
- # see if we can cast the block back to the original dtype
- result = maybe_downcast_numeric(result, block.dtype)
-
- if block.is_extension and isinstance(result, np.ndarray):
- # e.g. block.values was an IntegerArray
- # (1, N) case can occur if block.values was Categorical
- # and result is ndarray[object]
- assert result.ndim == 1 or result.shape[0] == 1
- try:
- # Cast back if feasible
- result = type(block.values)._from_sequence(
- result.ravel(), dtype=block.values.dtype
- )
- except ValueError:
- # reshape to be valid for non-Extension Block
- result = result.reshape(1, -1)
+ assert not isinstance(result, DataFrame)
+
+ if result is not no_result:
+ # see if we can cast the block back to the original dtype
+ result = maybe_downcast_numeric(result, block.dtype)
+
+ if block.is_extension and isinstance(result, np.ndarray):
+ # e.g. block.values was an IntegerArray
+ # (1, N) case can occur if block.values was Categorical
+ # and result is ndarray[object]
+ assert result.ndim == 1 or result.shape[0] == 1
+ try:
+ # Cast back if feasible
+ result = type(block.values)._from_sequence(
+ result.ravel(), dtype=block.values.dtype
+ )
+ except ValueError:
+ # reshape to be valid for non-Extension Block
+ result = result.reshape(1, -1)
- agg_block: Block = block.make_block(result)
+ agg_block: Block = block.make_block(result)
new_items.append(locs)
agg_blocks.append(agg_block)
- if not agg_blocks:
+ if not (agg_blocks or split_frames):
raise DataError("No numeric types to aggregate")
+ if split_items:
+ # Clean up the mess left over from split blocks.
+ for locs, result in zip(split_items, split_frames):
+ assert len(locs) == result.shape[1]
+ for i, loc in enumerate(locs):
+ new_items.append(np.array([loc], dtype=locs.dtype))
+ agg_blocks.append(result.iloc[:, [i]]._data.blocks[0])
+
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 4eb073a28d580..ff99081521ffb 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -377,6 +377,49 @@ def test_agg_index_has_complex_internals(index):
tm.assert_frame_equal(result, expected)
+def test_agg_split_block():
+ # https://github.com/pandas-dev/pandas/issues/31522
+ df = pd.DataFrame(
+ {
+ "key1": ["a", "a", "b", "b", "a"],
+ "key2": ["one", "two", "one", "two", "one"],
+ "key3": ["three", "three", "three", "six", "six"],
+ }
+ )
+ result = df.groupby("key1").min()
+ expected = pd.DataFrame(
+ {"key2": ["one", "one"], "key3": ["six", "six"]},
+ index=pd.Index(["a", "b"], name="key1"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_agg_split_object_part_datetime():
+ # https://github.com/pandas-dev/pandas/pull/31616
+ df = pd.DataFrame(
+ {
+ "A": pd.date_range("2000", periods=4),
+ "B": ["a", "b", "c", "d"],
+ "C": [1, 2, 3, 4],
+ "D": ["b", "c", "d", "e"],
+ "E": pd.date_range("2000", periods=4),
+ "F": [1, 2, 3, 4],
+ }
+ ).astype(object)
+ result = df.groupby([0, 0, 0, 0]).min()
+ expected = pd.DataFrame(
+ {
+ "A": [pd.Timestamp("2000")],
+ "B": ["a"],
+ "C": [1],
+ "D": ["b"],
+ "E": [pd.Timestamp("2000")],
+ "F": [1],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_agg_cython_category_not_implemented_fallback():
# https://github.com/pandas-dev/pandas/issues/31450
df = pd.DataFrame({"col_num": [1, 1, 2, 3]})
| Closes https://github.com/pandas-dev/pandas/issues/31522
cc @jbrockmendel. Just raising a `TypeError` when that assert failed didn't work. The `finally` still runs, which raised an assertion error.
It seemed easier to try to just support this case. IIUC, it only occurs when an `(P, n_rows)` input block gets split into `P` result blocks. I believe that
1. The result blocks should all have the same dtype
2. The input block must not have been an extension block, since it's 2d
So it *should* be safe to just cast the result values into an ndarray. Hopefully...
Are there any edge cases I'm not considering? Some kind of `agg` that returns a result that can't be put in a 2D block? Even something like `.agg(lambda x: pd.Period())`
won't hit this, since it has to be a Cython function. | https://api.github.com/repos/pandas-dev/pandas/pulls/31616 | 2020-02-03T14:13:46Z | 2020-02-05T14:55:26Z | 2020-02-05T14:55:26Z | 2020-02-05T14:56:00Z |
REGR: fix non-reduction apply with tz-aware objects | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 9aa9ece9a5267..0e36fd149d9cc 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -20,11 +20,9 @@ Fixed regressions
- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
- Fixed regression in ``.groupby()`` aggregations with categorical dtype using Cythonized reduction functions (e.g. ``first``) (:issue:`31450`)
-- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`DataFrame.groupby` whereby taking the minimum or maximum of a column with period dtype would raise a ``TypeError``. (:issue:`31471`)
-- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
+- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
-- Fixed regression in :class:`Categorical` construction with ``numpy.str_`` categories (:issue:`31499`)
- Fixed regression in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` when selecting a row containing a single ``datetime64`` or ``timedelta64`` column (:issue:`31649`)
- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 89164c527002a..43d253f632f0f 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -114,7 +114,8 @@ cdef class Reducer:
if self.typ is not None:
# In this case, we also have self.index
name = labels[i]
- cached_typ = self.typ(chunk, index=self.index, name=name)
+ cached_typ = self.typ(
+ chunk, index=self.index, name=name, dtype=arr.dtype)
# use the cached_typ if possible
if cached_typ is not None:
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index e98f74e133ea9..fe6abef97acc4 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -703,6 +703,14 @@ def apply_list(row):
)
tm.assert_series_equal(result, expected)
+ def test_apply_noreduction_tzaware_object(self):
+ # https://github.com/pandas-dev/pandas/issues/31505
+ df = pd.DataFrame({"foo": [pd.Timestamp("2020", tz="UTC")]}, dtype="object")
+ result = df.apply(lambda x: x)
+ tm.assert_frame_equal(result, df)
+ result = df.apply(lambda x: x.copy())
+ tm.assert_frame_equal(result, df)
+
class TestInferOutputShape:
# the user has supplied an opaque UDF where
| Closes https://github.com/pandas-dev/pandas/issues/31505
| https://api.github.com/repos/pandas-dev/pandas/pulls/31614 | 2020-02-03T13:21:47Z | 2020-02-05T08:15:33Z | 2020-02-05T08:15:33Z | 2020-02-05T12:34:41Z |
BUG: Ensure same index is returned for slow and fast path in groupby.apply | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 00553edca3259..17a830788be3f 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -902,6 +902,10 @@ Groupby/resample/rolling
- Bug in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`)
- Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`)
- Bug in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`)
+- Bug in :meth:`core.groupby.DataFrameGroupBy.apply` where the output index shape for functions returning a DataFrame which is equally indexed
+ to the input DataFrame is inconsistent. An internal heuristic to detect index mutation would behave differently for equal but not identical
+ indices. In particular, the result index shape might change if a copy of the input would be returned.
+ The behaviour now is consistent, independent of internal heuristics. (:issue:`31612`, :issue:`14927`, :issue:`13056`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 0988cd7ff0dde..18422c2f86129 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -502,7 +502,7 @@ def apply_frame_axis0(object frame, object f, object names,
# Need to infer if low level index slider will cause segfaults
require_slow_apply = i == 0 and piece is chunk
try:
- if piece.index is not chunk.index:
+ if not piece.index.equals(chunk.index):
mutated = True
except AttributeError:
# `piece` might not have an index, could be e.g. an int
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index e2b5118922a5a..bc8067212d60e 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -190,6 +190,46 @@ def f_constant_df(group):
assert names == group_names
+def test_apply_fast_slow_identical():
+ # GH 31613
+
+ df = DataFrame({"A": [0, 0, 1], "b": range(3)})
+
+ # For simple index structures we check for fast/slow apply using
+ # an identity check on in/output
+ def slow(group):
+ return group
+
+ def fast(group):
+ return group.copy()
+
+ fast_df = df.groupby("A").apply(fast)
+ slow_df = df.groupby("A").apply(slow)
+
+ tm.assert_frame_equal(fast_df, slow_df)
+
+
+@pytest.mark.parametrize(
+ "func",
+ [
+ lambda x: x,
+ lambda x: x[:],
+ lambda x: x.copy(deep=False),
+ lambda x: x.copy(deep=True),
+ ],
+)
+def test_groupby_apply_identity_maybecopy_index_identical(func):
+ # GH 14927
+ # Whether the function returns a copy of the input data or not should not
+ # have an impact on the index structure of the result since this is not
+ # transparent to the user
+
+ df = pd.DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
+
+ result = df.groupby("g").apply(func)
+ tm.assert_frame_equal(result, df)
+
+
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame(
| This _fixes_ the internal check to be consistent with the slow apply path
- [x] closes #31612
- [x] closes #14927
closes #13056
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31613 | 2020-02-03T13:15:28Z | 2020-05-27T14:06:52Z | 2020-05-27T14:06:51Z | 2020-06-02T09:31:04Z |
Backport PR #31606 on branch 1.0.x (DOC: add back google analytics with the new doc theme) | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 78b317456f720..f126ad99cc463 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -208,6 +208,7 @@
"external_links": [],
"github_url": "https://github.com/pandas-dev/pandas",
"twitter_url": "https://twitter.com/pandas_dev",
+ "google_analytics_id": "UA-27880019-2",
}
# Add any paths that contain custom themes here, relative to this directory.
| Backport PR #31606: DOC: add back google analytics with the new doc theme | https://api.github.com/repos/pandas-dev/pandas/pulls/31611 | 2020-02-03T12:07:39Z | 2020-02-03T12:35:58Z | 2020-02-03T12:35:58Z | 2020-02-03T12:35:58Z |
Backport PR #31594: TST: troubleshoot npdev build | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6a2a30a3efa17..023819518dc00 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1138,7 +1138,6 @@ def _set_value(self, label, value, takeable: bool = False):
else:
self.index._engine.set_value(self._values, label, value)
except (KeyError, TypeError):
-
# set using a non-recursive method
self.loc[label] = value
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 097e83d93ee71..4c917b9bb42d2 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -675,6 +675,8 @@ def test__get_dtype(input_param, result):
)
def test__get_dtype_fails(input_param, expected_error_message):
# python objects
+ # 2020-02-02 npdev changed error message
+ expected_error_message += f"|Cannot interpret '{input_param}' as a data type"
with pytest.raises(TypeError, match=expected_error_message):
com._get_dtype(input_param)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index fddd6239df309..4475be12f3ff8 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -43,7 +43,12 @@ def test_equality_invalid(self):
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
- with pytest.raises(TypeError, match="data type not understood"):
+ # npdev 2020-02-02 changed from "data type not understood" to
+ # "Cannot interpret 'foo' as a data type"
+ msg = "|".join(
+ ["data type not understood", "Cannot interpret '.*' as a data type"]
+ )
+ with pytest.raises(TypeError, match=msg):
np.dtype(self.dtype)
assert not self.dtype == np.str_
| Backport of https://github.com/pandas-dev/pandas/pull/31594 | https://api.github.com/repos/pandas-dev/pandas/pulls/31610 | 2020-02-03T10:46:34Z | 2020-02-03T12:22:38Z | 2020-02-03T12:22:38Z | 2020-02-03T12:53:41Z |
Backport PR #31571 on branch 1.0.x (DOC fix *_option() docstring) | diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 0a3009f74492f..1978f5066733f 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -144,9 +144,7 @@ def _describe_option(pat="", _print_desc=True):
if len(keys) == 0:
raise OptionError("No such keys(s)")
- s = ""
- for k in keys: # filter by pat
- s += _build_option_description(k)
+ s = "\n".join([_build_option_description(k) for k in keys])
if _print_desc:
print(s)
| Backport PR #31571: DOC fix *_option() docstring | https://api.github.com/repos/pandas-dev/pandas/pulls/31608 | 2020-02-03T10:39:43Z | 2020-02-03T11:27:01Z | 2020-02-03T11:27:01Z | 2020-02-03T11:27:01Z |
DOC: combine regressions in section in v1.0.1 whatsnew | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 6d5a2a777be9e..180411afb117d 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -10,6 +10,24 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_101.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
+- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
+- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
+- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
+- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
+- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
+- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
+- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
+- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_101.deprecations:
Deprecations
@@ -23,123 +41,20 @@ Deprecations
Bug fixes
~~~~~~~~~
-- Bug in :meth:`GroupBy.apply` was raising ``TypeError`` if called with function which returned a non-pandas non-scalar object (e.g. a list) (:issue:`31441`)
-Categorical
-^^^^^^^^^^^
+**Datetimelike**
--
--
-
-Datetimelike
-^^^^^^^^^^^^
-- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
- Fixed bug in :meth:`to_datetime` raising when ``cache=True`` and out-of-bound values are present (:issue:`31491`)
-Timedelta
-^^^^^^^^^
+**Numeric**
--
--
-
-Timezones
-^^^^^^^^^
-
--
--
-
-
-Numeric
-^^^^^^^
- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
-- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
--
-
-Conversion
-^^^^^^^^^^
-
--
--
-
-Strings
-^^^^^^^
-
--
--
-
-
-Interval
-^^^^^^^^
-
--
--
+ and for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
-Indexing
-^^^^^^^^
-
-- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
-- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
--
--
-- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
-
-Missing
-^^^^^^^
-
--
--
-
-MultiIndex
-^^^^^^^^^^
-
--
--
-
-I/O
-^^^
-
-- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
--
--
-
-Plotting
-^^^^^^^^
+**Plotting**
- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
--
-
-Groupby/resample/rolling
-^^^^^^^^^^^^^^^^^^^^^^^^
-
--
--
-
-
-Reshaping
-^^^^^^^^^
-
--
--
-
-Sparse
-^^^^^^
-
--
--
-
-ExtensionArray
-^^^^^^^^^^^^^^
-
-- Bug in dtype being lost in ``__invert__`` (``~`` operator) for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
-- Bug where :meth:`qcut` would raise when passed a nullable integer. (:issue:`31389`)
--
-
-Other
-^^^^^
-- Regression fixed in objTOJSON.c fix return-type warning (:issue:`31463`)
-- Fixed a regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
--
.. ---------------------------------------------------------------------------
| Similarly how we did it for 0.24.x, gathering all regressions in the first section of the whatsnew (as those are the most important part of 1.0.1)
cc @TomAugspurger | https://api.github.com/repos/pandas-dev/pandas/pulls/31607 | 2020-02-03T10:30:05Z | 2020-02-03T14:05:38Z | 2020-02-03T14:05:38Z | 2020-02-03T14:32:14Z |
DOC: add back google analytics with the new doc theme | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 28df08a8607b9..c12c148d0f10d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -209,6 +209,7 @@
"external_links": [],
"github_url": "https://github.com/pandas-dev/pandas",
"twitter_url": "https://twitter.com/pandas_dev",
+ "google_analytics_id": "UA-27880019-2",
}
# Add any paths that contain custom themes here, relative to this directory.
| WIth the new theme, we didn't have google analytics anymore (how we had it before https://github.com/pandas-dev/pandas/pull/27662), so did a quick PR to add that to the theme with an option: https://github.com/pandas-dev/pydata-bootstrap-sphinx-theme/pull/84 | https://api.github.com/repos/pandas-dev/pandas/pulls/31606 | 2020-02-03T10:07:33Z | 2020-02-03T12:07:11Z | 2020-02-03T12:07:11Z | 2020-02-03T12:35:38Z |
Backport PR #31569 on branch 1.0.x | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 71a67e10ca38c..a01125d6f43c3 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -10,6 +10,14 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_101.deprecations:
+
+Deprecations
+~~~~~~~~~~~~
+
+- Support for negative integer for :attr:`pd.options.display.max_colwidth` is deprecated in favor of using ``None`` (:issue:`31532`)
+
+.. ---------------------------------------------------------------------------
.. _whatsnew_101.bug_fixes:
@@ -128,6 +136,7 @@ ExtensionArray
Other
^^^^^
- Regression fixed in objTOJSON.c fix return-type warning (:issue:`31463`)
+- Fixed a regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index eb1587313910d..6fb4359dd8ae0 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,6 +9,8 @@
module is imported, register them here rather then in the module.
"""
+import warnings
+
import pandas._config.config as cf
from pandas._config.config import (
is_bool,
@@ -341,8 +343,26 @@ def is_terminal() -> bool:
validator=is_instance_factory([type(None), int]),
)
cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)
+
+ def _deprecate_negative_int_max_colwidth(key):
+ value = cf.get_option(key)
+ if value is not None and value < 0:
+ warnings.warn(
+ "Passing a negative integer is deprecated in version 1.0 and "
+ "will not be supported in future version. Instead, use None "
+ "to not limit the column width.",
+ FutureWarning,
+ stacklevel=4,
+ )
+
cf.register_option(
- "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int
+ # FIXME: change `validator=is_nonnegative_int`
+ # in version 1.2
+ "max_colwidth",
+ 50,
+ max_colwidth_doc,
+ validator=is_instance_factory([type(None), int]),
+ cb=_deprecate_negative_int_max_colwidth,
)
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 97956489e7da6..a733874253903 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -239,6 +239,15 @@ def test_repr_truncation(self):
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
+ def test_repr_deprecation_negative_int(self):
+ # FIXME: remove in future version after deprecation cycle
+ # Non-regression test for:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ width = get_option("display.max_colwidth")
+ with tm.assert_produces_warning(FutureWarning):
+ set_option("display.max_colwidth", -1)
+ set_option("display.max_colwidth", width)
+
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
| backport of #31569 in 1.0.x | https://api.github.com/repos/pandas-dev/pandas/pulls/31603 | 2020-02-03T09:32:56Z | 2020-02-03T10:38:41Z | 2020-02-03T10:38:41Z | 2020-02-03T10:38:41Z |
Manual backport 31594 | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6a2a30a3efa17..466efbf9ae6d1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -11,7 +11,9 @@
from pandas._config import get_option
-from pandas._libs import index as libindex, lib, reshape, tslibs
+from pandas._libs import lib, properties, reshape, tslibs
+from pandas._libs.index import validate_numeric_casting
+from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg, validate_percentile
@@ -1045,17 +1047,10 @@ def __setitem__(self, key, value):
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
- values = self._values
- if is_extension_array_dtype(values.dtype):
- # The cython indexing engine does not support ExtensionArrays.
- values[self.index.get_loc(key)] = value
- return
- try:
- self.index._engine.set_value(values, key, value)
- return
- except KeyError:
- values[self.index.get_loc(key)] = value
- return
+ # fails with AttributeError for IntervalIndex
+ loc = self.index._engine.get_loc(key)
+ validate_numeric_casting(self.dtype, value)
+ self._values[loc] = value
def _set_with(self, key, value):
# other: fancy integer or otherwise
@@ -1136,8 +1131,10 @@ def _set_value(self, label, value, takeable: bool = False):
if takeable:
self._values[label] = value
else:
- self.index._engine.set_value(self._values, label, value)
- except (KeyError, TypeError):
+ loc = self.index.get_loc(label)
+ validate_numeric_casting(self.dtype, value)
+ self._values[loc] = value
+ except KeyError:
# set using a non-recursive method
self.loc[label] = value
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 097e83d93ee71..4c917b9bb42d2 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -675,6 +675,8 @@ def test__get_dtype(input_param, result):
)
def test__get_dtype_fails(input_param, expected_error_message):
# python objects
+ # 2020-02-02 npdev changed error message
+ expected_error_message += f"|Cannot interpret '{input_param}' as a data type"
with pytest.raises(TypeError, match=expected_error_message):
com._get_dtype(input_param)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index fddd6239df309..42978c937c6b8 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -38,13 +38,14 @@ def setup_method(self, method):
def test_hash(self):
hash(self.dtype)
- def test_equality_invalid(self):
- assert not self.dtype == "foo"
- assert not is_dtype_equal(self.dtype, np.int64)
-
- def test_numpy_informed(self):
- with pytest.raises(TypeError, match="data type not understood"):
- np.dtype(self.dtype)
+ def test_numpy_informed(self, dtype):
+ # npdev 2020-02-02 changed from "data type not understood" to
+ # "Cannot interpret 'foo' as a data type"
+ msg = "|".join(
+ ["data type not understood", "Cannot interpret '.*' as a data type"]
+ )
+ with pytest.raises(TypeError, match=msg):
+ np.dtype(dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31602 | 2020-02-03T08:47:10Z | 2020-02-03T08:57:07Z | null | 2020-02-03T08:57:18Z |
Backport PR #31207 on branch 1.0.x (BUG: no longer raise user warning when plotting tz aware time series) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 71a67e10ca38c..684dcb1cbf002 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -95,7 +95,7 @@ I/O
Plotting
^^^^^^^^
--
+- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
-
Groupby/resample/rolling
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index dd048114142f3..3abce690cbe6b 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -251,7 +251,7 @@ def _maybe_convert_index(ax, data):
freq = frequencies.get_period_alias(freq)
if isinstance(data.index, ABCDatetimeIndex):
- data = data.to_period(freq=freq)
+ data = data.tz_localize(None).to_period(freq=freq)
elif isinstance(data.index, ABCPeriodIndex):
data.index = data.index.asfreq(freq=freq)
return data
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 8f855fd0c6cff..10d7efd22971b 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -45,11 +45,12 @@ def teardown_method(self, method):
@pytest.mark.slow
def test_ts_plot_with_tz(self, tz_aware_fixture):
- # GH2877, GH17173
+ # GH2877, GH17173, GH31205
tz = tz_aware_fixture
index = date_range("1/1/2011", periods=2, freq="H", tz=tz)
ts = Series([188.5, 328.25], index=index)
- _check_plot_works(ts.plot)
+ with tm.assert_produces_warning(None):
+ _check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
| Backport PR #31207: BUG: no longer raise user warning when plotting tz aware time series | https://api.github.com/repos/pandas-dev/pandas/pulls/31601 | 2020-02-03T07:37:08Z | 2020-02-03T10:57:46Z | 2020-02-03T10:57:46Z | 2020-02-03T10:57:46Z |
CLN: _convert_list_indexer only called by Loc | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6a7551391f2a8..1d8c08c323cd4 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -68,7 +68,7 @@
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
-from pandas.core.indexers import deprecate_ndim_indexing, maybe_convert_indices
+from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
@@ -3211,7 +3211,7 @@ def is_int(v):
return indexer
- def _convert_listlike_indexer(self, keyarr, kind=None):
+ def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
@@ -3230,7 +3230,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None):
else:
keyarr = self._convert_arr_indexer(keyarr)
- indexer = self._convert_list_indexer(keyarr, kind=kind)
+ indexer = self._convert_list_indexer(keyarr)
return indexer, keyarr
def _convert_arr_indexer(self, keyarr):
@@ -3264,7 +3264,7 @@ def _convert_index_indexer(self, keyarr):
"""
return keyarr
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
"""
Convert a list-like indexer to the appropriate dtype.
@@ -3278,29 +3278,6 @@ def _convert_list_indexer(self, keyarr, kind=None):
-------
positional indexer or None
"""
- if (
- kind in [None, "iloc"]
- and is_integer_dtype(keyarr)
- and not self.is_floating()
- ):
-
- if self.inferred_type == "mixed-integer":
- indexer = self.get_indexer(keyarr)
- if (indexer >= 0).all():
- return indexer
- # missing values are flagged as -1 by get_indexer and negative
- # indices are already converted to positive indices in the
- # above if-statement, so the negative flags are changed to
- # values outside the range of indices so as to trigger an
- # IndexError in maybe_convert_indices
- indexer[indexer < 0] = len(self)
-
- return maybe_convert_indices(indexer, len(self))
-
- elif not self.inferred_type == "integer":
- keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
- return keyarr
-
return None
def _invalid_indexer(self, form: str_t, key):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 2cdf47ad61cec..8df4a447e1f44 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -678,12 +678,12 @@ def _convert_scalar_indexer(self, key, kind=None):
return super()._convert_scalar_indexer(key, kind=kind)
@Appender(Index._convert_list_indexer.__doc__)
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
- indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
+ indexer = self.categories._convert_list_indexer(keyarr)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index a665e4df00219..0252a13665b84 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -537,7 +537,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(Index._convert_list_indexer.__doc__)
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index c560d81ba95f6..95511858b0cd9 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2375,7 +2375,7 @@ def _try_mi(k):
raise InvalidIndexError(key)
- def _convert_listlike_indexer(self, keyarr, kind=None):
+ def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
@@ -2388,7 +2388,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None):
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
- indexer, keyarr = super()._convert_listlike_indexer(keyarr, kind=kind)
+ indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 77003719360d9..1aa3e1dbd4499 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1459,7 +1459,8 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
# Have the index compute an indexer or return None
# if it cannot handle:
- indexer, keyarr = ax._convert_listlike_indexer(key, kind=self.name)
+ assert self.name == "loc"
+ indexer, keyarr = ax._convert_listlike_indexer(key)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31599 | 2020-02-03T06:35:00Z | 2020-02-05T01:27:38Z | 2020-02-05T01:27:38Z | 2020-02-05T01:32:53Z | |
REF: simplify PeriodIndex.get_loc | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 4185cc2084469..6141e2b78e9f4 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -12,6 +12,7 @@ cnp.import_array()
cimport pandas._libs.util as util
+from pandas._libs.tslibs import Period
from pandas._libs.tslibs.nattype cimport c_NaT as NaT
from pandas._libs.tslibs.c_timestamp cimport _Timestamp
@@ -466,6 +467,28 @@ cdef class TimedeltaEngine(DatetimeEngine):
cdef class PeriodEngine(Int64Engine):
+ cdef int64_t _unbox_scalar(self, scalar) except? -1:
+ if scalar is NaT:
+ return scalar.value
+ if isinstance(scalar, Period):
+ # NB: we assume that we have the correct freq here.
+ # TODO: potential optimize by checking for _Period?
+ return scalar.ordinal
+ raise TypeError(scalar)
+
+ cpdef get_loc(self, object val):
+ # NB: the caller is responsible for ensuring that we are called
+ # with either a Period or NaT
+ cdef:
+ int64_t conv
+
+ try:
+ conv = self._unbox_scalar(val)
+ except TypeError:
+ raise KeyError(val)
+
+ return Int64Engine.get_loc(self, conv)
+
cdef _get_index_values(self):
return super(PeriodEngine, self).vgetter().view("i8")
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 0e0eb249562d7..987725bb4b70b 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -468,6 +468,10 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
+ if self_index is not self:
+ # convert tolerance to i8
+ tolerance = self._maybe_convert_timedelta(tolerance)
+
return Index.get_indexer(self_index, target, method, limit, tolerance)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
@@ -504,6 +508,7 @@ def get_loc(self, key, method=None, tolerance=None):
TypeError
If key is listlike or otherwise not hashable.
"""
+ orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
@@ -545,20 +550,12 @@ def get_loc(self, key, method=None, tolerance=None):
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
- raise KeyError(key)
+ raise KeyError(orig_key)
- ordinal = self._data._unbox_scalar(key)
try:
- return self._engine.get_loc(ordinal)
+ return Index.get_loc(self, key, method, tolerance)
except KeyError:
-
- try:
- if tolerance is not None:
- tolerance = self._convert_tolerance(tolerance, np.asarray(key))
- return self._int64index.get_loc(ordinal, method, tolerance)
-
- except KeyError:
- raise KeyError(key)
+ raise KeyError(orig_key)
def _maybe_cast_slice_bound(self, label, side: str, kind: str):
"""
@@ -625,12 +622,6 @@ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True
except KeyError:
raise KeyError(key)
- def _convert_tolerance(self, tolerance, target):
- tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target)
- if target.size != tolerance.size and tolerance.size > 1:
- raise ValueError("list-like tolerance size must match target index size")
- return self._maybe_convert_timedelta(tolerance)
-
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.astype(object).insert(loc, item)
| This makes PeriodEngine follow patterns in DatetimeEngine, so soon we can have it subclass DatetimeEngine instead of Int64Engine, which will lead to more simplifications.
This also puts us within striking distance of sharing get_loc code | https://api.github.com/repos/pandas-dev/pandas/pulls/31598 | 2020-02-03T06:29:49Z | 2020-02-09T20:41:34Z | 2020-02-09T20:41:34Z | 2020-02-10T23:32:30Z |
REF: implement tests/indexes/objects/ | diff --git a/pandas/tests/indexes/base_class/__init__.py b/pandas/tests/indexes/base_class/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/indexes/base_class/test_constructors.py b/pandas/tests/indexes/base_class/test_constructors.py
new file mode 100644
index 0000000000000..9e6a8f34c135d
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_constructors.py
@@ -0,0 +1,36 @@
+import pytest
+
+from pandas import Index, MultiIndex
+
+
+class TestIndexConstructor:
+ # Tests for the Index constructor, specifically for cases that do
+ # not return a subclass
+
+ def test_constructor_corner(self):
+ # corner case
+ msg = (
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ "kind, 0 was passed"
+ )
+ with pytest.raises(TypeError, match=msg):
+ Index(0)
+
+ @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
+ def test_construction_list_mixed_tuples(self, index_vals):
+ # see gh-10697: if we are constructing from a mixed list of tuples,
+ # make sure that we are independent of the sorting order.
+ index = Index(index_vals)
+ assert isinstance(index, Index)
+ assert not isinstance(index, MultiIndex)
+
+ def test_constructor_wrong_kwargs(self):
+ # GH #19348
+ with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
+ Index([], foo="bar")
+
+ @pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument")
+ def test_constructor_cast(self):
+ msg = "could not convert string to float"
+ with pytest.raises(ValueError, match=msg):
+ Index(["a", "b", "c"], dtype=float)
diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py
new file mode 100644
index 0000000000000..e7d5e21d0ba47
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_setops.py
@@ -0,0 +1,74 @@
+import numpy as np
+import pytest
+
+from pandas import Index, Series
+import pandas._testing as tm
+from pandas.core.algorithms import safe_sort
+
+
+class TestIndexSetOps:
+ def test_union_base(self):
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[3:]
+ second = index[:5]
+
+ result = first.union(second)
+
+ expected = Index([0, 1, 2, "a", "b", "c"])
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [np.array, Series, list])
+ def test_union_different_type_base(self, klass):
+ # GH 10149
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[3:]
+ second = index[:5]
+
+ result = first.union(klass(second.values))
+
+ assert tm.equalContents(result, index)
+
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_intersection_base(self, sort):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:5]
+ second = index[:3]
+
+ expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])
+ result = first.intersection(second, sort=sort)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [np.array, Series, list])
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_intersection_different_type_base(self, klass, sort):
+ # GH 10149
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:5]
+ second = index[:3]
+
+ result = first.intersection(klass(second.values), sort=sort)
+ assert tm.equalContents(result, second)
+
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_difference_base(self, sort):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:4]
+ second = index[3:]
+
+ result = first.difference(second, sort)
+ expected = Index([0, "a", 1])
+ if sort is None:
+ expected = Index(safe_sort(expected))
+ tm.assert_index_equal(result, expected)
+
+ def test_symmetric_difference(self):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:4]
+ second = index[3:]
+
+ result = first.symmetric_difference(second)
+ expected = Index([0, 1, 2, "a", "c"])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 811bbe4eddfa9..04af9b09bbf89 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -34,7 +34,6 @@
period_range,
)
import pandas._testing as tm
-from pandas.core.algorithms import safe_sort
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -108,23 +107,6 @@ def test_constructor_copy(self, index):
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
- def test_constructor_corner(self):
- # corner case
- msg = (
- r"Index\(\.\.\.\) must be called with a collection of some "
- "kind, 0 was passed"
- )
- with pytest.raises(TypeError, match=msg):
- Index(0)
-
- @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
- def test_construction_list_mixed_tuples(self, index_vals):
- # see gh-10697: if we are constructing from a mixed list of tuples,
- # make sure that we are independent of the sorting order.
- index = Index(index_vals)
- assert isinstance(index, Index)
- assert not isinstance(index, MultiIndex)
-
@pytest.mark.parametrize("na_value", [None, np.nan])
@pytest.mark.parametrize("vtype", [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
@@ -359,11 +341,6 @@ def test_constructor_simple_new(self, vals, dtype):
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
- def test_constructor_wrong_kwargs(self):
- # GH #19348
- with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
- Index([], foo="bar")
-
@pytest.mark.parametrize(
"vals",
[
@@ -554,12 +531,6 @@ def test_constructor_overflow_int64(self):
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
- @pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument")
- def test_constructor_cast(self):
- msg = "could not convert string to float"
- with pytest.raises(ValueError, match=msg):
- Index(["a", "b", "c"], dtype=float)
-
@pytest.mark.parametrize(
"index",
[
@@ -2528,78 +2499,12 @@ def test_copy_name2(self):
assert index3.name == "NewName"
assert index3.names == ["NewName"]
- def test_union_base(self):
- index = self.create_index()
- first = index[3:]
- second = index[:5]
-
- result = first.union(second)
-
- expected = Index([0, 1, 2, "a", "b", "c"])
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize("klass", [np.array, Series, list])
- def test_union_different_type_base(self, klass):
- # GH 10149
- index = self.create_index()
- first = index[3:]
- second = index[:5]
-
- result = first.union(klass(second.values))
-
- assert tm.equalContents(result, index)
-
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name="my_index")
expected = pd.Index([2, np.nan, 1], name="my_index")
result = idx.unique()
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize("sort", [None, False])
- def test_intersection_base(self, sort):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:5]
- second = index[:3]
-
- expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])
- result = first.intersection(second, sort=sort)
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize("klass", [np.array, Series, list])
- @pytest.mark.parametrize("sort", [None, False])
- def test_intersection_different_type_base(self, klass, sort):
- # GH 10149
- index = self.create_index()
- first = index[:5]
- second = index[:3]
-
- result = first.intersection(klass(second.values), sort=sort)
- assert tm.equalContents(result, second)
-
- @pytest.mark.parametrize("sort", [None, False])
- def test_difference_base(self, sort):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:4]
- second = index[3:]
-
- result = first.difference(second, sort)
- expected = Index([0, "a", 1])
- if sort is None:
- expected = Index(safe_sort(expected))
- tm.assert_index_equal(result, expected)
-
- def test_symmetric_difference(self):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:4]
- second = index[3:]
-
- result = first.symmetric_difference(second)
- expected = Index([0, 1, 2, "a", "c"])
- tm.assert_index_equal(result, expected)
-
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
| Many of the tests in test_base are about `Index.__new__` cases that return a subclass. This is intended specifically for Index-only tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/31597 | 2020-02-03T06:20:01Z | 2020-02-05T01:31:09Z | 2020-02-05T01:31:09Z | 2020-02-05T01:33:37Z |
BUG: read_csv used in file like object RawIOBase is not recognize encoding option | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 9aa9ece9a5267..2488f8325e53e 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -31,6 +31,7 @@ Fixed regressions
- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
- Fixed performance regression when indexing a ``DataFrame`` or ``Series`` with a :class:`MultiIndex` for the index using a list of labels (:issue:`31648`)
+- Fixed regression in :meth:`read_csv` used in file like object ``RawIOBase`` is not recognize ``encoding`` option (:issue:`31575`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 377d49f2bbd29..3077f73a8d1a4 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -638,7 +638,7 @@ cdef class TextReader:
raise ValueError(f'Unrecognized compression type: '
f'{self.compression}')
- if self.encoding and isinstance(source, io.BufferedIOBase):
+ if self.encoding and isinstance(source, (io.BufferedIOBase, io.RawIOBase)):
source = io.TextIOWrapper(
source, self.encoding.decode('utf-8'), newline='')
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 00f2961e41617..e506cc155d48d 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -3,7 +3,7 @@
import bz2
from collections import abc
import gzip
-from io import BufferedIOBase, BytesIO
+from io import BufferedIOBase, BytesIO, RawIOBase
import mmap
import os
import pathlib
@@ -359,9 +359,9 @@ def get_handle(
try:
from s3fs import S3File
- need_text_wrapping = (BufferedIOBase, S3File)
+ need_text_wrapping = (BufferedIOBase, RawIOBase, S3File)
except ImportError:
- need_text_wrapping = BufferedIOBase # type: ignore
+ need_text_wrapping = (BufferedIOBase, RawIOBase) # type: ignore
handles: List[IO] = list()
f = path_or_buf
@@ -437,7 +437,7 @@ def get_handle(
from io import TextIOWrapper
g = TextIOWrapper(f, encoding=encoding, newline="")
- if not isinstance(f, BufferedIOBase):
+ if not isinstance(f, (BufferedIOBase, RawIOBase)):
handles.append(g)
f = g
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index b38aa9770a73b..8bc8470ae7658 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -5,7 +5,7 @@
from collections import abc, defaultdict
import csv
import datetime
-from io import BufferedIOBase, StringIO, TextIOWrapper
+from io import BufferedIOBase, RawIOBase, StringIO, TextIOWrapper
import re
import sys
from textwrap import fill
@@ -1872,7 +1872,7 @@ def __init__(self, src, **kwds):
# Handle the file object with universal line mode enabled.
# We will handle the newline character ourselves later on.
- if isinstance(src, BufferedIOBase):
+ if isinstance(src, (BufferedIOBase, RawIOBase)):
src = TextIOWrapper(src, encoding=encoding, newline="")
kwds["encoding"] = "utf-8"
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 406e7bedfd298..13f72a0414bac 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -141,6 +141,7 @@ def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
)
def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding):
# gh-23779: Python csv engine shouldn't error on files opened in binary.
+ # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
parser = all_parsers
fpath = os.path.join(csv_dir_path, fname)
@@ -154,6 +155,10 @@ def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding):
result = parser.read_csv(fb, encoding=encoding)
tm.assert_frame_equal(expected, result)
+ with open(fpath, mode="rb", buffering=0) as fb:
+ result = parser.read_csv(fb, encoding=encoding)
+ tm.assert_frame_equal(expected, result)
+
@pytest.mark.parametrize("pass_encoding", [True, False])
def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
| - [x] closes #31575
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31596 | 2020-02-03T05:40:21Z | 2020-02-05T13:27:05Z | 2020-02-05T13:27:05Z | 2020-02-05T20:56:20Z |
TST: troubleshoot npdev build | diff --git a/pandas/core/series.py b/pandas/core/series.py
index bfe9969daaa8e..040fcf392733b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -23,6 +23,7 @@
from pandas._config import get_option
from pandas._libs import lib, properties, reshape, tslibs
+from pandas._libs.index import validate_numeric_casting
from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
@@ -1022,7 +1023,7 @@ def __setitem__(self, key, value):
def _set_with_engine(self, key, value):
# fails with AttributeError for IntervalIndex
loc = self.index._engine.get_loc(key)
- libindex.validate_numeric_casting(self.dtype, value)
+ validate_numeric_casting(self.dtype, value)
self._values[loc] = value
def _set_with(self, key, value):
@@ -1105,7 +1106,7 @@ def _set_value(self, label, value, takeable: bool = False):
self._values[label] = value
else:
loc = self.index.get_loc(label)
- libindex.validate_numeric_casting(self.dtype, value)
+ validate_numeric_casting(self.dtype, value)
self._values[loc] = value
except KeyError:
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 097e83d93ee71..4c917b9bb42d2 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -675,6 +675,8 @@ def test__get_dtype(input_param, result):
)
def test__get_dtype_fails(input_param, expected_error_message):
# python objects
+ # 2020-02-02 npdev changed error message
+ expected_error_message += f"|Cannot interpret '{input_param}' as a data type"
with pytest.raises(TypeError, match=expected_error_message):
com._get_dtype(input_param)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 8df222e8fb59d..67c4fef7079e2 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -40,7 +40,12 @@ def test_equality_invalid(self, dtype):
assert not is_dtype_equal(dtype, np.int64)
def test_numpy_informed(self, dtype):
- with pytest.raises(TypeError, match="data type not understood"):
+ # npdev 2020-02-02 changed from "data type not understood" to
+ # "Cannot interpret 'foo' as a data type"
+ msg = "|".join(
+ ["data type not understood", "Cannot interpret '.*' as a data type"]
+ )
+ with pytest.raises(TypeError, match=msg):
np.dtype(dtype)
assert not dtype == np.str_
| https://api.github.com/repos/pandas-dev/pandas/pulls/31594 | 2020-02-03T01:18:14Z | 2020-02-03T03:03:25Z | 2020-02-03T03:03:25Z | 2020-02-03T14:33:00Z | |
Import libindex in series.py | diff --git a/pandas/core/series.py b/pandas/core/series.py
index bfe9969daaa8e..c3f94baa0590d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -22,7 +22,7 @@
from pandas._config import get_option
-from pandas._libs import lib, properties, reshape, tslibs
+from pandas._libs import index as libindex, lib, properties, reshape, tslibs
from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
| I think we may be missing an import in `series.py`. cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/31593 | 2020-02-03T00:56:28Z | 2020-02-03T00:59:55Z | null | 2020-02-03T00:59:59Z |
REF: parametrize indexing tests | diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 5530896a90941..dd248196e87e1 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -22,16 +22,9 @@ def check(self, result, original, indexer, getitem):
tm.assert_almost_equal(result, expected)
- def test_scalar_error(self):
-
- # GH 4892
- # float_indexers should raise exceptions
- # on appropriate Index types & accessors
- # this duplicates the code below
- # but is specifically testing for the error
- # message
-
- for index in [
+ @pytest.mark.parametrize(
+ "index_func",
+ [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
@@ -40,22 +33,31 @@ def test_scalar_error(self):
tm.makePeriodIndex,
tm.makeIntIndex,
tm.makeRangeIndex,
- ]:
+ ],
+ )
+ def test_scalar_error(self, index_func):
- i = index(5)
+ # GH 4892
+ # float_indexers should raise exceptions
+ # on appropriate Index types & accessors
+ # this duplicates the code below
+ # but is specifically testing for the error
+ # message
- s = Series(np.arange(len(i)), index=i)
+ i = index_func(5)
- msg = "Cannot index by location index"
- with pytest.raises(TypeError, match=msg):
- s.iloc[3.0]
+ s = Series(np.arange(len(i)), index=i)
- msg = (
- "cannot do positional indexing on {klass} with these "
- r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
- )
- with pytest.raises(TypeError, match=msg):
- s.iloc[3.0] = 0
+ msg = "Cannot index by location index"
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0]
+
+ msg = (
+ "cannot do positional indexing on {klass} with these "
+ r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
+ )
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0] = 0
def test_scalar_non_numeric(self):
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index d67259e8b7d40..08ea4c1579ef8 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -15,6 +15,44 @@
class TestiLoc(Base):
+ def test_iloc_getitem_int(self):
+ # integer
+ self.check_result(
+ "iloc",
+ 2,
+ "iloc",
+ 2,
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ def test_iloc_getitem_neg_int(self):
+ # neg integer
+ self.check_result(
+ "iloc",
+ -1,
+ "iloc",
+ -1,
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ def test_iloc_getitem_list_int(self):
+ self.check_result(
+ "iloc",
+ [0, 1, 2],
+ "iloc",
+ [0, 1, 2],
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ # array of ints (GH5006), make sure that a single indexer is returning
+ # the correct type
+
+
+class TestiLoc2:
+ # TODO: better name, just separating out things that dont rely on base class
def test_iloc_exceeds_bounds(self):
# GH6296
@@ -135,28 +173,6 @@ def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
- def test_iloc_getitem_int(self):
- # integer
- self.check_result(
- "iloc",
- 2,
- "iloc",
- 2,
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
- def test_iloc_getitem_neg_int(self):
- # neg integer
- self.check_result(
- "iloc",
- -1,
- "iloc",
- -1,
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
@pytest.mark.parametrize("dims", [1, 2])
def test_iloc_getitem_invalid_scalar(self, dims):
# GH 21982
@@ -183,19 +199,6 @@ def test_iloc_array_not_mutating_negative_indices(self):
df.iloc[:, array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
- def test_iloc_getitem_list_int(self):
- self.check_result(
- "iloc",
- [0, 1, 2],
- "iloc",
- [0, 1, 2],
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
- # array of ints (GH5006), make sure that a single indexer is returning
- # the correct type
-
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
@@ -286,7 +289,9 @@ def test_iloc_getitem_slice_dups(self):
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
- df = self.frame_ints
+ df = DataFrame(
+ np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
+ )
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index ae32274c02dcd..98940b64330b4 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -17,13 +17,13 @@
from pandas.core.generic import NDFrame
from pandas.core.indexers import validate_indices
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
-from pandas.tests.indexing.common import Base, _mklbl
+from pandas.tests.indexing.common import _mklbl
# ------------------------------------------------------------------------
# Indexing test cases
-class TestFancy(Base):
+class TestFancy:
""" pure get/set item & fancy indexing """
def test_setitem_ndarray_1d(self):
@@ -750,7 +750,7 @@ def test_index_type_coercion(self):
assert s2.index.is_object()
-class TestMisc(Base):
+class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
@@ -875,21 +875,21 @@ def test_indexing_dtypes_on_empty(self):
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
- def test_range_in_series_indexing(self):
+ @pytest.mark.parametrize("size", [5, 999999, 1000000])
+ def test_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
- for x in [5, 999999, 1000000]:
- s = Series(index=range(x), dtype=np.float64)
- s.loc[range(1)] = 42
- tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
+ s = Series(index=range(size), dtype=np.float64)
+ s.loc[range(1)] = 42
+ tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
- s.loc[range(2)] = 43
- tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
+ s.loc[range(2)] = 43
+ tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
- def test_non_reducing_slice(self):
- df = DataFrame([[0, 1], [2, 3]])
-
- slices = [
+ @pytest.mark.parametrize(
+ "slc",
+ [
+ # FIXME: dont leave commented-out
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
@@ -902,10 +902,13 @@ def test_non_reducing_slice(self):
[0, 1],
np.array([0, 1]),
Series([0, 1]),
- ]
- for slice_ in slices:
- tslice_ = _non_reducing_slice(slice_)
- assert isinstance(df.loc[tslice_], DataFrame)
+ ],
+ )
+ def test_non_reducing_slice(self, slc):
+ df = DataFrame([[0, 1], [2, 3]])
+
+ tslice_ = _non_reducing_slice(slc)
+ assert isinstance(df.loc[tslice_], DataFrame)
def test_list_slice(self):
# like dataframe getitem
@@ -965,37 +968,37 @@ class TestSeriesNoneCoercion:
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
- def test_coercion_with_setitem(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series[0] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_setitem(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series[0] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_loc_setitem(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series.loc[0] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_loc_setitem(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series.loc[0] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_setitem_and_series(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series[start_series == start_series[0]] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_setitem_and_series(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series[start_series == start_series[0]] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_loc_and_series(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series.loc[start_series == start_series[0]] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_loc_and_series(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series.loc[start_series == start_series[0]] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion:
@@ -1012,31 +1015,35 @@ class TestDataframeNoneCoercion:
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
- def test_coercion_with_loc(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe.loc[0, ["foo"]] = None
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_coercion_with_loc(self, expected):
+ start_data, expected_result = expected
+
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe.loc[0, ["foo"]] = None
+
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
+
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_coercion_with_setitem_and_dataframe(self, expected):
+ start_data, expected_result = expected
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
- def test_coercion_with_setitem_and_dataframe(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_none_coercion_loc_and_dataframe(self, expected):
+ start_data, expected_result = expected
- def test_none_coercion_loc_and_dataframe(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe.loc[
- start_dataframe["foo"] == start_dataframe["foo"][0]
- ] = None
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe.loc[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame(
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 0cb4bdcc334d8..3a726fb9923ee 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -13,6 +13,159 @@
class TestLoc(Base):
+ def test_loc_getitem_int(self):
+
+ # int label
+ self.check_result("loc", 2, "loc", 2, typs=["label"], fails=KeyError)
+
+ def test_loc_getitem_label(self):
+
+ # label
+ self.check_result("loc", "c", "loc", "c", typs=["empty"], fails=KeyError)
+
+ def test_loc_getitem_label_out_of_range(self):
+
+ # out of range label
+ self.check_result(
+ "loc",
+ "f",
+ "loc",
+ "f",
+ typs=["ints", "uints", "labels", "mixed", "ts"],
+ fails=KeyError,
+ )
+ self.check_result("loc", "f", "ix", "f", typs=["floats"], fails=KeyError)
+ self.check_result("loc", "f", "loc", "f", typs=["floats"], fails=KeyError)
+ self.check_result(
+ "loc", 20, "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
+ )
+ self.check_result("loc", 20, "loc", 20, typs=["labels"], fails=TypeError)
+ self.check_result("loc", 20, "loc", 20, typs=["ts"], axes=0, fails=TypeError)
+ self.check_result("loc", 20, "loc", 20, typs=["floats"], axes=0, fails=KeyError)
+
+ def test_loc_getitem_label_list(self):
+ # TODO: test something here?
+ # list of labels
+ pass
+
+ def test_loc_getitem_label_list_with_missing(self):
+ self.check_result(
+ "loc", [0, 1, 2], "loc", [0, 1, 2], typs=["empty"], fails=KeyError,
+ )
+ self.check_result(
+ "loc",
+ [0, 2, 10],
+ "ix",
+ [0, 2, 10],
+ typs=["ints", "uints", "floats"],
+ axes=0,
+ fails=KeyError,
+ )
+
+ self.check_result(
+ "loc",
+ [3, 6, 7],
+ "ix",
+ [3, 6, 7],
+ typs=["ints", "uints", "floats"],
+ axes=1,
+ fails=KeyError,
+ )
+
+ # GH 17758 - MultiIndex and missing keys
+ self.check_result(
+ "loc",
+ [(1, 3), (1, 4), (2, 5)],
+ "ix",
+ [(1, 3), (1, 4), (2, 5)],
+ typs=["multi"],
+ axes=0,
+ fails=KeyError,
+ )
+
+ def test_loc_getitem_label_list_fails(self):
+ # fails
+ self.check_result(
+ "loc",
+ [20, 30, 40],
+ "loc",
+ [20, 30, 40],
+ typs=["ints", "uints"],
+ axes=1,
+ fails=KeyError,
+ )
+
+ def test_loc_getitem_label_array_like(self):
+ # TODO: test something?
+ # array like
+ pass
+
+ def test_loc_getitem_bool(self):
+ # boolean indexers
+ b = [True, False, True, False]
+
+ self.check_result("loc", b, "loc", b, typs=["empty"], fails=IndexError)
+
+ def test_loc_getitem_label_slice(self):
+
+ # label slices (with ints)
+
+ # real label slices
+
+ # GH 14316
+
+ self.check_result(
+ "loc",
+ slice(1, 3),
+ "loc",
+ slice(1, 3),
+ typs=["labels", "mixed", "empty", "ts", "floats"],
+ fails=TypeError,
+ )
+
+ self.check_result(
+ "loc",
+ slice("20130102", "20130104"),
+ "loc",
+ slice("20130102", "20130104"),
+ typs=["ts"],
+ axes=1,
+ fails=TypeError,
+ )
+
+ self.check_result(
+ "loc",
+ slice(2, 8),
+ "loc",
+ slice(2, 8),
+ typs=["mixed"],
+ axes=0,
+ fails=TypeError,
+ )
+ self.check_result(
+ "loc",
+ slice(2, 8),
+ "loc",
+ slice(2, 8),
+ typs=["mixed"],
+ axes=1,
+ fails=KeyError,
+ )
+
+ self.check_result(
+ "loc",
+ slice(2, 4, 2),
+ "loc",
+ slice(2, 4, 2),
+ typs=["mixed"],
+ axes=0,
+ fails=TypeError,
+ )
+
+
+class TestLoc2:
+ # TODO: better name, just separating out things that rely on base class
+
def test_loc_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning a ndarray
@@ -104,76 +257,6 @@ def test_loc_setitem_dtype(self):
tm.assert_frame_equal(df, expected)
- def test_loc_getitem_int(self):
-
- # int label
- self.check_result("loc", 2, "loc", 2, typs=["label"], fails=KeyError)
-
- def test_loc_getitem_label(self):
-
- # label
- self.check_result("loc", "c", "loc", "c", typs=["empty"], fails=KeyError)
-
- def test_loc_getitem_label_out_of_range(self):
-
- # out of range label
- self.check_result(
- "loc",
- "f",
- "loc",
- "f",
- typs=["ints", "uints", "labels", "mixed", "ts"],
- fails=KeyError,
- )
- self.check_result("loc", "f", "ix", "f", typs=["floats"], fails=KeyError)
- self.check_result("loc", "f", "loc", "f", typs=["floats"], fails=KeyError)
- self.check_result(
- "loc", 20, "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
- )
- self.check_result("loc", 20, "loc", 20, typs=["labels"], fails=TypeError)
- self.check_result("loc", 20, "loc", 20, typs=["ts"], axes=0, fails=TypeError)
- self.check_result("loc", 20, "loc", 20, typs=["floats"], axes=0, fails=KeyError)
-
- def test_loc_getitem_label_list(self):
- # TODO: test something here?
- # list of labels
- pass
-
- def test_loc_getitem_label_list_with_missing(self):
- self.check_result(
- "loc", [0, 1, 2], "loc", [0, 1, 2], typs=["empty"], fails=KeyError,
- )
- self.check_result(
- "loc",
- [0, 2, 10],
- "ix",
- [0, 2, 10],
- typs=["ints", "uints", "floats"],
- axes=0,
- fails=KeyError,
- )
-
- self.check_result(
- "loc",
- [3, 6, 7],
- "ix",
- [3, 6, 7],
- typs=["ints", "uints", "floats"],
- axes=1,
- fails=KeyError,
- )
-
- # GH 17758 - MultiIndex and missing keys
- self.check_result(
- "loc",
- [(1, 3), (1, 4), (2, 5)],
- "ix",
- [(1, 3), (1, 4), (2, 5)],
- typs=["multi"],
- axes=0,
- fails=KeyError,
- )
-
def test_getitem_label_list_with_missing(self):
s = Series(range(3), index=["a", "b", "c"])
@@ -185,29 +268,6 @@ def test_getitem_label_list_with_missing(self):
with pytest.raises(KeyError, match="with any missing labels"):
s[[0, 3]]
- def test_loc_getitem_label_list_fails(self):
- # fails
- self.check_result(
- "loc",
- [20, 30, 40],
- "loc",
- [20, 30, 40],
- typs=["ints", "uints"],
- axes=1,
- fails=KeyError,
- )
-
- def test_loc_getitem_label_array_like(self):
- # TODO: test something?
- # array like
- pass
-
- def test_loc_getitem_bool(self):
- # boolean indexers
- b = [True, False, True, False]
-
- self.check_result("loc", b, "loc", b, typs=["empty"], fails=IndexError)
-
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_loc_getitem_bool_diff_len(self, index):
# GH26658
@@ -309,62 +369,6 @@ def test_loc_getitem_list_with_fail(self):
with pytest.raises(KeyError, match="with any missing labels"):
s.loc[[2, 3]]
- def test_loc_getitem_label_slice(self):
-
- # label slices (with ints)
-
- # real label slices
-
- # GH 14316
-
- self.check_result(
- "loc",
- slice(1, 3),
- "loc",
- slice(1, 3),
- typs=["labels", "mixed", "empty", "ts", "floats"],
- fails=TypeError,
- )
-
- self.check_result(
- "loc",
- slice("20130102", "20130104"),
- "loc",
- slice("20130102", "20130104"),
- typs=["ts"],
- axes=1,
- fails=TypeError,
- )
-
- self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
- )
- self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=1,
- fails=KeyError,
- )
-
- self.check_result(
- "loc",
- slice(2, 4, 2),
- "loc",
- slice(2, 4, 2),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
- )
-
def test_loc_index(self):
# gh-17131
# a boolean index should index like a boolean numpy array
@@ -571,7 +575,7 @@ def test_loc_modify_datetime(self):
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame(self):
- df = self.frame_labels
+ df = DataFrame(np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD"))
result = df.iloc[0, 0]
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index a567fb9b8ccc7..9e6446ebc8de7 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -65,6 +65,10 @@ def _check(f, func, values=False):
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
+
+class TestScalar2:
+ # TODO: Better name, just separating things that dont need Base class
+
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
| https://api.github.com/repos/pandas-dev/pandas/pulls/31592 | 2020-02-03T00:43:37Z | 2020-02-05T01:36:40Z | 2020-02-05T01:36:40Z | 2020-02-05T01:41:18Z | |
ENH: Enable indexing with nullable Boolean | diff --git a/doc/source/user_guide/boolean.rst b/doc/source/user_guide/boolean.rst
index 4f0ad0e8ceaeb..6370a523b9a0d 100644
--- a/doc/source/user_guide/boolean.rst
+++ b/doc/source/user_guide/boolean.rst
@@ -20,8 +20,9 @@ Nullable Boolean data type
Indexing with NA values
-----------------------
-pandas does not allow indexing with NA values. Attempting to do so
-will raise a ``ValueError``.
+pandas allows indexing with ``NA`` values in a boolean array, which are treated as ``False``.
+
+.. versionchanged:: 1.0.2
.. ipython:: python
:okexcept:
@@ -30,12 +31,11 @@ will raise a ``ValueError``.
mask = pd.array([True, False, pd.NA], dtype="boolean")
s[mask]
-The missing values will need to be explicitly filled with True or False prior
-to using the array as a mask.
+If you would prefer to keep the ``NA`` values you can manually fill them with ``fillna(True)``.
.. ipython:: python
- s[mask.fillna(False)]
+ s[mask.fillna(True)]
.. _boolean.kleene:
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index a8cdf4a61073d..2bd3ff626f2e1 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -59,7 +59,7 @@ of multi-axis indexing.
slices, **both** the start and the stop are included, when present in the
index! See :ref:`Slicing with labels <indexing.slicing_with_labels>`
and :ref:`Endpoints are inclusive <advanced.endpoints_are_inclusive>`.)
- * A boolean array
+ * A boolean array (any ``NA`` values will be treated as ``False``).
* A ``callable`` function with one argument (the calling Series or DataFrame) and
that returns valid output for indexing (one of the above).
@@ -75,7 +75,7 @@ of multi-axis indexing.
* An integer e.g. ``5``.
* A list or array of integers ``[4, 3, 0]``.
* A slice object with ints ``1:7``.
- * A boolean array.
+ * A boolean array (any ``NA`` values will be treated as ``False``).
* A ``callable`` function with one argument (the calling Series or DataFrame) and
that returns valid output for indexing (one of the above).
@@ -374,6 +374,14 @@ For getting values with a boolean array:
df1.loc['a'] > 0
df1.loc[:, df1.loc['a'] > 0]
+NA values in a boolean array propogate as ``False``:
+
+.. versionchanged:: 1.0.2
+
+ mask = pd.array([True, False, True, False, pd.NA, False], dtype="boolean")
+ mask
+ df1[mask]
+
For getting a value explicitly:
.. ipython:: python
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index da79f651b63a9..53c8bf9fbcbc9 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -25,6 +25,33 @@ Fixed regressions
.. ---------------------------------------------------------------------------
+Indexing with Nullable Boolean Arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Previously indexing with a nullable Boolean array containing ``NA`` would raise a ``ValueError``, however this is now permitted with ``NA`` being treated as ``False``. (:issue:`31503`)
+
+.. ipython:: python
+
+ s = pd.Series([1, 2, 3, 4])
+ mask = pd.array([True, True, False, None], dtype="boolean")
+ s
+ mask
+
+*pandas 1.0.0-1.0.1*
+
+.. code-block:: python
+
+ >>> s[mask]
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot mask with array containing NA / NaN values
+
+*pandas 1.0.2*
+
+.. ipython:: python
+
+ s[mask]
+
.. _whatsnew_102.bug_fixes:
Bug fixes
@@ -40,8 +67,6 @@ Bug fixes
- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
- Fixed bug in parquet roundtrip with nullable unsigned integer dtypes (:issue:`31896`).
-
-
**Experimental dtypes**
- Fix bug in :meth:`DataFrame.convert_dtypes` for columns that were already using the ``"string"`` dtype (:issue:`31731`).
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index e39d1dc03adf5..854075eaa8d09 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -520,7 +520,9 @@ def __getitem__(self, key):
if com.is_bool_indexer(key):
# first convert to boolean, because check_array_indexer doesn't
# allow object dtype
- key = np.asarray(key, dtype=bool)
+ if is_object_dtype(key):
+ key = np.asarray(key, dtype=bool)
+
key = check_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 550ce74de5357..705c618fc49dc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -118,7 +118,6 @@ def is_bool_indexer(key: Any) -> bool:
check_array_indexer : Check that `key` is a valid array to index,
and convert to an ndarray.
"""
- na_msg = "cannot mask with array containing NA / NaN values"
if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
is_array_like(key) and is_extension_array_dtype(key.dtype)
):
@@ -126,16 +125,12 @@ def is_bool_indexer(key: Any) -> bool:
key = np.asarray(values_from_object(key))
if not lib.is_bool_array(key):
+ na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
if isna(key).any():
raise ValueError(na_msg)
return False
return True
elif is_bool_dtype(key.dtype):
- # an ndarray with bool-dtype by definition has no missing values.
- # So we only need to check for NAs in ExtensionArrays
- if is_extension_array_dtype(key.dtype):
- if np.any(key.isna()):
- raise ValueError(na_msg)
return True
elif isinstance(key, list):
try:
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index cb48d4be75c4d..5e53b061dd1c8 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -10,6 +10,7 @@
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
+ is_extension_array_dtype,
is_integer_dtype,
is_list_like,
)
@@ -366,14 +367,11 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
...
IndexError: Boolean index has wrong length: 3 instead of 2.
- A ValueError is raised when the mask cannot be converted to
- a bool-dtype ndarray.
+ NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
- Traceback (most recent call last):
- ...
- ValueError: Cannot mask with a boolean indexer containing NA values
+ array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
@@ -425,10 +423,10 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
dtype = indexer.dtype
if is_bool_dtype(dtype):
- try:
+ if is_extension_array_dtype(dtype):
+ indexer = indexer.to_numpy(dtype=bool, na_value=False)
+ else:
indexer = np.asarray(indexer, dtype=bool)
- except ValueError:
- raise ValueError("Cannot mask with a boolean indexer containing NA values")
# GH26658
if len(indexer) != len(array):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 081f87078d9c9..5ae237eb7dc32 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -13,6 +13,7 @@
is_iterator,
is_list_like,
is_numeric_dtype,
+ is_object_dtype,
is_scalar,
is_sequence,
)
@@ -2189,10 +2190,12 @@ def check_bool_indexer(index: Index, key) -> np.ndarray:
"the indexed object do not match)."
)
result = result.astype(bool)._values
- else:
- # key might be sparse / object-dtype bool, check_array_indexer needs bool array
+ elif is_object_dtype(key):
+ # key might be object-dtype bool, check_array_indexer needs bool array
result = np.asarray(result, dtype=bool)
result = check_array_indexer(index, result)
+ else:
+ result = check_array_indexer(index, result)
return result
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 85d5a6a3dc3ac..3d9469c252914 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -240,14 +240,17 @@ def test_mask_with_boolean(index):
@pytest.mark.parametrize("index", [True, False])
-def test_mask_with_boolean_raises(index):
+def test_mask_with_boolean_na_treated_as_false(index):
+ # https://github.com/pandas-dev/pandas/issues/31503
s = Series(range(3))
idx = Categorical([True, False, None])
if index:
idx = CategoricalIndex(idx)
- with pytest.raises(ValueError, match="NA / NaN"):
- s[idx]
+ result = s[idx]
+ expected = s[idx.fillna(False)]
+
+ tm.assert_series_equal(result, expected)
@pytest.fixture
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 8615a8df22dcc..b08a64cc076b6 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -158,21 +158,23 @@ def test_getitem_boolean_array_mask(self, data):
result = pd.Series(data)[mask]
self.assert_series_equal(result, expected)
- def test_getitem_boolean_array_mask_raises(self, data):
+ def test_getitem_boolean_na_treated_as_false(self, data):
+ # https://github.com/pandas-dev/pandas/issues/31503
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:2] = pd.NA
+ mask[2:4] = True
- msg = (
- "Cannot mask with a boolean indexer containing NA values|"
- "cannot mask with array containing NA / NaN values"
- )
- with pytest.raises(ValueError, match=msg):
- data[mask]
+ result = data[mask]
+ expected = data[mask.fillna(False)]
+
+ self.assert_extension_array_equal(result, expected)
s = pd.Series(data)
- with pytest.raises(ValueError):
- s[mask]
+ result = s[mask]
+ expected = s[mask.fillna(False)]
+
+ self.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"idx",
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index af70799c0236e..a4fe89df158fa 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -98,8 +98,9 @@ def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
+ pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
- ids=["numpy-array", "boolean-array"],
+ ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series):
arr = data[:5].copy()
@@ -124,20 +125,17 @@ def test_setitem_mask_raises(self, data, box_in_series):
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
- def test_setitem_mask_boolean_array_raises(self, data, box_in_series):
- # missing values in mask
+ def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
- mask[:2] = pd.NA
+ mask[:3] = True
+ mask[3:5] = pd.NA
if box_in_series:
data = pd.Series(data)
- msg = (
- "Cannot mask with a boolean indexer containing NA values|"
- "cannot mask with array containing NA / NaN values"
- )
- with pytest.raises(ValueError, match=msg):
- data[mask] = data[0]
+ data[mask] = data[0]
+
+ assert (data[:3] == data[0]).all()
@pytest.mark.parametrize(
"idx",
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 80a093530a8cd..61c5925383f88 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -415,10 +415,6 @@ def test_setitem_mask(self, data, mask, box_in_series):
def test_setitem_mask_raises(self, data, box_in_series):
super().test_setitem_mask_raises(data, box_in_series)
- @skip_nested
- def test_setitem_mask_boolean_array_raises(self, data, box_in_series):
- super().test_setitem_mask_boolean_array_raises(data, box_in_series)
-
@skip_nested
@pytest.mark.parametrize(
"idx",
diff --git a/pandas/tests/indexing/test_check_indexer.py b/pandas/tests/indexing/test_check_indexer.py
index 82f8c12229824..69d4065234d93 100644
--- a/pandas/tests/indexing/test_check_indexer.py
+++ b/pandas/tests/indexing/test_check_indexer.py
@@ -34,12 +34,14 @@ def test_valid_input(indexer, expected):
@pytest.mark.parametrize(
"indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")],
)
-def test_bool_raise_missing_values(indexer):
- array = np.array([1, 2, 3])
+def test_boolean_na_returns_indexer(indexer):
+ # https://github.com/pandas-dev/pandas/issues/31503
+ arr = np.array([1, 2, 3])
- msg = "Cannot mask with a boolean indexer containing NA values"
- with pytest.raises(ValueError, match=msg):
- check_array_indexer(array, indexer)
+ result = check_array_indexer(arr, indexer)
+ expected = np.array([True, False, False], dtype=bool)
+
+ tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexing/test_na_indexing.py b/pandas/tests/indexing/test_na_indexing.py
index befe4fee8ecf8..345ca30ec77eb 100644
--- a/pandas/tests/indexing/test_na_indexing.py
+++ b/pandas/tests/indexing/test_na_indexing.py
@@ -62,18 +62,29 @@ def test_series_mask_boolean(values, dtype, mask, box_mask, frame):
@pytest.mark.parametrize("frame", [True, False])
-def test_indexing_with_na_raises(frame):
+def test_na_treated_as_false(frame):
+ # https://github.com/pandas-dev/pandas/issues/31503
s = pd.Series([1, 2, 3], name="name")
if frame:
s = s.to_frame()
+
mask = pd.array([True, False, None], dtype="boolean")
- match = "cannot mask with array containing NA / NaN values"
- with pytest.raises(ValueError, match=match):
- s[mask]
- with pytest.raises(ValueError, match=match):
- s.loc[mask]
+ result = s[mask]
+ expected = s[mask.fillna(False)]
+
+ result_loc = s.loc[mask]
+ expected_loc = s.loc[mask.fillna(False)]
- with pytest.raises(ValueError, match=match):
- s.iloc[mask]
+ result_iloc = s.iloc[mask]
+ expected_iloc = s.iloc[mask.fillna(False)]
+
+ if frame:
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result_loc, expected_loc)
+ tm.assert_frame_equal(result_iloc, expected_iloc)
+ else:
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result_loc, expected_loc)
+ tm.assert_series_equal(result_iloc, expected_iloc)
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index 28f3c0f7429f8..8878a4a6526af 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -72,7 +72,7 @@ def test_getitem_boolean_object(string_series):
# nans raise exception
omask[5:10] = np.nan
- msg = "cannot mask with array containing NA / NaN values"
+ msg = "Cannot mask with non-boolean array containing NA / NaN values"
with pytest.raises(ValueError, match=msg):
s[omask]
with pytest.raises(ValueError, match=msg):
| closes #31503
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I think from the discussion in https://github.com/pandas-dev/pandas/issues/31503 that this is something people want to allow. | https://api.github.com/repos/pandas-dev/pandas/pulls/31591 | 2020-02-03T00:28:12Z | 2020-02-22T15:37:38Z | 2020-02-22T15:37:38Z | 2020-02-23T14:55:01Z |
REF: Move Loc-only methods to Loc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index bf42cf0330ef0..5c0f893554957 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -696,40 +696,6 @@ def _convert_tuple(self, key, is_setter: bool = False):
keyidx.append(idx)
return tuple(keyidx)
- def _has_valid_setitem_indexer(self, indexer) -> bool:
- return True
-
- def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
- """
- Validate that a positional indexer cannot enlarge its target
- will raise if needed, does not modify the indexer externally.
-
- Returns
- -------
- bool
- """
- if isinstance(indexer, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
- else:
- if not isinstance(indexer, tuple):
- indexer = _tuplify(self.ndim, indexer)
- for ax, i in zip(self.obj.axes, indexer):
- if isinstance(i, slice):
- # should check the stop slice?
- pass
- elif is_list_like_indexer(i):
- # should check the elements?
- pass
- elif is_integer(i):
- if i >= len(ax):
- raise IndexError(
- f"{self.name} cannot enlarge its target object"
- )
- elif isinstance(i, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
-
- return True
-
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
@@ -1218,80 +1184,6 @@ def _align_frame(self, indexer, df: ABCDataFrame):
raise ValueError("Incompatible indexer with DataFrame")
- def _getitem_tuple(self, tup: Tuple):
- try:
- return self._getitem_lowerdim(tup)
- except IndexingError:
- pass
-
- # no multi-index, so validate all of the indexers
- self._has_valid_tuple(tup)
-
- # ugly hack for GH #836
- if self._multi_take_opportunity(tup):
- return self._multi_take(tup)
-
- # no shortcut needed
- retval = self.obj
- for i, key in enumerate(tup):
- if com.is_null_slice(key):
- continue
-
- retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
-
- return retval
-
- def _multi_take_opportunity(self, tup: Tuple) -> bool:
- """
- Check whether there is the possibility to use ``_multi_take``.
-
- Currently the limit is that all axes being indexed, must be indexed with
- list-likes.
-
- Parameters
- ----------
- tup : tuple
- Tuple of indexers, one per axis.
-
- Returns
- -------
- bool
- Whether the current indexing,
- can be passed through `_multi_take`.
- """
- if not all(is_list_like_indexer(x) for x in tup):
- return False
-
- # just too complicated
- if any(com.is_bool_indexer(x) for x in tup):
- return False
-
- return True
-
- def _multi_take(self, tup: Tuple):
- """
- Create the indexers for the passed tuple of keys, and
- executes the take operation. This allows the take operation to be
- executed all at once, rather than once for each dimension.
- Improving efficiency.
-
- Parameters
- ----------
- tup : tuple
- Tuple of indexers, one per axis.
-
- Returns
- -------
- values: same type as the object being indexed
- """
- # GH 836
- o = self.obj
- d = {
- axis: self._get_listlike_indexer(key, axis)
- for (key, axis) in zip(tup, o._AXIS_ORDERS)
- }
- return o._reindex_with_indexers(d, copy=True, allow_dups=True)
-
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
@@ -1412,86 +1304,6 @@ def _getitem_nested_tuple(self, tup: Tuple):
return obj
- def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
- """
- Transform a list-like of keys into a new index and an indexer.
-
- Parameters
- ----------
- key : list-like
- Targeted labels.
- axis: int
- Dimension on which the indexing is being made.
- raise_missing: bool, default False
- Whether to raise a KeyError if some labels were not found.
- Will be removed in the future, and then this method will always behave as
- if ``raise_missing=True``.
-
- Raises
- ------
- KeyError
- If at least one key was requested but none was found, and
- raise_missing=True.
-
- Returns
- -------
- keyarr: Index
- New index (coinciding with 'key' if the axis is unique).
- values : array-like
- Indexer for the return object, -1 denotes keys not found.
- """
- o = self.obj
- ax = o._get_axis(axis)
-
- # Have the index compute an indexer or return None
- # if it cannot handle:
- assert self.name == "loc"
- indexer, keyarr = ax._convert_listlike_indexer(key)
- # We only act on all found values:
- if indexer is not None and (indexer != -1).all():
- self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
- return ax[indexer], indexer
-
- if ax.is_unique and not getattr(ax, "is_overlapping", False):
- indexer = ax.get_indexer_for(key)
- keyarr = ax.reindex(keyarr)[0]
- else:
- keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
-
- self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
- return keyarr, indexer
-
- def _getitem_iterable(self, key, axis: int):
- """
- Index current object with an an iterable collection of keys.
-
- Parameters
- ----------
- key : iterable
- Targeted labels.
- axis: int
- Dimension on which the indexing is being made.
-
- Raises
- ------
- KeyError
- If no key was found. Will change in the future to raise if not all
- keys were found.
-
- Returns
- -------
- scalar, DataFrame, or Series: indexed value(s).
- """
- # we assume that not com.is_bool_indexer(key), as that is
- # handled before we get here.
- self._validate_key(key, axis)
-
- # A collection of keys
- keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
- return self.obj._reindex_with_indexers(
- {axis: [keyarr, indexer]}, copy=True, allow_dups=True
- )
-
def _validate_read_indexer(
self, key, indexer, axis: int, raise_missing: bool = False
):
@@ -1575,9 +1387,15 @@ def __getitem__(self, key):
def _is_scalar_access(self, key: Tuple):
raise NotImplementedError()
+ def _getitem_tuple(self, tup: Tuple):
+ raise AbstractMethodError(self)
+
def _getitem_axis(self, key, axis: int):
raise NotImplementedError()
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ raise AbstractMethodError(self)
+
def _getbool_axis(self, key, axis: int):
# caller is responsible for ensuring non-None axis
labels = self.obj._get_axis(axis)
@@ -1595,6 +1413,9 @@ class _LocIndexer(_LocationIndexer):
"index is integers), listlike of labels, boolean"
)
+ # -------------------------------------------------------------------
+ # Key Checks
+
@Appender(_LocationIndexer._validate_key.__doc__)
def _validate_key(self, key, axis: int):
@@ -1613,6 +1434,9 @@ def _validate_key(self, key, axis: int):
labels = self.obj._get_axis(axis)
labels._convert_scalar_indexer(key, kind="loc")
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ return True
+
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
@@ -1644,6 +1468,61 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
+ # -------------------------------------------------------------------
+ # MultiIndex Handling
+
+ def _multi_take_opportunity(self, tup: Tuple) -> bool:
+ """
+ Check whether there is the possibility to use ``_multi_take``.
+
+ Currently the limit is that all axes being indexed, must be indexed with
+ list-likes.
+
+ Parameters
+ ----------
+ tup : tuple
+ Tuple of indexers, one per axis.
+
+ Returns
+ -------
+ bool
+ Whether the current indexing,
+ can be passed through `_multi_take`.
+ """
+ if not all(is_list_like_indexer(x) for x in tup):
+ return False
+
+ # just too complicated
+ if any(com.is_bool_indexer(x) for x in tup):
+ return False
+
+ return True
+
+ def _multi_take(self, tup: Tuple):
+ """
+ Create the indexers for the passed tuple of keys, and
+ executes the take operation. This allows the take operation to be
+ executed all at once, rather than once for each dimension.
+ Improving efficiency.
+
+ Parameters
+ ----------
+ tup : tuple
+ Tuple of indexers, one per axis.
+
+ Returns
+ -------
+ values: same type as the object being indexed
+ """
+ # GH 836
+ d = {
+ axis: self._get_listlike_indexer(key, axis)
+ for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
+ }
+ return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
+
+ # -------------------------------------------------------------------
+
def _get_partial_string_timestamp_match_key(self, key, labels):
"""
Translate any partial string timestamp matches in key, returning the
@@ -1676,6 +1555,60 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
return key
+ def _getitem_iterable(self, key, axis: int):
+ """
+ Index current object with an an iterable collection of keys.
+
+ Parameters
+ ----------
+ key : iterable
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
+
+ Raises
+ ------
+ KeyError
+ If no key was found. Will change in the future to raise if not all
+ keys were found.
+
+ Returns
+ -------
+ scalar, DataFrame, or Series: indexed value(s).
+ """
+ # we assume that not com.is_bool_indexer(key), as that is
+ # handled before we get here.
+ self._validate_key(key, axis)
+
+ # A collection of keys
+ keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
+ return self.obj._reindex_with_indexers(
+ {axis: [keyarr, indexer]}, copy=True, allow_dups=True
+ )
+
+ def _getitem_tuple(self, tup: Tuple):
+ try:
+ return self._getitem_lowerdim(tup)
+ except IndexingError:
+ pass
+
+ # no multi-index, so validate all of the indexers
+ self._has_valid_tuple(tup)
+
+ # ugly hack for GH #836
+ if self._multi_take_opportunity(tup):
+ return self._multi_take(tup)
+
+ # no shortcut needed
+ retval = self.obj
+ for i, key in enumerate(tup):
+ if com.is_null_slice(key):
+ continue
+
+ retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
+
+ return retval
+
def _getitem_axis(self, key, axis: int):
key = item_from_zerodim(key)
if is_iterator(key):
@@ -1842,6 +1775,53 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
return {"key": key}
raise
+ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
+ """
+ Transform a list-like of keys into a new index and an indexer.
+
+ Parameters
+ ----------
+ key : list-like
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
+ raise_missing: bool, default False
+ Whether to raise a KeyError if some labels were not found.
+ Will be removed in the future, and then this method will always behave as
+ if ``raise_missing=True``.
+
+ Raises
+ ------
+ KeyError
+ If at least one key was requested but none was found, and
+ raise_missing=True.
+
+ Returns
+ -------
+ keyarr: Index
+ New index (coinciding with 'key' if the axis is unique).
+ values : array-like
+ Indexer for the return object, -1 denotes keys not found.
+ """
+ ax = self.obj._get_axis(axis)
+
+ # Have the index compute an indexer or return None
+ # if it cannot handle:
+ indexer, keyarr = ax._convert_listlike_indexer(key)
+ # We only act on all found values:
+ if indexer is not None and (indexer != -1).all():
+ self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
+ return ax[indexer], indexer
+
+ if ax.is_unique and not getattr(ax, "is_overlapping", False):
+ indexer = ax.get_indexer_for(key)
+ keyarr = ax.reindex(keyarr)[0]
+ else:
+ keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
+
+ self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
+ return keyarr, indexer
+
@Appender(IndexingMixin.iloc.__doc__)
class _iLocIndexer(_LocationIndexer):
@@ -1851,6 +1831,9 @@ class _iLocIndexer(_LocationIndexer):
)
_takeable = True
+ # -------------------------------------------------------------------
+ # Key Checks
+
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
if hasattr(key, "index") and isinstance(key.index, Index):
@@ -1891,6 +1874,37 @@ def _validate_key(self, key, axis: int):
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
+ def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
+ """
+ Validate that a positional indexer cannot enlarge its target
+ will raise if needed, does not modify the indexer externally.
+
+ Returns
+ -------
+ bool
+ """
+ if isinstance(indexer, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
+ else:
+ if not isinstance(indexer, tuple):
+ indexer = _tuplify(self.ndim, indexer)
+ for ax, i in zip(self.obj.axes, indexer):
+ if isinstance(i, slice):
+ # should check the stop slice?
+ pass
+ elif is_list_like_indexer(i):
+ # should check the elements?
+ pass
+ elif is_integer(i):
+ if i >= len(ax):
+ raise IndexError(
+ f"{self.name} cannot enlarge its target object"
+ )
+ elif isinstance(i, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
+
+ return True
+
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
@@ -1934,6 +1948,8 @@ def _validate_integer(self, key: int, axis: int) -> None:
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
+ # -------------------------------------------------------------------
+
def _getitem_tuple(self, tup: Tuple):
self._has_valid_tuple(tup)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31589 | 2020-02-02T23:15:21Z | 2020-02-08T01:41:39Z | 2020-02-08T01:41:39Z | 2020-02-08T01:41:43Z | |
BUG: Handle object arrays with NaN in cut | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 180411afb117d..0a2b184c8161e 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -55,6 +55,9 @@ Bug fixes
- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
+**Reshaping**
+
+- Bug where :meth:`cut` improperly handled arrays with object dtype containing ``NaN`` values. (:issue:`31586`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index a18b45a077be0..8b15077636209 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -18,6 +18,7 @@
is_integer,
is_integer_dtype,
is_list_like,
+ is_object_dtype,
is_scalar,
is_timedelta64_dtype,
)
@@ -450,6 +451,8 @@ def _coerce_to_type(x):
# https://github.com/pandas-dev/pandas/issues/31389
elif is_extension_array_dtype(x) and is_integer_dtype(x):
x = x.to_numpy(dtype=np.float64, na_value=np.nan)
+ elif is_object_dtype(x):
+ x = x.astype(np.float64)
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 830e786fd1c6d..8c4e01517b07c 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -624,4 +624,21 @@ def test_cut_nullable_integer(bins, right, include_lowest):
pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
)
expected = cut(a, bins, right=right, include_lowest=include_lowest)
+
+ tm.assert_categorical_equal(result, expected)
+
+
+@pytest.mark.parametrize("bins", [2, [0, 50, 100]])
+@pytest.mark.parametrize("right", [True, False])
+@pytest.mark.parametrize("include_lowest", [True, False])
+def test_cut_object_dtype_with_na(bins, right, include_lowest):
+ # https://github.com/pandas-dev/pandas/issues/31586
+ arr = np.arange(100).astype(object)
+ arr[::3] = np.nan
+
+ result = cut(arr, bins, right=right, include_lowest=include_lowest)
+ expected = cut(
+ arr.astype(np.float64), bins, right=right, include_lowest=include_lowest
+ )
+
tm.assert_categorical_equal(result, expected)
| - [x] closes #31586
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31588 | 2020-02-02T22:41:04Z | 2020-02-13T22:56:59Z | null | 2020-02-13T22:57:03Z |
Backport PR #31440 on branch 1.0.x (BUG: Fix qcut for nullable integers) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 31bc1cefcb292..71a67e10ca38c 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -121,6 +121,7 @@ ExtensionArray
^^^^^^^^^^^^^^
- Bug in dtype being lost in ``__invert__`` (``~`` operator) for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
+- Bug where :meth:`qcut` would raise when passed a nullable integer. (:issue:`31389`)
-
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 15e6aaeaa5e9d..50e382572297b 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -202,17 +202,10 @@ def cut(
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
- # for handling the cut for datetime and timedelta objects
original = x
x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
- # To support cut(IntegerArray), we convert to object dtype with NaN
- # Will properly support in the future.
- # https://github.com/pandas-dev/pandas/pull/31290
- if is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):
- x = x.to_numpy(dtype=object, na_value=np.nan)
-
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
@@ -435,7 +428,7 @@ def _bins_to_cuts(
def _coerce_to_type(x):
"""
- if the passed data is of datetime/timedelta or bool type,
+ if the passed data is of datetime/timedelta, bool or nullable int type,
this method converts it to numeric so that cut or qcut method can
handle it
"""
@@ -452,6 +445,12 @@ def _coerce_to_type(x):
elif is_bool_dtype(x):
# GH 20303
x = x.astype(np.int64)
+ # To support cut and qcut for IntegerArray we convert to float dtype.
+ # Will properly support in the future.
+ # https://github.com/pandas-dev/pandas/pull/31290
+ # https://github.com/pandas-dev/pandas/issues/31389
+ elif is_extension_array_dtype(x) and is_integer_dtype(x):
+ x = x.to_numpy(dtype=np.float64, na_value=np.nan)
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index c165910777649..857b793e9e9a8 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1059,19 +1059,6 @@ def test_value_counts_na():
tm.assert_series_equal(result, expected)
-@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
-@pytest.mark.parametrize("right", [True, False])
-@pytest.mark.parametrize("include_lowest", [True, False])
-def test_cut(bins, right, include_lowest):
- a = np.random.randint(0, 10, size=50).astype(object)
- a[::2] = np.nan
- result = pd.cut(
- pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
- )
- expected = pd.cut(a, bins, right=right, include_lowest=include_lowest)
- tm.assert_categorical_equal(result, expected)
-
-
def test_array_setitem_nullable_boolean_mask():
# GH 31446
ser = pd.Series([1, 2], dtype="Int64")
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 13b6f05ed304a..830e786fd1c6d 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -612,3 +612,16 @@ def test_cut_incorrect_labels(labels):
msg = "Bin labels must either be False, None or passed in as a list-like argument"
with pytest.raises(ValueError, match=msg):
cut(values, 4, labels=labels)
+
+
+@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
+@pytest.mark.parametrize("right", [True, False])
+@pytest.mark.parametrize("include_lowest", [True, False])
+def test_cut_nullable_integer(bins, right, include_lowest):
+ a = np.random.randint(0, 10, size=50).astype(float)
+ a[::2] = np.nan
+ result = cut(
+ pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
+ )
+ expected = cut(a, bins, right=right, include_lowest=include_lowest)
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py
index 95406a5ebf4f7..c436ab5d90578 100644
--- a/pandas/tests/reshape/test_qcut.py
+++ b/pandas/tests/reshape/test_qcut.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
@@ -286,3 +287,14 @@ def test_qcut_bool_coercion_to_int(bins, box, compare):
expected = qcut(data_expected, bins, duplicates="drop")
result = qcut(data_result, bins, duplicates="drop")
compare(result, expected)
+
+
+@pytest.mark.parametrize("q", [2, 5, 10])
+def test_qcut_nullable_integer(q, any_nullable_int_dtype):
+ arr = pd.array(np.arange(100), dtype=any_nullable_int_dtype)
+ arr[::2] = pd.NA
+
+ result = qcut(arr, q)
+ expected = qcut(arr.astype(float), q)
+
+ tm.assert_categorical_equal(result, expected)
| Backport PR #31440: BUG: Fix qcut for nullable integers | https://api.github.com/repos/pandas-dev/pandas/pulls/31587 | 2020-02-02T22:19:37Z | 2020-02-02T22:57:17Z | 2020-02-02T22:57:17Z | 2020-02-02T22:57:17Z |
REF: define _convert_to_indexer in Loc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3d9069a5516f1..77003719360d9 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -566,7 +566,7 @@ def iat(self) -> "_iAtIndexer":
return _iAtIndexer("iat", self)
-class _NDFrameIndexer(_NDFrameIndexerBase):
+class _LocationIndexer(_NDFrameIndexerBase):
_valid_types: str
axis = None
@@ -1580,10 +1580,6 @@ def _validate_read_indexer(
def _convert_to_indexer(self, key, axis: int):
raise AbstractMethodError(self)
-
-class _LocationIndexer(_NDFrameIndexer):
- _takeable: bool = False
-
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
@@ -1614,94 +1610,17 @@ def _getbool_axis(self, key, axis: int):
inds = key.nonzero()[0]
return self.obj._take_with_is_copy(inds, axis=axis)
- def _convert_to_indexer(self, key, axis: int):
- """
- Convert indexing key into something we can use to do actual fancy
- indexing on a ndarray.
-
- Examples
- ix[:5] -> slice(0, 5)
- ix[[1,2,3]] -> [1,2,3]
- ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
-
- Going by Zen of Python?
- 'In the face of ambiguity, refuse the temptation to guess.'
- raise AmbiguousIndexError with integer labels?
- - No, prefer label-based indexing
- """
- labels = self.obj._get_axis(axis)
-
- if isinstance(key, slice):
- return self._convert_slice_indexer(key, axis)
-
- # try to find out correct indexer, if not type correct raise
- try:
- key = self._convert_scalar_indexer(key, axis)
- except TypeError:
- # but we will allow setting
- pass
-
- # see if we are positional in nature
- is_int_index = labels.is_integer()
- is_int_positional = is_integer(key) and not is_int_index
-
- if is_scalar(key) or isinstance(labels, ABCMultiIndex):
- # Otherwise get_loc will raise InvalidIndexError
-
- # if we are a label return me
- try:
- return labels.get_loc(key)
- except LookupError:
- if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
- if len(key) == labels.nlevels:
- return {"key": key}
- raise
- except TypeError:
- pass
- except ValueError:
- if not is_int_positional:
- raise
-
- # a positional
- if is_int_positional:
-
- # if we are setting and its not a valid location
- # its an insert which fails by definition
-
- # always valid
- return {"key": key}
-
- if is_nested_tuple(key, labels):
- return labels.get_locs(key)
-
- elif is_list_like_indexer(key):
-
- if com.is_bool_indexer(key):
- key = check_bool_indexer(labels, key)
- (inds,) = key.nonzero()
- return inds
- else:
- # When setting, missing keys are not allowed, even with .loc:
- return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
- else:
- try:
- return labels.get_loc(key)
- except LookupError:
- # allow a not found key only if we are a setter
- if not is_list_like_indexer(key):
- return {"key": key}
- raise
-
@Appender(IndexingMixin.loc.__doc__)
class _LocIndexer(_LocationIndexer):
+ _takeable: bool = False
_valid_types = (
"labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean"
)
- @Appender(_NDFrameIndexer._validate_key.__doc__)
+ @Appender(_LocationIndexer._validate_key.__doc__)
def _validate_key(self, key, axis: int):
# valid for a collection of labels (we check their presence later)
@@ -1867,6 +1786,84 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)
+ def _convert_to_indexer(self, key, axis: int):
+ """
+ Convert indexing key into something we can use to do actual fancy
+ indexing on a ndarray.
+
+ Examples
+ ix[:5] -> slice(0, 5)
+ ix[[1,2,3]] -> [1,2,3]
+ ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
+
+ Going by Zen of Python?
+ 'In the face of ambiguity, refuse the temptation to guess.'
+ raise AmbiguousIndexError with integer labels?
+ - No, prefer label-based indexing
+ """
+ labels = self.obj._get_axis(axis)
+
+ if isinstance(key, slice):
+ return self._convert_slice_indexer(key, axis)
+
+ # try to find out correct indexer, if not type correct raise
+ try:
+ key = self._convert_scalar_indexer(key, axis)
+ except TypeError:
+ # but we will allow setting
+ pass
+
+ # see if we are positional in nature
+ is_int_index = labels.is_integer()
+ is_int_positional = is_integer(key) and not is_int_index
+
+ if is_scalar(key) or isinstance(labels, ABCMultiIndex):
+ # Otherwise get_loc will raise InvalidIndexError
+
+ # if we are a label return me
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
+ if len(key) == labels.nlevels:
+ return {"key": key}
+ raise
+ except TypeError:
+ pass
+ except ValueError:
+ if not is_int_positional:
+ raise
+
+ # a positional
+ if is_int_positional:
+
+ # if we are setting and its not a valid location
+ # its an insert which fails by definition
+
+ # always valid
+ return {"key": key}
+
+ if is_nested_tuple(key, labels):
+ return labels.get_locs(key)
+
+ elif is_list_like_indexer(key):
+
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(labels, key)
+ (inds,) = key.nonzero()
+ return inds
+ else:
+ # When setting, missing keys are not allowed, even with .loc:
+ return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
+ else:
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ # allow a not found key only if we are a setter
+ if not is_list_like_indexer(key):
+ return {"key": key}
+ raise
+
@Appender(IndexingMixin.iloc.__doc__)
class _iLocIndexer(_LocationIndexer):
| _convert_to_indexer is overriden by iloc, so the version currently defined in LocationIndexer is only used in Loc. This moves it directly to Loc.
Also, since ix has been removed, we have an unnecessary level of the class hierarchy, so this merges NDFrameIndexer and LocationIndexer.
Follow-ups will move base class methods that are actually Loc-specific to Loc. | https://api.github.com/repos/pandas-dev/pandas/pulls/31585 | 2020-02-02T20:18:45Z | 2020-02-02T22:22:17Z | 2020-02-02T22:22:17Z | 2020-02-02T22:39:38Z |
Backport PR #31545: BUG&TST: df.replace fail after converting to new … | diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index aa91e7a489356..92b74c4409d7d 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1356,3 +1356,10 @@ def test_replace_replacer_dtype(self, replacer):
result = df.replace({"a": replacer, "b": replacer})
expected = pd.DataFrame([replacer])
tm.assert_frame_equal(result, expected)
+
+ def test_replace_after_convert_dtypes(self):
+ # GH31517
+ df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
+ result = df.replace(1, 10)
+ expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
+ tm.assert_frame_equal(result, expected)
| xref #31555
xref #31545 | https://api.github.com/repos/pandas-dev/pandas/pulls/31584 | 2020-02-02T19:07:30Z | 2020-02-02T19:42:56Z | 2020-02-02T19:42:56Z | 2020-02-02T19:42:56Z |
REF: Timestamp constructor tests | diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 68285d41bda70..95d14ad4c86f7 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -950,17 +950,3 @@ def test_datetimeindex_constructor_misc(self):
)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
-
-
-def test_timedelta_constructor_identity():
- # Test for #30543
- expected = pd.Timedelta(np.timedelta64(1, "s"))
- result = pd.Timedelta(expected)
- assert result is expected
-
-
-def test_timestamp_constructor_identity():
- # Test for #30543
- expected = pd.Timestamp("2017-01-01T12")
- result = pd.Timestamp(expected)
- assert result is expected
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index ae1e84576c092..25c9fc19981be 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -274,3 +274,10 @@ def test_td_constructor_on_nanoseconds(constructed_td, conversion):
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds="abc")
+
+
+def test_timedelta_constructor_identity():
+ # Test for #30543
+ expected = Timedelta(np.timedelta64(1, "s"))
+ result = Timedelta(expected)
+ assert result is expected
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
new file mode 100644
index 0000000000000..737a85faa4c9b
--- /dev/null
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -0,0 +1,552 @@
+import calendar
+from datetime import datetime, timedelta
+
+import dateutil.tz
+from dateutil.tz import tzutc
+import numpy as np
+import pytest
+import pytz
+
+from pandas.errors import OutOfBoundsDatetime
+
+from pandas import Period, Timedelta, Timestamp, compat
+
+from pandas.tseries import offsets
+
+
+class TestTimestampConstructors:
+ def test_constructor(self):
+ base_str = "2014-07-01 09:00"
+ base_dt = datetime(2014, 7, 1, 9)
+ base_expected = 1_404_205_200_000_000_000
+
+ # confirm base representation is correct
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
+
+ tests = [
+ (base_str, base_dt, base_expected),
+ (
+ "2014-07-01 10:00",
+ datetime(2014, 7, 1, 10),
+ base_expected + 3600 * 1_000_000_000,
+ ),
+ (
+ "2014-07-01 09:00:00.000008000",
+ datetime(2014, 7, 1, 9, 0, 0, 8),
+ base_expected + 8000,
+ ),
+ (
+ "2014-07-01 09:00:00.000000005",
+ Timestamp("2014-07-01 09:00:00.000000005"),
+ base_expected + 5,
+ ),
+ ]
+
+ timezones = [
+ (None, 0),
+ ("UTC", 0),
+ (pytz.utc, 0),
+ ("Asia/Tokyo", 9),
+ ("US/Eastern", -4),
+ ("dateutil/US/Pacific", -7),
+ (pytz.FixedOffset(-180), -3),
+ (dateutil.tz.tzoffset(None, 18000), 5),
+ ]
+
+ for date_str, date, expected in tests:
+ for result in [Timestamp(date_str), Timestamp(date)]:
+ # only with timestring
+ assert result.value == expected
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ assert result.value == expected
+
+ # with timezone
+ for tz, offset in timezones:
+ for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
+ expected_tz = expected - offset * 3600 * 1_000_000_000
+ assert result.value == expected_tz
+
+ # should preserve tz
+ result = Timestamp(result)
+ assert result.value == expected_tz
+
+ # should convert to UTC
+ if tz is not None:
+ result = Timestamp(result).tz_convert("UTC")
+ else:
+ result = Timestamp(result, tz="UTC")
+ expected_utc = expected - offset * 3600 * 1_000_000_000
+ assert result.value == expected_utc
+
+ def test_constructor_with_stringoffset(self):
+ # GH 7833
+ base_str = "2014-07-01 11:00:00+02:00"
+ base_dt = datetime(2014, 7, 1, 9)
+ base_expected = 1_404_205_200_000_000_000
+
+ # confirm base representation is correct
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
+
+ tests = [
+ (base_str, base_expected),
+ ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000),
+ ("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000),
+ ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5),
+ ]
+
+ timezones = [
+ (None, 0),
+ ("UTC", 0),
+ (pytz.utc, 0),
+ ("Asia/Tokyo", 9),
+ ("US/Eastern", -4),
+ ("dateutil/US/Pacific", -7),
+ (pytz.FixedOffset(-180), -3),
+ (dateutil.tz.tzoffset(None, 18000), 5),
+ ]
+
+ for date_str, expected in tests:
+ for result in [Timestamp(date_str)]:
+ # only with timestring
+ assert result.value == expected
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ assert result.value == expected
+
+ # with timezone
+ for tz, offset in timezones:
+ result = Timestamp(date_str, tz=tz)
+ expected_tz = expected
+ assert result.value == expected_tz
+
+ # should preserve tz
+ result = Timestamp(result)
+ assert result.value == expected_tz
+
+ # should convert to UTC
+ result = Timestamp(result).tz_convert("UTC")
+ expected_utc = expected
+ assert result.value == expected_utc
+
+ # This should be 2013-11-01 05:00 in UTC
+ # converted to Chicago tz
+ result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
+ assert result.value == Timestamp("2013-11-01 05:00").value
+ expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # This should be 2013-11-01 05:00 in UTC
+ # converted to Tokyo tz (+09:00)
+ result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo")
+ assert result.value == Timestamp("2013-11-01 05:00").value
+ expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # GH11708
+ # This should be 2015-11-18 10:00 in UTC
+ # converted to Asia/Katmandu
+ result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
+ assert result.value == Timestamp("2015-11-18 10:00").value
+ expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # This should be 2015-11-18 10:00 in UTC
+ # converted to Asia/Kolkata
+ result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
+ assert result.value == Timestamp("2015-11-18 10:00").value
+ expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ def test_constructor_invalid(self):
+ with pytest.raises(TypeError, match="Cannot convert input"):
+ Timestamp(slice(2))
+ with pytest.raises(ValueError, match="Cannot convert Period"):
+ Timestamp(Period("1000-01-01"))
+
+ def test_constructor_invalid_tz(self):
+ # GH#17690
+ with pytest.raises(TypeError, match="must be a datetime.tzinfo"):
+ Timestamp("2017-10-22", tzinfo="US/Eastern")
+
+ with pytest.raises(ValueError, match="at most one of"):
+ Timestamp("2017-10-22", tzinfo=pytz.utc, tz="UTC")
+
+ with pytest.raises(ValueError, match="Invalid frequency:"):
+ # GH#5168
+ # case where user tries to pass tz as an arg, not kwarg, gets
+ # interpreted as a `freq`
+ Timestamp("2012-01-01", "US/Pacific")
+
+ def test_constructor_strptime(self):
+ # GH25016
+ # Test support for Timestamp.strptime
+ fmt = "%Y%m%d-%H%M%S-%f%z"
+ ts = "20190129-235348-000001+0000"
+ with pytest.raises(NotImplementedError):
+ Timestamp.strptime(ts, fmt)
+
+ def test_constructor_tz_or_tzinfo(self):
+ # GH#17943, GH#17690, GH#5168
+ stamps = [
+ Timestamp(year=2017, month=10, day=22, tz="UTC"),
+ Timestamp(year=2017, month=10, day=22, tzinfo=pytz.utc),
+ Timestamp(year=2017, month=10, day=22, tz=pytz.utc),
+ Timestamp(datetime(2017, 10, 22), tzinfo=pytz.utc),
+ Timestamp(datetime(2017, 10, 22), tz="UTC"),
+ Timestamp(datetime(2017, 10, 22), tz=pytz.utc),
+ ]
+ assert all(ts == stamps[0] for ts in stamps)
+
+ def test_constructor_positional(self):
+ # see gh-10758
+ with pytest.raises(TypeError):
+ Timestamp(2000, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 0, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 13, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 1, 0)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 1, 32)
+
+ # see gh-11630
+ assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112"))
+ assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(
+ Timestamp("2015-11-12 01:02:03.999999")
+ )
+
+ def test_constructor_keyword(self):
+ # GH 10758
+ with pytest.raises(TypeError):
+ Timestamp(year=2000, month=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=0, day=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=13, day=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=1, day=0)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=1, day=32)
+
+ assert repr(Timestamp(year=2015, month=11, day=12)) == repr(
+ Timestamp("20151112")
+ )
+
+ assert repr(
+ Timestamp(
+ year=2015,
+ month=11,
+ day=12,
+ hour=1,
+ minute=2,
+ second=3,
+ microsecond=999999,
+ )
+ ) == repr(Timestamp("2015-11-12 01:02:03.999999"))
+
+ def test_constructor_fromordinal(self):
+ base = datetime(2000, 1, 1)
+
+ ts = Timestamp.fromordinal(base.toordinal(), freq="D")
+ assert base == ts
+ assert ts.freq == "D"
+ assert base.toordinal() == ts.toordinal()
+
+ ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern")
+ assert Timestamp("2000-01-01", tz="US/Eastern") == ts
+ assert base.toordinal() == ts.toordinal()
+
+ # GH#3042
+ dt = datetime(2011, 4, 16, 0, 0)
+ ts = Timestamp.fromordinal(dt.toordinal())
+ assert ts.to_pydatetime() == dt
+
+ # with a tzinfo
+ stamp = Timestamp("2011-4-16", tz="US/Eastern")
+ dt_tz = stamp.to_pydatetime()
+ ts = Timestamp.fromordinal(dt_tz.toordinal(), tz="US/Eastern")
+ assert ts.to_pydatetime() == dt_tz
+
+ @pytest.mark.parametrize(
+ "result",
+ [
+ Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
+ Timestamp(
+ year=2000,
+ month=1,
+ day=2,
+ hour=3,
+ minute=4,
+ second=5,
+ microsecond=6,
+ nanosecond=1,
+ ),
+ Timestamp(
+ year=2000,
+ month=1,
+ day=2,
+ hour=3,
+ minute=4,
+ second=5,
+ microsecond=6,
+ nanosecond=1,
+ tz="UTC",
+ ),
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
+ ],
+ )
+ def test_constructor_nanosecond(self, result):
+ # GH 18898
+ expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
+ expected = expected + Timedelta(nanoseconds=1)
+ assert result == expected
+
+ @pytest.mark.parametrize("z", ["Z0", "Z00"])
+ def test_constructor_invalid_Z0_isostring(self, z):
+ # GH 8910
+ with pytest.raises(ValueError):
+ Timestamp("2014-11-02 01:00{}".format(z))
+
+ @pytest.mark.parametrize(
+ "arg",
+ [
+ "year",
+ "month",
+ "day",
+ "hour",
+ "minute",
+ "second",
+ "microsecond",
+ "nanosecond",
+ ],
+ )
+ def test_invalid_date_kwarg_with_string_input(self, arg):
+ kwarg = {arg: 1}
+ with pytest.raises(ValueError):
+ Timestamp("2010-10-10 12:59:59.999999999", **kwarg)
+
+ def test_out_of_bounds_integer_value(self):
+ # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp(Timestamp.max.value * 2)
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp(Timestamp.min.value * 2)
+
+ def test_out_of_bounds_value(self):
+ one_us = np.timedelta64(1).astype("timedelta64[us]")
+
+ # By definition we can't go out of bounds in [ns], so we
+ # convert the datetime64s to [us] so we can go out of bounds
+ min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]")
+ max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]")
+
+ # No error for the min/max datetimes
+ Timestamp(min_ts_us)
+ Timestamp(max_ts_us)
+
+ # One us less than the minimum is an error
+ with pytest.raises(ValueError):
+ Timestamp(min_ts_us - one_us)
+
+ # One us more than the maximum is an error
+ with pytest.raises(ValueError):
+ Timestamp(max_ts_us + one_us)
+
+ def test_out_of_bounds_string(self):
+ with pytest.raises(ValueError):
+ Timestamp("1676-01-01")
+ with pytest.raises(ValueError):
+ Timestamp("2263-01-01")
+
+ def test_barely_out_of_bounds(self):
+ # GH#19529
+ # GH#19382 close enough to bounds that dropping nanos would result
+ # in an in-bounds datetime
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp("2262-04-11 23:47:16.854775808")
+
+ def test_bounds_with_different_units(self):
+ out_of_bounds_dates = ("1677-09-21", "2262-04-12")
+
+ time_units = ("D", "h", "m", "s", "ms", "us")
+
+ for date_string in out_of_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, unit)
+ with pytest.raises(ValueError):
+ Timestamp(dt64)
+
+ in_bounds_dates = ("1677-09-23", "2262-04-11")
+
+ for date_string in in_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, unit)
+ Timestamp(dt64)
+
+ def test_min_valid(self):
+ # Ensure that Timestamp.min is a valid Timestamp
+ Timestamp(Timestamp.min)
+
+ def test_max_valid(self):
+ # Ensure that Timestamp.max is a valid Timestamp
+ Timestamp(Timestamp.max)
+
+ def test_now(self):
+ # GH#9000
+ ts_from_string = Timestamp("now")
+ ts_from_method = Timestamp.now()
+ ts_datetime = datetime.now()
+
+ ts_from_string_tz = Timestamp("now", tz="US/Eastern")
+ ts_from_method_tz = Timestamp.now(tz="US/Eastern")
+
+ # Check that the delta between the times is less than 1s (arbitrarily
+ # small)
+ delta = Timedelta(seconds=1)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (
+ abs(
+ ts_from_string_tz.tz_localize(None)
+ - ts_from_method_tz.tz_localize(None)
+ )
+ < delta
+ )
+
+ def test_today(self):
+ ts_from_string = Timestamp("today")
+ ts_from_method = Timestamp.today()
+ ts_datetime = datetime.today()
+
+ ts_from_string_tz = Timestamp("today", tz="US/Eastern")
+ ts_from_method_tz = Timestamp.today(tz="US/Eastern")
+
+ # Check that the delta between the times is less than 1s (arbitrarily
+ # small)
+ delta = Timedelta(seconds=1)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (
+ abs(
+ ts_from_string_tz.tz_localize(None)
+ - ts_from_method_tz.tz_localize(None)
+ )
+ < delta
+ )
+
+ @pytest.mark.parametrize("tz", [None, pytz.timezone("US/Pacific")])
+ def test_disallow_setting_tz(self, tz):
+ # GH 3746
+ ts = Timestamp("2010")
+ with pytest.raises(AttributeError):
+ ts.tz = tz
+
+ @pytest.mark.parametrize("offset", ["+0300", "+0200"])
+ def test_construct_timestamp_near_dst(self, offset):
+ # GH 20854
+ expected = Timestamp(
+ "2016-10-30 03:00:00{}".format(offset), tz="Europe/Helsinki"
+ )
+ result = Timestamp(expected).tz_convert("Europe/Helsinki")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"]
+ )
+ def test_construct_with_different_string_format(self, arg):
+ # GH 12064
+ result = Timestamp(arg)
+ expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
+ assert result == expected
+
+ def test_construct_timestamp_preserve_original_frequency(self):
+ # GH 22311
+ result = Timestamp(Timestamp("2010-08-08", freq="D")).freq
+ expected = offsets.Day()
+ assert result == expected
+
+ def test_constructor_invalid_frequency(self):
+ # GH 22311
+ with pytest.raises(ValueError, match="Invalid frequency:"):
+ Timestamp("2012-01-01", freq=[])
+
+ @pytest.mark.parametrize("box", [datetime, Timestamp])
+ def test_raise_tz_and_tzinfo_in_datetime_input(self, box):
+ # GH 23579
+ kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": pytz.utc}
+ with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
+ Timestamp(box(**kwargs), tz="US/Pacific")
+ with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
+ Timestamp(box(**kwargs), tzinfo=pytz.timezone("US/Pacific"))
+
+ def test_dont_convert_dateutil_utc_to_pytz_utc(self):
+ result = Timestamp(datetime(2018, 1, 1), tz=tzutc())
+ expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())
+ assert result == expected
+
+ def test_constructor_subclassed_datetime(self):
+ # GH 25851
+ # ensure that subclassed datetime works for
+ # Timestamp creation
+ class SubDatetime(datetime):
+ pass
+
+ data = SubDatetime(2000, 1, 1)
+ result = Timestamp(data)
+ expected = Timestamp(2000, 1, 1)
+ assert result == expected
+
+ @pytest.mark.skipif(
+ not compat.PY38,
+ reason="datetime.fromisocalendar was added in Python version 3.8",
+ )
+ def test_constructor_fromisocalendar(self):
+ # GH 30395
+ expected_timestamp = Timestamp("2000-01-03 00:00:00")
+ expected_stdlib = datetime.fromisocalendar(2000, 1, 1)
+ result = Timestamp.fromisocalendar(2000, 1, 1)
+ assert result == expected_timestamp
+ assert result == expected_stdlib
+ assert isinstance(result, Timestamp)
+
+
+def test_constructor_ambigous_dst():
+ # GH 24329
+ # Make sure that calling Timestamp constructor
+ # on Timestamp created from ambiguous time
+ # doesn't change Timestamp.value
+ ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
+ expected = ts.value
+ result = Timestamp(ts).value
+ assert result == expected
+
+
+@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999])
+def test_constructor_before_dst_switch(epoch):
+ # GH 31043
+ # Make sure that calling Timestamp constructor
+ # on time just before DST switch doesn't lead to
+ # nonexistent time or value change
+ ts = Timestamp(epoch, tz="dateutil/America/Los_Angeles")
+ result = ts.tz.dst(ts)
+ expected = timedelta(seconds=0)
+ assert Timestamp(ts).value == epoch
+ assert result == expected
+
+
+def test_timestamp_constructor_identity():
+ # Test for #30543
+ expected = Timestamp("2017-01-01T12")
+ result = Timestamp(expected)
+ assert result is expected
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index a91ff3776eafc..cee7ac450e411 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -5,7 +5,6 @@
import locale
import unicodedata
-import dateutil
from dateutil.tz import tzutc
import numpy as np
import pytest
@@ -13,12 +12,10 @@
from pytz import timezone, utc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone
-import pandas.compat as compat
from pandas.compat.numpy import np_datetime64_compat
-from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
-from pandas import NaT, Period, Timedelta, Timestamp
+from pandas import NaT, Timedelta, Timestamp
import pandas._testing as tm
from pandas.tseries import offsets
@@ -197,513 +194,6 @@ def test_resolution(self):
assert Timestamp.resolution == Timedelta(nanoseconds=1)
-class TestTimestampConstructors:
- def test_constructor(self):
- base_str = "2014-07-01 09:00"
- base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1_404_205_200_000_000_000
-
- # confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
-
- tests = [
- (base_str, base_dt, base_expected),
- (
- "2014-07-01 10:00",
- datetime(2014, 7, 1, 10),
- base_expected + 3600 * 1_000_000_000,
- ),
- (
- "2014-07-01 09:00:00.000008000",
- datetime(2014, 7, 1, 9, 0, 0, 8),
- base_expected + 8000,
- ),
- (
- "2014-07-01 09:00:00.000000005",
- Timestamp("2014-07-01 09:00:00.000000005"),
- base_expected + 5,
- ),
- ]
-
- timezones = [
- (None, 0),
- ("UTC", 0),
- (pytz.utc, 0),
- ("Asia/Tokyo", 9),
- ("US/Eastern", -4),
- ("dateutil/US/Pacific", -7),
- (pytz.FixedOffset(-180), -3),
- (dateutil.tz.tzoffset(None, 18000), 5),
- ]
-
- for date_str, date, expected in tests:
- for result in [Timestamp(date_str), Timestamp(date)]:
- # only with timestring
- assert result.value == expected
-
- # re-creation shouldn't affect to internal value
- result = Timestamp(result)
- assert result.value == expected
-
- # with timezone
- for tz, offset in timezones:
- for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
- expected_tz = expected - offset * 3600 * 1_000_000_000
- assert result.value == expected_tz
-
- # should preserve tz
- result = Timestamp(result)
- assert result.value == expected_tz
-
- # should convert to UTC
- if tz is not None:
- result = Timestamp(result).tz_convert("UTC")
- else:
- result = Timestamp(result, tz="UTC")
- expected_utc = expected - offset * 3600 * 1_000_000_000
- assert result.value == expected_utc
-
- def test_constructor_with_stringoffset(self):
- # GH 7833
- base_str = "2014-07-01 11:00:00+02:00"
- base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1_404_205_200_000_000_000
-
- # confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
-
- tests = [
- (base_str, base_expected),
- ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000),
- ("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000),
- ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5),
- ]
-
- timezones = [
- (None, 0),
- ("UTC", 0),
- (pytz.utc, 0),
- ("Asia/Tokyo", 9),
- ("US/Eastern", -4),
- ("dateutil/US/Pacific", -7),
- (pytz.FixedOffset(-180), -3),
- (dateutil.tz.tzoffset(None, 18000), 5),
- ]
-
- for date_str, expected in tests:
- for result in [Timestamp(date_str)]:
- # only with timestring
- assert result.value == expected
-
- # re-creation shouldn't affect to internal value
- result = Timestamp(result)
- assert result.value == expected
-
- # with timezone
- for tz, offset in timezones:
- result = Timestamp(date_str, tz=tz)
- expected_tz = expected
- assert result.value == expected_tz
-
- # should preserve tz
- result = Timestamp(result)
- assert result.value == expected_tz
-
- # should convert to UTC
- result = Timestamp(result).tz_convert("UTC")
- expected_utc = expected
- assert result.value == expected_utc
-
- # This should be 2013-11-01 05:00 in UTC
- # converted to Chicago tz
- result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
- assert result.value == Timestamp("2013-11-01 05:00").value
- expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # This should be 2013-11-01 05:00 in UTC
- # converted to Tokyo tz (+09:00)
- result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo")
- assert result.value == Timestamp("2013-11-01 05:00").value
- expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # GH11708
- # This should be 2015-11-18 10:00 in UTC
- # converted to Asia/Katmandu
- result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
- assert result.value == Timestamp("2015-11-18 10:00").value
- expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # This should be 2015-11-18 10:00 in UTC
- # converted to Asia/Kolkata
- result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
- assert result.value == Timestamp("2015-11-18 10:00").value
- expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- def test_constructor_invalid(self):
- with pytest.raises(TypeError, match="Cannot convert input"):
- Timestamp(slice(2))
- with pytest.raises(ValueError, match="Cannot convert Period"):
- Timestamp(Period("1000-01-01"))
-
- def test_constructor_invalid_tz(self):
- # GH#17690
- with pytest.raises(TypeError, match="must be a datetime.tzinfo"):
- Timestamp("2017-10-22", tzinfo="US/Eastern")
-
- with pytest.raises(ValueError, match="at most one of"):
- Timestamp("2017-10-22", tzinfo=utc, tz="UTC")
-
- with pytest.raises(ValueError, match="Invalid frequency:"):
- # GH#5168
- # case where user tries to pass tz as an arg, not kwarg, gets
- # interpreted as a `freq`
- Timestamp("2012-01-01", "US/Pacific")
-
- def test_constructor_strptime(self):
- # GH25016
- # Test support for Timestamp.strptime
- fmt = "%Y%m%d-%H%M%S-%f%z"
- ts = "20190129-235348-000001+0000"
- with pytest.raises(NotImplementedError):
- Timestamp.strptime(ts, fmt)
-
- def test_constructor_tz_or_tzinfo(self):
- # GH#17943, GH#17690, GH#5168
- stamps = [
- Timestamp(year=2017, month=10, day=22, tz="UTC"),
- Timestamp(year=2017, month=10, day=22, tzinfo=utc),
- Timestamp(year=2017, month=10, day=22, tz=utc),
- Timestamp(datetime(2017, 10, 22), tzinfo=utc),
- Timestamp(datetime(2017, 10, 22), tz="UTC"),
- Timestamp(datetime(2017, 10, 22), tz=utc),
- ]
- assert all(ts == stamps[0] for ts in stamps)
-
- def test_constructor_positional(self):
- # see gh-10758
- with pytest.raises(TypeError):
- Timestamp(2000, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 0, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 13, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 1, 0)
- with pytest.raises(ValueError):
- Timestamp(2000, 1, 32)
-
- # see gh-11630
- assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112"))
- assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(
- Timestamp("2015-11-12 01:02:03.999999")
- )
-
- def test_constructor_keyword(self):
- # GH 10758
- with pytest.raises(TypeError):
- Timestamp(year=2000, month=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=0, day=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=13, day=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=1, day=0)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=1, day=32)
-
- assert repr(Timestamp(year=2015, month=11, day=12)) == repr(
- Timestamp("20151112")
- )
-
- assert repr(
- Timestamp(
- year=2015,
- month=11,
- day=12,
- hour=1,
- minute=2,
- second=3,
- microsecond=999999,
- )
- ) == repr(Timestamp("2015-11-12 01:02:03.999999"))
-
- def test_constructor_fromordinal(self):
- base = datetime(2000, 1, 1)
-
- ts = Timestamp.fromordinal(base.toordinal(), freq="D")
- assert base == ts
- assert ts.freq == "D"
- assert base.toordinal() == ts.toordinal()
-
- ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern")
- assert Timestamp("2000-01-01", tz="US/Eastern") == ts
- assert base.toordinal() == ts.toordinal()
-
- # GH#3042
- dt = datetime(2011, 4, 16, 0, 0)
- ts = Timestamp.fromordinal(dt.toordinal())
- assert ts.to_pydatetime() == dt
-
- # with a tzinfo
- stamp = Timestamp("2011-4-16", tz="US/Eastern")
- dt_tz = stamp.to_pydatetime()
- ts = Timestamp.fromordinal(dt_tz.toordinal(), tz="US/Eastern")
- assert ts.to_pydatetime() == dt_tz
-
- @pytest.mark.parametrize(
- "result",
- [
- Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
- Timestamp(
- year=2000,
- month=1,
- day=2,
- hour=3,
- minute=4,
- second=5,
- microsecond=6,
- nanosecond=1,
- ),
- Timestamp(
- year=2000,
- month=1,
- day=2,
- hour=3,
- minute=4,
- second=5,
- microsecond=6,
- nanosecond=1,
- tz="UTC",
- ),
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
- ],
- )
- def test_constructor_nanosecond(self, result):
- # GH 18898
- expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
- expected = expected + Timedelta(nanoseconds=1)
- assert result == expected
-
- @pytest.mark.parametrize("z", ["Z0", "Z00"])
- def test_constructor_invalid_Z0_isostring(self, z):
- # GH 8910
- with pytest.raises(ValueError):
- Timestamp("2014-11-02 01:00{}".format(z))
-
- @pytest.mark.parametrize(
- "arg",
- [
- "year",
- "month",
- "day",
- "hour",
- "minute",
- "second",
- "microsecond",
- "nanosecond",
- ],
- )
- def test_invalid_date_kwarg_with_string_input(self, arg):
- kwarg = {arg: 1}
- with pytest.raises(ValueError):
- Timestamp("2010-10-10 12:59:59.999999999", **kwarg)
-
- def test_out_of_bounds_integer_value(self):
- # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp(Timestamp.max.value * 2)
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp(Timestamp.min.value * 2)
-
- def test_out_of_bounds_value(self):
- one_us = np.timedelta64(1).astype("timedelta64[us]")
-
- # By definition we can't go out of bounds in [ns], so we
- # convert the datetime64s to [us] so we can go out of bounds
- min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]")
- max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]")
-
- # No error for the min/max datetimes
- Timestamp(min_ts_us)
- Timestamp(max_ts_us)
-
- # One us less than the minimum is an error
- with pytest.raises(ValueError):
- Timestamp(min_ts_us - one_us)
-
- # One us more than the maximum is an error
- with pytest.raises(ValueError):
- Timestamp(max_ts_us + one_us)
-
- def test_out_of_bounds_string(self):
- with pytest.raises(ValueError):
- Timestamp("1676-01-01")
- with pytest.raises(ValueError):
- Timestamp("2263-01-01")
-
- def test_barely_out_of_bounds(self):
- # GH#19529
- # GH#19382 close enough to bounds that dropping nanos would result
- # in an in-bounds datetime
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp("2262-04-11 23:47:16.854775808")
-
- def test_bounds_with_different_units(self):
- out_of_bounds_dates = ("1677-09-21", "2262-04-12")
-
- time_units = ("D", "h", "m", "s", "ms", "us")
-
- for date_string in out_of_bounds_dates:
- for unit in time_units:
- dt64 = np.datetime64(date_string, unit)
- with pytest.raises(ValueError):
- Timestamp(dt64)
-
- in_bounds_dates = ("1677-09-23", "2262-04-11")
-
- for date_string in in_bounds_dates:
- for unit in time_units:
- dt64 = np.datetime64(date_string, unit)
- Timestamp(dt64)
-
- def test_min_valid(self):
- # Ensure that Timestamp.min is a valid Timestamp
- Timestamp(Timestamp.min)
-
- def test_max_valid(self):
- # Ensure that Timestamp.max is a valid Timestamp
- Timestamp(Timestamp.max)
-
- def test_now(self):
- # GH#9000
- ts_from_string = Timestamp("now")
- ts_from_method = Timestamp.now()
- ts_datetime = datetime.now()
-
- ts_from_string_tz = Timestamp("now", tz="US/Eastern")
- ts_from_method_tz = Timestamp.now(tz="US/Eastern")
-
- # Check that the delta between the times is less than 1s (arbitrarily
- # small)
- delta = Timedelta(seconds=1)
- assert abs(ts_from_method - ts_from_string) < delta
- assert abs(ts_datetime - ts_from_method) < delta
- assert abs(ts_from_method_tz - ts_from_string_tz) < delta
- assert (
- abs(
- ts_from_string_tz.tz_localize(None)
- - ts_from_method_tz.tz_localize(None)
- )
- < delta
- )
-
- def test_today(self):
- ts_from_string = Timestamp("today")
- ts_from_method = Timestamp.today()
- ts_datetime = datetime.today()
-
- ts_from_string_tz = Timestamp("today", tz="US/Eastern")
- ts_from_method_tz = Timestamp.today(tz="US/Eastern")
-
- # Check that the delta between the times is less than 1s (arbitrarily
- # small)
- delta = Timedelta(seconds=1)
- assert abs(ts_from_method - ts_from_string) < delta
- assert abs(ts_datetime - ts_from_method) < delta
- assert abs(ts_from_method_tz - ts_from_string_tz) < delta
- assert (
- abs(
- ts_from_string_tz.tz_localize(None)
- - ts_from_method_tz.tz_localize(None)
- )
- < delta
- )
-
- @pytest.mark.parametrize("tz", [None, pytz.timezone("US/Pacific")])
- def test_disallow_setting_tz(self, tz):
- # GH 3746
- ts = Timestamp("2010")
- with pytest.raises(AttributeError):
- ts.tz = tz
-
- @pytest.mark.parametrize("offset", ["+0300", "+0200"])
- def test_construct_timestamp_near_dst(self, offset):
- # GH 20854
- expected = Timestamp(
- "2016-10-30 03:00:00{}".format(offset), tz="Europe/Helsinki"
- )
- result = Timestamp(expected).tz_convert("Europe/Helsinki")
- assert result == expected
-
- @pytest.mark.parametrize(
- "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"]
- )
- def test_construct_with_different_string_format(self, arg):
- # GH 12064
- result = Timestamp(arg)
- expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
- assert result == expected
-
- def test_construct_timestamp_preserve_original_frequency(self):
- # GH 22311
- result = Timestamp(Timestamp("2010-08-08", freq="D")).freq
- expected = offsets.Day()
- assert result == expected
-
- def test_constructor_invalid_frequency(self):
- # GH 22311
- with pytest.raises(ValueError, match="Invalid frequency:"):
- Timestamp("2012-01-01", freq=[])
-
- @pytest.mark.parametrize("box", [datetime, Timestamp])
- def test_raise_tz_and_tzinfo_in_datetime_input(self, box):
- # GH 23579
- kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": utc}
- with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
- Timestamp(box(**kwargs), tz="US/Pacific")
- with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
- Timestamp(box(**kwargs), tzinfo=pytz.timezone("US/Pacific"))
-
- def test_dont_convert_dateutil_utc_to_pytz_utc(self):
- result = Timestamp(datetime(2018, 1, 1), tz=tzutc())
- expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())
- assert result == expected
-
- def test_constructor_subclassed_datetime(self):
- # GH 25851
- # ensure that subclassed datetime works for
- # Timestamp creation
- class SubDatetime(datetime):
- pass
-
- data = SubDatetime(2000, 1, 1)
- result = Timestamp(data)
- expected = Timestamp(2000, 1, 1)
- assert result == expected
-
- @pytest.mark.skipif(
- not compat.PY38,
- reason="datetime.fromisocalendar was added in Python version 3.8",
- )
- def test_constructor_fromisocalendar(self):
- # GH 30395
- expected_timestamp = Timestamp("2000-01-03 00:00:00")
- expected_stdlib = datetime.fromisocalendar(2000, 1, 1)
- result = Timestamp.fromisocalendar(2000, 1, 1)
- assert result == expected_timestamp
- assert result == expected_stdlib
- assert isinstance(result, Timestamp)
-
-
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
@@ -1074,27 +564,3 @@ def test_dt_subclass_add_timedelta(lh, rh):
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
-
-
-def test_constructor_ambigous_dst():
- # GH 24329
- # Make sure that calling Timestamp constructor
- # on Timestamp created from ambiguous time
- # doesn't change Timestamp.value
- ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
- expected = ts.value
- result = Timestamp(ts).value
- assert result == expected
-
-
-@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999])
-def test_constructor_before_dst_switch(epoch):
- # GH 31043
- # Make sure that calling Timestamp constructor
- # on time just before DST switch doesn't lead to
- # nonexistent time or value change
- ts = Timestamp(epoch, tz="dateutil/America/Los_Angeles")
- result = ts.tz.dst(ts)
- expected = timedelta(seconds=0)
- assert Timestamp(ts).value == epoch
- assert result == expected
| https://api.github.com/repos/pandas-dev/pandas/pulls/31583 | 2020-02-02T18:57:56Z | 2020-02-02T21:52:37Z | 2020-02-02T21:52:37Z | 2020-02-02T21:52:42Z | |
Dispatch IntervalIndex.argsort to IntervalArray | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 398ed75c060ca..6a9111574ade3 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -701,6 +701,15 @@ def astype(self, dtype, copy=True):
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
+ def argsort(self, ascending: bool = True, kind: str = "quicksort", *args, **kwargs):
+ ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
+
+ if not ascending or kind != "quicksort":
+ # fall back to base class implementation
+ return super().argsort(ascending, kind)
+
+ return np.lexsort((self.right, self.left))
+
@classmethod
def _concat_same_type(cls, to_concat):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 6a3e808ab9821..5da01ee69697e 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1033,7 +1033,7 @@ def _format_space(self) -> str:
# --------------------------------------------------------------------
def argsort(self, *args, **kwargs) -> np.ndarray:
- return np.lexsort((self.right, self.left))
+ return self._data.argsort()
def equals(self, other) -> bool:
"""
| cc @jschendel thoughts on if we can do better in the non-ascending/"quicksort" case? | https://api.github.com/repos/pandas-dev/pandas/pulls/31582 | 2020-02-02T18:47:50Z | 2020-02-19T19:47:55Z | null | 2021-01-20T00:13:22Z |
CLN: Fix ReadMe Badges | diff --git a/README.md b/README.md
index 1130eb30954dc..5342eda4390eb 100644
--- a/README.md
+++ b/README.md
@@ -5,82 +5,16 @@
-----------------
# pandas: powerful Python data analysis toolkit
-
-<table>
-<tr>
- <td>Latest Release</td>
- <td>
- <a href="https://pypi.org/project/pandas/">
- <img src="https://img.shields.io/pypi/v/pandas.svg" alt="latest release" />
- </a>
- </td>
-</tr>
- <td></td>
- <td>
- <a href="https://anaconda.org/anaconda/pandas/">
- <img src="https://anaconda.org/conda-forge/pandas/badges/version.svg" alt="latest release" />
- </a>
-</td>
-</tr>
-<tr>
- <td>Package Status</td>
- <td>
- <a href="https://pypi.org/project/pandas/">
- <img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" />
- </a>
- </td>
-</tr>
-<tr>
- <td>License</td>
- <td>
- <a href="https://github.com/pandas-dev/pandas/blob/master/LICENSE">
- <img src="https://img.shields.io/pypi/l/pandas.svg" alt="license" />
- </a>
-</td>
-</tr>
-<tr>
- <td>Build Status</td>
- <td>
- <a href="https://travis-ci.org/pandas-dev/pandas">
- <img src="https://travis-ci.org/pandas-dev/pandas.svg?branch=master" alt="travis build status" />
- </a>
- </td>
-</tr>
-<tr>
- <td></td>
- <td>
- <a href="https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=master">
- <img src="https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=master" alt="Azure Pipelines build status" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Coverage</td>
- <td>
- <a href="https://codecov.io/gh/pandas-dev/pandas">
- <img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Downloads</td>
- <td>
- <a href="https://pandas.pydata.org">
- <img src="https://anaconda.org/conda-forge/pandas/badges/downloads.svg" alt="conda-forge downloads" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Gitter</td>
- <td>
- <a href="https://gitter.im/pydata/pandas">
- <img src="https://badges.gitter.im/Join%20Chat.svg" />
- </a>
- </td>
-</tr>
-</table>
-
-
+[](https://pypi.org/project/pandas/)
+[](https://anaconda.org/anaconda/pandas/)
+[](https://pypi.org/project/pandas/)
+[](https://github.com/pandas-dev/pandas/blob/master/LICENSE)
+[](https://travis-ci.org/pandas-dev/pandas)
+[](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=master)
+[](https://codecov.io/gh/pandas-dev/pandas)
+[](https://pandas.pydata.org)
+[](https://gitter.im/pydata/pandas)
+[](https://numfocus.org)
## What is it?
| Noticed when discussing https://github.com/pandas-dev/pandas/issues/31560 our badges are written in HTML + take up a lot of vertical space on our Readme.
My branch (a lot more compact vs master):
https://github.com/alimcmaster1/pandas/tree/mcmali-readme#pandas-powerful-python-data-analysis-toolkit
I've also added a NumFocus one :)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31579 | 2020-02-02T17:11:58Z | 2020-02-02T18:31:37Z | 2020-02-02T18:31:37Z | 2020-02-02T18:31:43Z |
Backport PR #31482 on branch 1.0.x (BUG: objToJson.c - fix return value) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 9e78ff03f5f67..31bc1cefcb292 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -126,7 +126,7 @@ ExtensionArray
Other
^^^^^
--
+- Regression fixed in objTOJSON.c fix return-type warning (:issue:`31463`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index c5ac279ed3243..5c5b80648aed1 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -178,6 +178,8 @@ void *initObjToJSON(void) {
/* Initialise numpy API */
import_array();
+ // GH 31463
+ return NULL;
}
static TypeContext *createTypeContext(void) {
| Backport PR #31482: BUG: objToJson.c - fix return value | https://api.github.com/repos/pandas-dev/pandas/pulls/31578 | 2020-02-02T17:09:29Z | 2020-02-02T18:32:03Z | 2020-02-02T18:32:03Z | 2020-02-02T18:32:03Z |
REF: de-duplicate Period freq conversion code | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 7fca624099b38..f3ae28578240f 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -272,6 +272,8 @@ cdef int64_t DtoB_weekday(int64_t unix_date) nogil:
cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back,
int64_t unix_date) nogil:
+ # calculate the current week (counting from 1970-01-01) treating
+ # sunday as last day of a week
cdef:
int day_of_week = dayofweek(dts.year, dts.month, dts.day)
@@ -506,7 +508,11 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil:
cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
- return (ordinal + 3 - af_info.to_end) // 7 + 1
+ return unix_date_to_week(ordinal, af_info.to_end)
+
+
+cdef int64_t unix_date_to_week(int64_t unix_date, int to_end) nogil:
+ return (unix_date + 3 - to_end) // 7 + 1
# --------------------------------------------------------------------
@@ -787,22 +793,10 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil:
return unix_date
elif freq == FR_BUS:
- # calculate the current week (counting from 1970-01-01) treating
- # sunday as last day of a week
- weeks = (unix_date + 3) // 7
- # calculate the current weekday (in range 1 .. 7)
- delta = (unix_date + 3) % 7 + 1
- # return the number of business days in full weeks plus the business
- # days in the last - possible partial - week
- if delta > 6:
- # We have a Sunday, which rolls back to the previous Friday,
- # just like Saturday, so decrement delta by 1 to treat as saturday
- delta = 6
- return (5 * weeks) + delta - 4
+ return DtoB(dts, 0, unix_date)
elif freq_group == FR_WK:
- day_adj = freq - FR_WK
- return (unix_date + 3 - day_adj) // 7 + 1
+ return unix_date_to_week(unix_date, freq - FR_WK)
cdef void get_date_info(int64_t ordinal, int freq,
| https://api.github.com/repos/pandas-dev/pandas/pulls/31577 | 2020-02-02T16:42:54Z | 2020-02-02T17:34:38Z | 2020-02-02T17:34:38Z | 2020-02-02T17:39:20Z | |
Backport PR #31573 on branch 1.0.x (CI: Remove warning raising after new matplotlib release) | diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 9cd3ccbf9214e..e54f4784e9c4f 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -8,6 +8,7 @@
import pandas._config.config as cf
from pandas.compat.numpy import np_datetime64_compat
+import pandas.util._test_decorators as td
from pandas import Index, Period, Series, Timestamp, date_range
import pandas._testing as tm
@@ -59,6 +60,7 @@ def test_register_by_default(self):
call = [sys.executable, "-c", code]
assert subprocess.check_call(call) == 0
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
@@ -66,9 +68,7 @@ def test_registering_no_warning(self):
# Set to the "warn" state, in case this isn't the first test run
register_matplotlib_converters()
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- # GH#30588 DeprecationWarning from 2D indexing
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
@@ -91,6 +91,7 @@ def test_matplotlib_formatters(self):
assert Timestamp not in units.registry
assert Timestamp in units.registry
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters", False)
@@ -100,15 +101,12 @@ def test_option_no_warning(self):
# Test without registering first, no warning
with ctx:
- # GH#30588 DeprecationWarning from 2D indexing on Index
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
# Now test with registering
register_matplotlib_converters()
with ctx:
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
| Backport PR #31573: CI: Remove warning raising after new matplotlib release | https://api.github.com/repos/pandas-dev/pandas/pulls/31576 | 2020-02-02T16:42:49Z | 2020-02-02T17:10:03Z | 2020-02-02T17:10:03Z | 2020-02-02T17:10:03Z |
TYP: Arraylike | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 7447d593a7ff0..4435d8b7c88ee 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -439,7 +439,7 @@ def isna(self) -> ArrayLike:
Returns
-------
- na_values : Union[np.ndarray, ExtensionArray]
+ na_values : np.ndarray or ExtensionArray
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
diff --git a/pandas/core/base.py b/pandas/core/base.py
index ee514888c6331..06b7cd9152fb7 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,11 +4,12 @@
import builtins
import textwrap
-from typing import Any, Dict, FrozenSet, List, Optional, Union
+from typing import Any, Dict, FrozenSet, Generic, List, Optional
import numpy as np
import pandas._libs.lib as lib
+from pandas._typing import ArrayLike
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -584,7 +585,7 @@ def _shallow_copy(self, obj, **kwargs):
return self._constructor(obj, **kwargs)
-class IndexOpsMixin:
+class IndexOpsMixin(Generic[ArrayLike]):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
@@ -596,7 +597,7 @@ class IndexOpsMixin:
)
@property
- def _values(self) -> Union[ExtensionArray, np.ndarray]:
+ def _values(self) -> ArrayLike:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
@@ -1141,7 +1142,10 @@ def _map_values(self, mapper, na_action=None):
values = self._values
if na_action is not None:
raise NotImplementedError
- map_f = lambda values, f: values.map(f)
+
+ def map_f(values, f):
+ return values.map(f)
+
else:
values = self.astype(object)._values
if na_action == "ignore":
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4bc5599297066..c0aab801a2024 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -13,7 +13,7 @@
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
-from pandas._typing import DtypeObj, Label
+from pandas._typing import ArrayLike, DtypeObj, Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
@@ -182,7 +182,7 @@ def _new_Index(cls, d):
return cls.__new__(cls, **d)
-class Index(IndexOpsMixin, PandasObject):
+class Index(IndexOpsMixin[ArrayLike], PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects.
@@ -254,7 +254,7 @@ def _outer_indexer(self, left, right):
return libjoin.outer_join_indexer(left, right)
_typ = "index"
- _data: Union[ExtensionArray, np.ndarray]
+ _data: ArrayLike
_id = None
_name: Label = None
# MultiIndex.levels previously allowed setting the index name. We
@@ -3805,7 +3805,7 @@ def array(self) -> ExtensionArray:
return array
@property
- def _values(self) -> Union[ExtensionArray, np.ndarray]:
+ def _values(self) -> ArrayLike:
"""
The best array representation.
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index 4ec7ef64e2272..56383826db164 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -321,7 +321,7 @@ def test_constructor_mixed(self):
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
- with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
+ with pytest.raises(AssertionError, match="PeriodIndex"):
idx._simple_new(idx, name="p")
result = idx._simple_new(idx._data, name="p")
@@ -339,7 +339,7 @@ def test_constructor_simple_new(self):
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
- with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
+ with pytest.raises(AssertionError, match="PeriodIndex"):
idx._simple_new(idx, name="p")
result = idx._simple_new(idx._data, name="p")
diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py
index 4353eb4c8cd64..53e5dbd87e508 100644
--- a/pandas/tests/series/methods/test_argsort.py
+++ b/pandas/tests/series/methods/test_argsort.py
@@ -53,7 +53,7 @@ def test_argsort_stable(self):
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
- r"found <class 'pandas\.core\.series\.Series'> instead"
+ r"found (<class ')?pandas\.core\.series\.Series('>)? instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
| reopening #31518
cc @jbrockmendel @WillAyd | https://api.github.com/repos/pandas-dev/pandas/pulls/31574 | 2020-02-02T16:06:04Z | 2020-05-01T14:57:54Z | null | 2020-05-01T14:57:54Z |
CI: Remove warning raising after new matplotlib release | diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 9cd3ccbf9214e..e54f4784e9c4f 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -8,6 +8,7 @@
import pandas._config.config as cf
from pandas.compat.numpy import np_datetime64_compat
+import pandas.util._test_decorators as td
from pandas import Index, Period, Series, Timestamp, date_range
import pandas._testing as tm
@@ -59,6 +60,7 @@ def test_register_by_default(self):
call = [sys.executable, "-c", code]
assert subprocess.check_call(call) == 0
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
@@ -66,9 +68,7 @@ def test_registering_no_warning(self):
# Set to the "warn" state, in case this isn't the first test run
register_matplotlib_converters()
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- # GH#30588 DeprecationWarning from 2D indexing
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
@@ -91,6 +91,7 @@ def test_matplotlib_formatters(self):
assert Timestamp not in units.registry
assert Timestamp in units.registry
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters", False)
@@ -100,15 +101,12 @@ def test_option_no_warning(self):
# Test without registering first, no warning
with ctx:
- # GH#30588 DeprecationWarning from 2D indexing on Index
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
# Now test with registering
register_matplotlib_converters()
with ctx:
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
| - [ ] xref #31562
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
seems CI is broken after the new release of matplotlib | https://api.github.com/repos/pandas-dev/pandas/pulls/31573 | 2020-02-02T13:45:49Z | 2020-02-02T16:42:20Z | 2020-02-02T16:42:20Z | 2020-02-02T16:42:32Z |
DOC fix *_option() docstring | diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index cacd6f5454de7..8b6116d3abd60 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -155,9 +155,7 @@ def _describe_option(pat: str = "", _print_desc: bool = True):
if len(keys) == 0:
raise OptionError("No such keys(s)")
- s = ""
- for k in keys: # filter by pat
- s += _build_option_description(k)
+ s = "\n".join([_build_option_description(k) for k in keys])
if _print_desc:
print(s)
| - [x] closes #28780
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31571 | 2020-02-02T12:12:30Z | 2020-02-03T10:39:33Z | 2020-02-03T10:39:33Z | 2020-02-03T10:39:34Z |
BUG accept and deprecate negative integer for max_colwidth | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 16d9d341f785d..305de5bbd57eb 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -10,6 +10,14 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_101.deprecations:
+
+Deprecations
+~~~~~~~~~~~~
+
+- Support for negative integer for :attr:`pd.options.display.max_colwidth` is deprecated in favor of using ``None`` (:issue:`31532`)
+
+.. ---------------------------------------------------------------------------
.. _whatsnew_101.bug_fixes:
@@ -129,6 +137,7 @@ ExtensionArray
Other
^^^^^
- Regression fixed in objTOJSON.c fix return-type warning (:issue:`31463`)
+- Fixed a regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 3776c6f816d96..b0410e31c6de7 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,6 +9,8 @@
module is imported, register them here rather than in the module.
"""
+import warnings
+
import pandas._config.config as cf
from pandas._config.config import (
is_bool,
@@ -341,8 +343,26 @@ def is_terminal() -> bool:
validator=is_instance_factory([type(None), int]),
)
cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)
+
+ def _deprecate_negative_int_max_colwidth(key):
+ value = cf.get_option(key)
+ if value is not None and value < 0:
+ warnings.warn(
+ "Passing a negative integer is deprecated in version 1.0 and "
+ "will not be supported in future version. Instead, use None "
+ "to not limit the column width.",
+ FutureWarning,
+ stacklevel=4,
+ )
+
cf.register_option(
- "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int
+ # FIXME: change `validator=is_nonnegative_int`
+ # in version 1.2
+ "max_colwidth",
+ 50,
+ max_colwidth_doc,
+ validator=is_instance_factory([type(None), int]),
+ cb=_deprecate_negative_int_max_colwidth,
)
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 7650561d3072d..bf7b98eb78f11 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -239,6 +239,15 @@ def test_repr_truncation(self):
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
+ def test_repr_deprecation_negative_int(self):
+ # FIXME: remove in future version after deprecation cycle
+ # Non-regression test for:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ width = get_option("display.max_colwidth")
+ with tm.assert_produces_warning(FutureWarning):
+ set_option("display.max_colwidth", -1)
+ set_option("display.max_colwidth", width)
+
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
| - [x] closes #31532
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31569 | 2020-02-02T11:44:06Z | 2020-02-02T18:30:39Z | 2020-02-02T18:30:38Z | 2020-02-03T10:38:51Z |
Description of Plotting Backends added in the end. | diff --git a/doc/source/user_guide/pip b/doc/source/user_guide/pip
new file mode 160000
index 0000000000000..37c75b671faca
--- /dev/null
+++ b/doc/source/user_guide/pip
@@ -0,0 +1 @@
+Subproject commit 37c75b671faca56337faea42dabfed65ac5bf567
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 6680ba854cb6f..9e53cdf26461c 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -1641,3 +1641,27 @@ when plotting a large number of points.
:suppress:
plt.close('all')
+
+
+Plotting Backend
+----------------
+
+Starting in 0.25 pandas can be extended with third-party plotting backends.
+The main idea is letting users select a plotting backend different than the provided one based on Matplotlib.
+For example:
+
+.. ipython:: python
+
+ pd.set_option('plotting.backend', 'backend.module')
+ pd.Series([1, 2, 3]).plot()
+This would be more or less equivalent to:
+
+.. ipython:: python
+
+ import backend.module
+ backend.module.plot(pd.Series([1, 2, 3]))
+
+The backend module can then use other visualization tools (Bokeh, Altair,…) to generate the plots.
+Libraries implementing the plotting backend should use entry points to make their backend discoverable to pandas. The key is "pandas_plotting_backends".
+
+More information on how to implement a third-party plotting backend can be found in its documentation.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31568 | 2020-02-02T10:57:17Z | 2020-03-08T05:20:20Z | null | 2020-03-08T05:20:20Z |
WEB: Changed whatsnew href from v0.25.0 to v1.0.0 | diff --git a/web/pandas/index.html b/web/pandas/index.html
index fedb0b0c5f712..83d0f48197033 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -63,7 +63,7 @@ <h5>With the support of:</h5>
{% if releases %}
<h4>Latest version: {{ releases[0].name }}</h4>
<ul>
- <li><a href="docs/whatsnew/v0.25.0.html">What's new in {{ releases[0].name }}</a></li>
+ <li><a href="docs/whatsnew/v1.0.0.html">What's new in {{ releases[0].name }}</a></li>
<li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li>
<li><a href="{{ base_url}}/docs/">Documentation (web)</a></li>
<li><a href="{{ base_url }}/docs/pandas.pdf">Documentation (pdf)</a></li>
| - [ ] closes #31539
| https://api.github.com/repos/pandas-dev/pandas/pulls/31567 | 2020-02-02T10:15:29Z | 2020-02-02T15:22:33Z | 2020-02-02T15:22:33Z | 2021-08-26T10:55:44Z |
TST: DataFrame.interpolate(axis='columns') throws exception while DataFrame.interpolate(axis=1) not (#25190) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 40abb8f83de2f..c062cdee92145 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -158,6 +158,7 @@ Indexing
- Bug in :meth:`PeriodIndex.is_monotonic` incorrectly returning ``True`` when containing leading ``NaT`` entries (:issue:`31437`)
- Bug in :meth:`DatetimeIndex.get_loc` raising ``KeyError`` with converted-integer key instead of the user-passed key (:issue:`31425`)
- Bug in :meth:`Series.xs` incorrectly returning ``Timestamp`` instead of ``datetime64`` in some object-dtype cases (:issue:`31630`)
+- Bug in :meth:`DataFrame.interpolate` raising ``UnboundLocalError`` when specifying the ``axis`` with a string (:issue:`25190`)
Missing
^^^^^^^
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index ae0516dd29a1f..7263c9335f139 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -983,3 +983,11 @@ def test_interp_time_inplace_axis(self, axis):
result = expected.interpolate(axis=0, method="time")
expected.interpolate(axis=0, method="time", inplace=True)
tm.assert_frame_equal(result, expected)
+
+ def test_interp_string_axis(self):
+ # GH 25190
+ x = np.linspace(0, 100, 1000)
+ y = np.sin(x)
+ df = pd.DataFrame(data=np.tile(y, (10, 1)), index=np.arange(10), columns=x)
+ df.reindex(columns=x * 1.005).interpolate(method="linear", axis="columns")
+ df.reindex(columns=x * 1.005).interpolate(method="linear", axis="index")
| Make sure that DataFrame.interpolate allows setting having "columns" or "index" as the `axis` argument.
I included the `whatsnew` entry as well. However, since the bug was already fixed in v1.0.0, it might be reasonable to just drop it. I leave it to the reviewer's discretion, that's why I've put it in a separate commit.
- [x] closes #25190
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31566 | 2020-02-02T09:53:06Z | 2020-05-12T12:18:35Z | null | 2020-05-12T12:20:46Z |
Fix docstring for to_markdown buf parameter | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3b1d7e4c50be5..3bb584d4d34e8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1922,10 +1922,8 @@ def _repr_data_resource_(self):
Parameters
----------
- buf : writable buffer, defaults to sys.stdout
- Where to send the output. By default, the output is printed to
- sys.stdout. Pass a writable buffer if you need to further process
- the output.
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
| - [ ] ~~closes #xxxx~~ (N/A)
- [ ] ~~tests added / passed~~ (N/A)
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] ~~whatsnew entry~~ (N/A)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31565 | 2020-02-02T09:15:51Z | 2020-02-02T17:12:31Z | 2020-02-02T17:12:31Z | 2020-02-02T17:35:56Z |
ENH: add fold support to Timestamp constructor | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 3fdab0fd26643..f208c8d576131 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -2297,6 +2297,35 @@ To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None)
# tz_convert(None) is identical to tz_convert('UTC').tz_localize(None)
didx.tz_convert('UTC').tz_localize(None)
+.. _timeseries.fold:
+
+Fold
+~~~~
+
+.. versionadded:: 1.1.0
+
+For ambiguous times, pandas supports explicitly specifying the keyword-only fold argument.
+Due to daylight saving time, one wall clock time can occur twice when shifting
+from summer to winter time; fold describes whether the datetime-like corresponds
+to the first (0) or the second time (1) the wall clock hits the ambiguous time.
+Fold is supported only for constructing from naive ``datetime.datetime``
+(see `datetime documentation <https://docs.python.org/3/library/datetime.html>`__ for details) or from :class:`Timestamp`
+or for constructing from components (see below). Only ``dateutil`` timezones are supported
+(see `dateutil documentation <https://dateutil.readthedocs.io/en/stable/tz.html#dateutil.tz.enfold>`__
+for ``dateutil`` methods that deal with ambiguous datetimes) as ``pytz``
+timezones do not support fold (see `pytz documentation <http://pytz.sourceforge.net/index.html>`__
+for details on how ``pytz`` deals with ambiguous datetimes). To localize an ambiguous datetime
+with ``pytz``, please use :meth:`Timestamp.tz_localize`. In general, we recommend to rely
+on :meth:`Timestamp.tz_localize` when localizing ambiguous datetimes if you need direct
+control over how they are handled.
+
+.. ipython:: python
+
+ pd.Timestamp(datetime.datetime(2019, 10, 27, 1, 30, 0, 0),
+ tz='dateutil/Europe/London', fold=0)
+ pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30,
+ tz='dateutil/Europe/London', fold=1)
+
.. _timeseries.timezone_ambiguous:
Ambiguous times when localizing
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 888b7d23aeb35..2b64b85863def 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -36,6 +36,28 @@ For example:
ser["2014"]
ser.loc["May 2015"]
+.. _whatsnew_110.timestamp_fold_support:
+
+Fold argument support in Timestamp constructor
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Timestamp:` now supports the keyword-only fold argument according to `PEP 495 <https://www.python.org/dev/peps/pep-0495/#the-fold-attribute>`_ similar to parent ``datetime.datetime`` class. It supports both accepting fold as an initialization argument and inferring fold from other constructor arguments (:issue:`25057`, :issue:`31338`). Support is limited to ``dateutil`` timezones as ``pytz`` doesn't support fold.
+
+For example:
+
+.. ipython:: python
+
+ ts = pd.Timestamp("2019-10-27 01:30:00+00:00")
+ ts.fold
+
+.. ipython:: python
+
+ ts = pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30,
+ tz="dateutil/Europe/London", fold=1)
+ ts
+
+For more on working with fold, see :ref:`Fold subsection <timeseries.fold>` in the user guide.
+
.. _whatsnew_110.enhancements.other:
Other enhancements
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 53e3354ca8eb6..a176c4e41e834 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -49,30 +49,31 @@ from pandas._libs.tslibs.tzconversion cimport (
cdef inline object create_datetime_from_ts(
int64_t value, npy_datetimestruct dts,
- object tz, object freq):
+ object tz, object freq, bint fold):
""" convenience routine to construct a datetime.datetime from its parts """
return datetime(dts.year, dts.month, dts.day, dts.hour,
- dts.min, dts.sec, dts.us, tz)
+ dts.min, dts.sec, dts.us, tz, fold=fold)
cdef inline object create_date_from_ts(
int64_t value, npy_datetimestruct dts,
- object tz, object freq):
+ object tz, object freq, bint fold):
""" convenience routine to construct a datetime.date from its parts """
+ # GH 25057 add fold argument to match other func_create signatures
return date(dts.year, dts.month, dts.day)
cdef inline object create_time_from_ts(
int64_t value, npy_datetimestruct dts,
- object tz, object freq):
+ object tz, object freq, bint fold):
""" convenience routine to construct a datetime.time from its parts """
- return time(dts.hour, dts.min, dts.sec, dts.us, tz)
+ return time(dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold)
@cython.wraparound(False)
@cython.boundscheck(False)
def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
- str box="datetime"):
+ bint fold=0, str box="datetime"):
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp
@@ -83,6 +84,13 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
convert to this timezone
freq : str/Offset, default None
freq to convert
+ fold : bint, default is 0
+ Due to daylight saving time, one wall clock time can occur twice
+ when shifting from summer to winter time; fold describes whether the
+ datetime-like corresponds to the first (0) or the second time (1)
+ the wall clock hits the ambiguous time
+
+ .. versionadded:: 1.1.0
box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime'
If datetime, convert to datetime.datetime
If date, convert to datetime.date
@@ -104,7 +112,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
str typ
int64_t value, delta, local_value
ndarray[object] result = np.empty(n, dtype=object)
- object (*func_create)(int64_t, npy_datetimestruct, object, object)
+ object (*func_create)(int64_t, npy_datetimestruct, object, object, bint)
if box == "date":
assert (tz is None), "tz should be None when converting to date"
@@ -129,7 +137,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
result[i] = <object>NaT
else:
dt64_to_dtstruct(value, &dts)
- result[i] = func_create(value, dts, tz, freq)
+ result[i] = func_create(value, dts, tz, freq, fold)
elif is_tzlocal(tz):
for i in range(n):
value = arr[i]
@@ -141,7 +149,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
# using the i8 representation.
local_value = tz_convert_utc_to_tzlocal(value, tz)
dt64_to_dtstruct(local_value, &dts)
- result[i] = func_create(value, dts, tz, freq)
+ result[i] = func_create(value, dts, tz, freq, fold)
else:
trans, deltas, typ = get_dst_info(tz)
@@ -155,7 +163,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
else:
# Adjust datetime64 timestamp, recompute datetimestruct
dt64_to_dtstruct(value + delta, &dts)
- result[i] = func_create(value, dts, tz, freq)
+ result[i] = func_create(value, dts, tz, freq, fold)
elif typ == 'dateutil':
# no zone-name change for dateutil tzs - dst etc
@@ -168,7 +176,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
# Adjust datetime64 timestamp, recompute datetimestruct
pos = trans.searchsorted(value, side='right') - 1
dt64_to_dtstruct(value + deltas[pos], &dts)
- result[i] = func_create(value, dts, tz, freq)
+ result[i] = func_create(value, dts, tz, freq, fold)
else:
# pytz
for i in range(n):
@@ -182,7 +190,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
new_tz = tz._tzinfos[tz._transition_info[pos]]
dt64_to_dtstruct(value + deltas[pos], &dts)
- result[i] = func_create(value, dts, new_tz, freq)
+ result[i] = func_create(value, dts, new_tz, freq, fold)
return result
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd
index c74307a3d2887..bb20296e24587 100644
--- a/pandas/_libs/tslibs/conversion.pxd
+++ b/pandas/_libs/tslibs/conversion.pxd
@@ -12,6 +12,7 @@ cdef class _TSObject:
npy_datetimestruct dts # npy_datetimestruct
int64_t value # numpy dt64
object tzinfo
+ bint fold
cdef convert_to_tsobject(object ts, object tz, object unit,
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 6e978d495c325..57483783faf9f 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -39,7 +39,8 @@ from pandas._libs.tslibs.nattype cimport (
from pandas._libs.tslibs.tzconversion import (
tz_localize_to_utc, tz_convert_single)
-from pandas._libs.tslibs.tzconversion cimport _tz_convert_tzlocal_utc
+from pandas._libs.tslibs.tzconversion cimport (
+ _tz_convert_tzlocal_utc, _tz_convert_tzlocal_fromutc)
# ----------------------------------------------------------------------
# Constants
@@ -215,6 +216,11 @@ cdef class _TSObject:
# npy_datetimestruct dts # npy_datetimestruct
# int64_t value # numpy dt64
# object tzinfo
+ # bint fold
+
+ def __cinit__(self):
+ # GH 25057. As per PEP 495, set fold to 0 by default
+ self.fold = 0
@property
def value(self):
@@ -322,6 +328,7 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz,
cdef:
_TSObject obj = _TSObject()
+ obj.fold = ts.fold
if tz is not None:
tz = maybe_get_tz(tz)
@@ -380,6 +387,8 @@ cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts,
_TSObject obj = _TSObject()
int64_t value # numpy dt64
datetime dt
+ ndarray[int64_t] trans
+ int64_t[:] deltas
value = dtstruct_to_dt64(&dts)
obj.dts = dts
@@ -389,10 +398,23 @@ cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts,
check_overflows(obj)
return obj
+ # Infer fold from offset-adjusted obj.value
+ # see PEP 495 https://www.python.org/dev/peps/pep-0495/#the-fold-attribute
+ if is_utc(tz):
+ pass
+ elif is_tzlocal(tz):
+ _tz_convert_tzlocal_fromutc(obj.value, tz, &obj.fold)
+ else:
+ trans, deltas, typ = get_dst_info(tz)
+
+ if typ == 'dateutil':
+ pos = trans.searchsorted(obj.value, side='right') - 1
+ obj.fold = _infer_tsobject_fold(obj, trans, deltas, pos)
+
# Keep the converter same as PyDateTime's
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day,
obj.dts.hour, obj.dts.min, obj.dts.sec,
- obj.dts.us, obj.tzinfo)
+ obj.dts.us, obj.tzinfo, fold=obj.fold)
obj = convert_datetime_to_tsobject(
dt, tz, nanos=obj.dts.ps // 1000)
return obj
@@ -543,7 +565,7 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz):
elif obj.value == NPY_NAT:
pass
elif is_tzlocal(tz):
- local_val = _tz_convert_tzlocal_utc(obj.value, tz, to_utc=False)
+ local_val = _tz_convert_tzlocal_fromutc(obj.value, tz, &obj.fold)
dt64_to_dtstruct(local_val, &obj.dts)
else:
# Adjust datetime64 timestamp, recompute datetimestruct
@@ -562,6 +584,8 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz):
# i.e. treat_tz_as_dateutil(tz)
pos = trans.searchsorted(obj.value, side='right') - 1
dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts)
+ # dateutil supports fold, so we infer fold from value
+ obj.fold = _infer_tsobject_fold(obj, trans, deltas, pos)
else:
# Note: as of 2018-07-17 all tzinfo objects that are _not_
# either pytz or dateutil have is_fixed_offset(tz) == True,
@@ -571,6 +595,45 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz):
obj.tzinfo = tz
+cdef inline bint _infer_tsobject_fold(_TSObject obj, ndarray[int64_t] trans,
+ int64_t[:] deltas, int32_t pos):
+ """
+ Infer _TSObject fold property from value by assuming 0 and then setting
+ to 1 if necessary.
+
+ Parameters
+ ----------
+ obj : _TSObject
+ trans : ndarray[int64_t]
+ ndarray of offset transition points in nanoseconds since epoch.
+ deltas : int64_t[:]
+ array of offsets corresponding to transition points in trans.
+ pos : int32_t
+ Position of the last transition point before taking fold into account.
+
+ Returns
+ -------
+ bint
+ Due to daylight saving time, one wall clock time can occur twice
+ when shifting from summer to winter time; fold describes whether the
+ datetime-like corresponds to the first (0) or the second time (1)
+ the wall clock hits the ambiguous time
+
+ References
+ ----------
+ .. [1] "PEP 495 - Local Time Disambiguation"
+ https://www.python.org/dev/peps/pep-0495/#the-fold-attribute
+ """
+ cdef:
+ bint fold = 0
+
+ if pos > 0:
+ fold_delta = deltas[pos - 1] - deltas[pos]
+ if obj.value - fold_delta < trans[pos]:
+ fold = 1
+
+ return fold
+
cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz):
"""
Take a datetime/Timestamp in UTC and localizes to timezone tz.
diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd
index b7282e02ff117..5e55e6e8d5297 100644
--- a/pandas/_libs/tslibs/timestamps.pxd
+++ b/pandas/_libs/tslibs/timestamps.pxd
@@ -5,4 +5,4 @@ from pandas._libs.tslibs.np_datetime cimport npy_datetimestruct
cdef object create_timestamp_from_ts(int64_t value,
npy_datetimestruct dts,
- object tz, object freq)
+ object tz, object freq, bint fold)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 9f3b4a8a554b5..5cd3467eed042 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -6,12 +6,12 @@ from numpy cimport int64_t
cnp.import_array()
from datetime import time as datetime_time, timedelta
-from cpython.datetime cimport (datetime,
+from cpython.datetime cimport (datetime, PyDateTime_Check,
PyTZInfo_Check, PyDateTime_IMPORT)
PyDateTime_IMPORT
from pandas._libs.tslibs.util cimport (
- is_integer_object, is_offset_object)
+ is_datetime64_object, is_float_object, is_integer_object, is_offset_object)
from pandas._libs.tslibs.c_timestamp cimport _Timestamp
cimport pandas._libs.tslibs.ccalendar as ccalendar
@@ -41,12 +41,12 @@ _no_input = object()
cdef inline object create_timestamp_from_ts(int64_t value,
npy_datetimestruct dts,
- object tz, object freq):
+ object tz, object freq, bint fold):
""" convenience routine to construct a Timestamp from its parts """
cdef _Timestamp ts_base
ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month,
dts.day, dts.hour, dts.min,
- dts.sec, dts.us, tz)
+ dts.sec, dts.us, tz, fold=fold)
ts_base.value = value
ts_base.freq = freq
ts_base.nanosecond = dts.ps // 1000
@@ -195,6 +195,13 @@ class Timestamp(_Timestamp):
nanosecond : int, optional, default 0
.. versionadded:: 0.23.0
tzinfo : datetime.tzinfo, optional, default None
+ fold : {0, 1}, default None, keyword-only
+ Due to daylight saving time, one wall clock time can occur twice
+ when shifting from summer to winter time; fold describes whether the
+ datetime-like corresponds to the first (0) or the second time (1)
+ the wall clock hits the ambiguous time
+
+ .. versionadded:: 1.1.0
Notes
-----
@@ -350,7 +357,9 @@ class Timestamp(_Timestamp):
second=None,
microsecond=None,
nanosecond=None,
- tzinfo=None
+ tzinfo=None,
+ *,
+ fold=None
):
# The parameter list folds together legacy parameter names (the first
# four) and positional and keyword parameter names from pydatetime.
@@ -390,6 +399,32 @@ class Timestamp(_Timestamp):
# User passed tzinfo instead of tz; avoid silently ignoring
tz, tzinfo = tzinfo, None
+ # Allow fold only for unambiguous input
+ if fold is not None:
+ if fold not in [0, 1]:
+ raise ValueError(
+ "Valid values for the fold argument are None, 0, or 1."
+ )
+
+ if (ts_input is not _no_input and not (
+ PyDateTime_Check(ts_input) and
+ getattr(ts_input, 'tzinfo', None) is None)):
+ raise ValueError(
+ "Cannot pass fold with possibly unambiguous input: int, "
+ "float, numpy.datetime64, str, or timezone-aware "
+ "datetime-like. Pass naive datetime-like or build "
+ "Timestamp from components."
+ )
+
+ if tz is not None and treat_tz_as_pytz(tz):
+ raise ValueError(
+ "pytz timezones do not support fold. Please use dateutil "
+ "timezones."
+ )
+
+ if hasattr(ts_input, 'fold'):
+ ts_input = ts_input.replace(fold=fold)
+
# GH 30543 if pd.Timestamp already passed, return it
# check that only ts_input is passed
# checking verbosely, because cython doesn't optimize
@@ -419,7 +454,8 @@ class Timestamp(_Timestamp):
"hour": hour or 0,
"minute": minute or 0,
"second": second or 0,
- "microsecond": microsecond or 0
+ "microsecond": microsecond or 0,
+ "fold": fold or 0
}
if year is not None:
datetime_kwargs["year"] = year
@@ -435,7 +471,7 @@ class Timestamp(_Timestamp):
# Timestamp(year, month, day[, hour[, minute[, second[,
# microsecond[, nanosecond[, tzinfo]]]]]])
ts_input = datetime(ts_input, freq, tz, unit or 0,
- year or 0, month or 0, day or 0)
+ year or 0, month or 0, day or 0, fold=fold or 0)
nanosecond = hour
tz = minute
freq = None
@@ -455,7 +491,7 @@ class Timestamp(_Timestamp):
elif not is_offset_object(freq):
freq = to_offset(freq)
- return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq)
+ return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq, ts.fold)
def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'):
if self.tz is not None:
@@ -999,7 +1035,7 @@ default 'raise'
if value != NPY_NAT:
check_dts_bounds(&dts)
- return create_timestamp_from_ts(value, dts, _tzinfo, self.freq)
+ return create_timestamp_from_ts(value, dts, _tzinfo, self.freq, fold)
def isoformat(self, sep='T'):
base = super(_Timestamp, self).isoformat(sep=sep)
diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index 9c86057b0a392..c1dd88e5b2313 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -4,4 +4,5 @@ from numpy cimport int64_t
cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz)
cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=*)
+cdef int64_t _tz_convert_tzlocal_fromutc(int64_t val, tzinfo tz, bint *fold)
cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2)
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index b368f0fde3edc..a9702f91107ec 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -444,12 +444,12 @@ cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, object tz, bint to_utc):
return converted
-cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True):
+cdef inline int64_t _tzlocal_get_offset_components(int64_t val, tzinfo tz,
+ bint to_utc,
+ bint *fold=NULL):
"""
- Convert the i8 representation of a datetime from a tzlocal timezone to
- UTC, or vice-versa.
-
- Private, not intended for use outside of tslibs.conversion
+ Calculate offset in nanoseconds needed to convert the i8 representation of
+ a datetime from a tzlocal timezone to UTC, or vice-versa.
Parameters
----------
@@ -457,15 +457,22 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True):
tz : tzinfo
to_utc : bint
True if converting tzlocal _to_ UTC, False if going the other direction
+ fold : bint*, default NULL
+ pointer to fold: whether datetime ends up in a fold or not
+ after adjustment
Returns
-------
- result : int64_t
+ delta : int64_t
+
+ Notes
+ -----
+ Sets fold by pointer
"""
cdef:
npy_datetimestruct dts
- int64_t delta
datetime dt
+ int64_t delta
dt64_to_dtstruct(val, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
@@ -475,11 +482,69 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True):
if not to_utc:
dt = dt.replace(tzinfo=tzutc())
dt = dt.astimezone(tz)
- delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
- if not to_utc:
+ if fold is not NULL:
+ fold[0] = dt.fold
+
+ return int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
+
+
+cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True):
+ """
+ Convert the i8 representation of a datetime from a tzlocal timezone to
+ UTC, or vice-versa.
+
+ Private, not intended for use outside of tslibs.conversion
+
+ Parameters
+ ----------
+ val : int64_t
+ tz : tzinfo
+ to_utc : bint
+ True if converting tzlocal _to_ UTC, False if going the other direction
+
+ Returns
+ -------
+ result : int64_t
+ """
+ cdef int64_t delta
+
+ delta = _tzlocal_get_offset_components(val, tz, to_utc, NULL)
+
+ if to_utc:
+ return val - delta
+ else:
return val + delta
- return val - delta
+
+
+cdef int64_t _tz_convert_tzlocal_fromutc(int64_t val, tzinfo tz, bint *fold):
+ """
+ Convert the i8 representation of a datetime from UTC to local timezone,
+ set fold by pointer
+
+ Private, not intended for use outside of tslibs.conversion
+
+ Parameters
+ ----------
+ val : int64_t
+ tz : tzinfo
+ fold : bint*
+ pointer to fold: whether datetime ends up in a fold or not
+ after adjustment
+
+ Returns
+ -------
+ result : int64_t
+
+ Notes
+ -----
+ Sets fold by pointer
+ """
+ cdef int64_t delta
+
+ delta = _tzlocal_get_offset_components(val, tz, False, fold)
+
+ return val + delta
@cython.boundscheck(False)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 1d1d371fcec1e..b293c008d6683 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
from functools import partial
from operator import attrgetter
@@ -959,3 +959,95 @@ def test_pass_datetimeindex_to_index(self):
expected = Index(rng.to_pydatetime(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
+
+
+def test_timestamp_constructor_invalid_fold_raise():
+ # Test for #25057
+ # Valid fold values are only [None, 0, 1]
+ msg = "Valid values for the fold argument are None, 0, or 1."
+ with pytest.raises(ValueError, match=msg):
+ Timestamp(123, fold=2)
+
+
+def test_timestamp_constructor_pytz_fold_raise():
+ # Test for #25057
+ # pytz doesn't support fold. Check that we raise
+ # if fold is passed with pytz
+ msg = "pytz timezones do not support fold. Please use dateutil timezones."
+ tz = pytz.timezone("Europe/London")
+ with pytest.raises(ValueError, match=msg):
+ Timestamp(datetime(2019, 10, 27, 0, 30, 0, 0), tz=tz, fold=0)
+
+
+@pytest.mark.parametrize("fold", [0, 1])
+@pytest.mark.parametrize(
+ "ts_input",
+ [
+ 1572136200000000000,
+ 1572136200000000000.0,
+ np.datetime64(1572136200000000000, "ns"),
+ "2019-10-27 01:30:00+01:00",
+ datetime(2019, 10, 27, 0, 30, 0, 0, tzinfo=timezone.utc),
+ ],
+)
+def test_timestamp_constructor_fold_conflict(ts_input, fold):
+ # Test for #25057
+ # Check that we raise on fold conflict
+ msg = (
+ "Cannot pass fold with possibly unambiguous input: int, float, "
+ "numpy.datetime64, str, or timezone-aware datetime-like. "
+ "Pass naive datetime-like or build Timestamp from components."
+ )
+ with pytest.raises(ValueError, match=msg):
+ Timestamp(ts_input=ts_input, fold=fold)
+
+
+@pytest.mark.parametrize("tz", ["dateutil/Europe/London", None])
+@pytest.mark.parametrize("fold", [0, 1])
+def test_timestamp_constructor_retain_fold(tz, fold):
+ # Test for #25057
+ # Check that we retain fold
+ ts = pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold)
+ result = ts.fold
+ expected = fold
+ assert result == expected
+
+
+@pytest.mark.parametrize("tz", ["dateutil/Europe/London"])
+@pytest.mark.parametrize(
+ "ts_input,fold_out",
+ [
+ (1572136200000000000, 0),
+ (1572139800000000000, 1),
+ ("2019-10-27 01:30:00+01:00", 0),
+ ("2019-10-27 01:30:00+00:00", 1),
+ (datetime(2019, 10, 27, 1, 30, 0, 0, fold=0), 0),
+ (datetime(2019, 10, 27, 1, 30, 0, 0, fold=1), 1),
+ ],
+)
+def test_timestamp_constructor_infer_fold_from_value(tz, ts_input, fold_out):
+ # Test for #25057
+ # Check that we infer fold correctly based on timestamps since utc
+ # or strings
+ ts = pd.Timestamp(ts_input, tz=tz)
+ result = ts.fold
+ expected = fold_out
+ assert result == expected
+
+
+@pytest.mark.parametrize("tz", ["dateutil/Europe/London"])
+@pytest.mark.parametrize(
+ "ts_input,fold,value_out",
+ [
+ (datetime(2019, 10, 27, 1, 30, 0, 0), 0, 1572136200000000000),
+ (datetime(2019, 10, 27, 1, 30, 0, 0), 1, 1572139800000000000),
+ ],
+)
+def test_timestamp_constructor_adjust_value_for_fold(tz, ts_input, fold, value_out):
+ # Test for #25057
+ # Check that we adjust value for fold correctly
+ # based on timestamps since utc
+ ts = pd.Timestamp(ts_input, tz=tz, fold=fold)
+ result = ts.value
+ expected = value_out
+ assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index 6537f6ccd8432..cfa7da810ada1 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -140,7 +140,7 @@ def test_tz_localize_ambiguous_compat(self):
# see gh-14621
assert result_pytz.to_pydatetime().tzname() == "GMT"
assert result_dateutil.to_pydatetime().tzname() == "BST"
- assert str(result_pytz) != str(result_dateutil)
+ assert str(result_pytz) == str(result_dateutil)
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
| **PERF Note:** Current implementation slows down the `Timestamp` constructor. I've tested this thoroughly and tracked it down to ba7fcd5 where I changed the function signatures at the very beginning of working on the PR. The performance overhead appeared before any of the logic was implemented.
- [X] closes #25057, closes #31338
- [X] tests added 32 / passed 32
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
### Reasoning for changes
We currently don't support fold from [PEP 495](https://www.python.org/dev/peps/pep-0495/#the-fold-attribute), and this causes us a lot of grief, including, but not limited to, broken Timestamp representations at the edge of DST shift (see #31338). The values can also break.
Support of the fold attribute helps us deal with that. Now, if the wall clock time occurs twice due to a DST shift, we will know exactly at which point in time we are.
This also removes inconsistencies between using `pytz` and `dateutil`: now both the values and representations match for both timezone packages near DST summer/winter shifts.
### Scope of PR
Implementing fold into Timestamp can easily get out of hand. Parent `pydatetime` has it easy, as the object doesn't need to sync its underlying epoch time, fold, and the representation, like we do.
This PR is already large, so I propose we limit its scope to minimal consistent fold support:
- when fold is explicitly supplied, attempt to shift value across the DST boundary according to the value supplied
- infer fold from value during tz-aware Timestamp construction. For example, if Timestamp is built from epoch time, then we can infer fold.
- pass the resulting fold to the underlying `datetime` constructor in `create_timestamp_from_ts` so that it gets stored in the object (can't assign it directly as it's read-only).
- implement local timezone support for fold. Don't have any idea how to do this though.
Things I suggest we leave to discussion and other PRs:
- conflicts resolution. A user can supply `fold=1` for a datetime that is nowhere near a DST shift. We can raise and Error or a Warning in the future. For now, we check whether `fold` can be 1, and if not, we leave it as default `fold=0`.
- consider reintroducing ambiguous time errors. Currently, the implementation assumes `fold == 0` for ambiguous time like Timestamp("2019-10-27 01:30:00", tz='Europe/London'). The error was dropped to mirror the `fold=0` default in pydatetime and to allow the fold attribute to be `bint` in Cython functions.
**Note:** `pydatetime` doesn't infer fold from value and doesn't raise errors when you assign `fold=1` nowhere near a fold. Example:
```python
from datetime import datetime
from dateutil.tz import gettz
dt = datetime.datetime(2015, 10, 27, 5, 30, 0, 0, gettz("Europe/London"), fold=1)
dt.fold
OUT:
1
```
### Performance problems
This implementation behaves as I expect it to, but it slows down scalar constructor benchmarks by 30-70%, even for tz-naive benchmarks.
Update: since the benchmark slowdown appeared before any of the functionality, this has nothing to do with the logic introduced and all to do with adding `bint fold` to function signatures, apparently. I'm afraid I don't know Cython well enough to research further. | https://api.github.com/repos/pandas-dev/pandas/pulls/31563 | 2020-02-02T08:10:40Z | 2020-02-26T12:35:04Z | 2020-02-26T12:35:03Z | 2020-03-06T08:24:04Z |
Backport PR: BUG: Array.__setitem__ failing with nullable boolean mask (#31484) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index cb916cecd4f1b..9e78ff03f5f67 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -71,6 +71,7 @@ Indexing
-
-
+- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
Missing
^^^^^^^
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 7b12f3348e7e7..9eeed42124f2a 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -26,6 +26,7 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from .masked import BaseMaskedArray
@@ -369,6 +370,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 105b14aa3c3b7..aa84edd413bc9 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2073,6 +2073,8 @@ def __setitem__(self, key, value):
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
+
+ key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 82fa9197b39eb..e8d5890d2564f 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -601,6 +601,8 @@ def __setitem__(
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
self._maybe_clear_freq()
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 022e6a7322872..9a0f5794e7607 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -25,6 +25,7 @@
from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -414,6 +415,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index d890c0c16aecc..23cf5f317ac7d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -541,6 +541,7 @@ def __setitem__(self, key, value):
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg)
+ key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 8b1d1e58dc36c..57cc52ce24f8c 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -243,6 +243,7 @@ def __getitem__(self, item):
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index c485d1f50dc9d..b53484e1892f9 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -15,6 +15,7 @@
from pandas.core import ops
from pandas.core.arrays import PandasArray
from pandas.core.construction import extract_array
+from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
@@ -224,6 +225,7 @@ def __setitem__(self, key, value):
# extract_array doesn't extract PandasArray subclasses
value = value._ndarray
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
if scalar_key and not scalar_value:
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 0c5ae506ae0ce..c165910777649 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1072,6 +1072,23 @@ def test_cut(bins, right, include_lowest):
tm.assert_categorical_equal(result, expected)
+def test_array_setitem_nullable_boolean_mask():
+ # GH 31446
+ ser = pd.Series([1, 2], dtype="Int64")
+ result = ser.where(ser > 1)
+ expected = pd.Series([pd.NA, 2], dtype="Int64")
+ tm.assert_series_equal(result, expected)
+
+
+def test_array_setitem():
+ # GH 31446
+ arr = pd.Series([1, 2], dtype="Int64").array
+ arr[arr > 1] = 1
+
+ expected = pd.array([1, 1], dtype="Int64")
+ tm.assert_extension_array_equal(arr, expected)
+
+
# TODO(jreback) - these need testing / are broken
# shift
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 0bb8aede6298c..e0ca603aaa0ed 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -4,6 +4,7 @@
import pytest
import pandas as pd
+from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
@@ -195,3 +196,14 @@ def test_setitem_preserves_views(self, data):
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
+
+ def test_setitem_nullable_mask(self, data):
+ # GH 31446
+ # TODO: there is some issue with PandasArray, therefore,
+ # TODO: skip the setitem test for now, and fix it later
+ if data.dtype != PandasDtype("object"):
+ arr = data[:5]
+ expected = data.take([0, 0, 0, 3, 4])
+ mask = pd.array([True, True, True, False, False])
+ arr[mask] = data[0]
+ self.assert_extension_array_equal(expected, arr)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 743852c35dbd8..8fd4a0171a222 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas.api.extensions import no_default, register_extension_dtype
from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
+from pandas.core.indexers import check_array_indexer
@register_extension_dtype
@@ -144,6 +145,8 @@ def __setitem__(self, key, value):
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
def __len__(self) -> int:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31562 | 2020-02-02T07:49:56Z | 2020-02-02T16:04:35Z | 2020-02-02T16:04:35Z | 2020-02-02T16:04:40Z |
Try manual backport | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 2fc2f1fb6ee8d..a295038b5a0bd 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -127,3 +127,29 @@ For example:
value = str
f"Unknown recived type, got: '{type(value).__name__}'"
+
+
+Imports (aim for absolute)
+==========================
+
+In Python 3, absolute imports are recommended. In absolute import doing something
+like ``import string`` will import the string module rather than ``string.py``
+in the same directory. As much as possible, you should try to write out
+absolute imports that show the whole import chain from toplevel pandas.
+
+Explicit relative imports are also supported in Python 3. But it is not
+recommended to use it. Implicit relative imports should never be used
+and is removed in Python 3.
+
+For example:
+
+::
+
+ # preferred
+ import pandas.core.common as com
+
+ # not preferred
+ from .common import test_base
+
+ # wrong
+ from common import test_base
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index c7a984c66d640..4fdcb93745094 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -635,6 +635,8 @@ many errors as possible, but it may not correct *all* of them. Thus, it is
recommended that you run ``cpplint`` to double check and make any other style
fixes manually.
+.. _contributing.code-formatting:
+
Python (PEP8 / black)
~~~~~~~~~~~~~~~~~~~~~
@@ -656,19 +658,8 @@ apply ``black`` as you edit files.
You should use a ``black`` version >= 19.10b0 as previous versions are not compatible
with the pandas codebase.
-Optionally, you may wish to setup `pre-commit hooks <https://pre-commit.com/>`_
-to automatically run ``black`` and ``flake8`` when you make a git commit. This
-can be done by installing ``pre-commit``::
-
- pip install pre-commit
-
-and then running::
-
- pre-commit install
-
-from the root of the pandas repository. Now ``black`` and ``flake8`` will be run
-each time you commit changes. You can skip these checks with
-``git commit --no-verify``.
+If you wish to run these checks automatically, we encourage you to use
+:ref:`pre-commits <contributing.pre-commit>` instead.
One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this
command will catch any stylistic errors in your changes specifically, but
@@ -676,7 +667,7 @@ be beware it may not catch all of them. For example, if you delete the only
usage of an imported function, it is stylistically incorrect to import an
unused function. However, style-checking the diff will not catch this because
the actual import is not part of the diff. Thus, for completeness, you should
-run this command, though it will take longer::
+run this command, though it may take longer::
git diff upstream/master --name-only -- "*.py" | xargs -r flake8
@@ -694,6 +685,8 @@ behaviour as follows::
This will get all the files being changed by the PR (and ending with ``.py``),
and run ``flake8`` on them, one after the other.
+Note that these commands can be run analogously with ``black``.
+
.. _contributing.import-formatting:
Import formatting
@@ -716,7 +709,6 @@ A summary of our current import sections ( in order ):
Imports are alphabetically sorted within these sections.
-
As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
isort --recursive --check-only pandas
@@ -740,8 +732,37 @@ to automatically format imports correctly. This will modify your local copy of t
The `--recursive` flag can be passed to sort all files in a directory.
+Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`::
+
+ git diff upstream/master --name-only -- "*.py" | xargs -r isort
+
+Where similar caveats apply if you are on OSX or Windows.
+
You can then verify the changes look ok, then git :ref:`commit <contributing.commit-code>` and :ref:`push <contributing.push-code>`.
+.. _contributing.pre-commit:
+
+Pre-Commit
+~~~~~~~~~~
+
+You can run many of these styling checks manually as we have described above. However,
+we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead
+to automatically run ``black``, ``flake8``, ``isort`` when you make a git commit. This
+can be done by installing ``pre-commit``::
+
+ pip install pre-commit
+
+and then running::
+
+ pre-commit install
+
+from the root of the pandas repository. Now all of the styling checks will be
+run each time you commit changes without your needing to run each one manually.
+In addition, using this pre-commit hook will also allow you to more easily
+remain up-to-date with our code checks as they change.
+
+Note that if needed, you can skip these checks with ``git commit --no-verify``.
+
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst
index 00598830e2fe9..fafe63d80249c 100644
--- a/doc/source/development/roadmap.rst
+++ b/doc/source/development/roadmap.rst
@@ -129,20 +129,6 @@ Some specific goals include
* Improve the overall organization of the documentation and specific subsections
of the documentation to make navigation and finding content easier.
-Package docstring validation
-----------------------------
-
-To improve the quality and consistency of pandas docstrings, we've developed
-tooling to check docstrings in a variety of ways.
-https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py
-contains the checks.
-
-Like many other projects, pandas uses the
-`numpydoc <https://numpydoc.readthedocs.io/en/latest/>`__ style for writing
-docstrings. With the collaboration of the numpydoc maintainers, we'd like to
-move the checks to a package other than pandas so that other projects can easily
-use them as well.
-
Performance monitoring
----------------------
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index f3053452b4e6c..5690bb2e4a875 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -117,7 +117,7 @@ programming language.
:hidden:
{% endif %}
{% if not single_doc %}
- What's New in 1.0.0 <whatsnew/v1.0.0>
+ What's New in 1.1.0 <whatsnew/v1.1.0>
getting_started/index
user_guide/index
{% endif -%}
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index c9495d5b137fd..a2c40fea41e13 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -10,6 +10,14 @@ This is the list of changes to pandas between each release. For full details,
see the commit logs at http://github.com/pandas-dev/pandas. For install and
upgrade instructions, see :ref:`install`.
+Version 1.1
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ v1.1.0
+
Version 1.0
-----------
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index b0b88c8b04ad1..6a368638feb2f 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -250,6 +250,10 @@ Other enhancements
- :func:`read_excel` now can read binary Excel (``.xlsb``) files by passing ``engine='pyxlsb'``. For more details and example usage, see the :ref:`Binary Excel files documentation <io.xlsb>`. Closes :issue:`8540`.
- The ``partition_cols`` argument in :meth:`DataFrame.to_parquet` now accepts a string (:issue:`27117`)
- :func:`pandas.read_json` now parses ``NaN``, ``Infinity`` and ``-Infinity`` (:issue:`12213`)
+<<<<<<< HEAD
+- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue:`30270`)
+=======
+>>>>>>> upstream/1.0.x
- DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`)
- :meth:`DataFrame.sort_values` and :meth:`Series.sort_values` have gained ``ignore_index`` keyword to be able to reset index after sorting (:issue:`30114`)
- :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` have gained ``ignore_index`` keyword to reset index (:issue:`30114`)
@@ -816,7 +820,10 @@ Deprecations
- Support for multi-dimensional indexing (e.g. ``index[:, None]``) on a :class:`Index` is deprecated and will be removed in a future version, convert to a numpy array before indexing instead (:issue:`30588`)
- The ``pandas.np`` submodule is now deprecated. Import numpy directly instead (:issue:`30296`)
- The ``pandas.datetime`` class is now deprecated. Import from ``datetime`` instead (:issue:`30610`)
+<<<<<<< HEAD
+=======
- :class:`~DataFrame.diff` will raise a ``TypeError`` rather than implicitly losing the dtype of extension types in the future. Convert to the correct dtype before calling ``diff`` instead (:issue:`31025`)
+>>>>>>> upstream/1.0.x
**Selecting Columns from a Grouped DataFrame**
diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index cb916cecd4f1b..899867378e56f 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Bug fixes
~~~~~~~~~
-- Bug in :meth:`GroupBy.apply` was raising ``TypeError`` if called with function which returned a non-pandas non-scalar object (e.g. a list) (:issue:`31441`)
+
Categorical
^^^^^^^^^^^
@@ -44,7 +44,6 @@ Timezones
Numeric
^^^^^^^
- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
-- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
-
Conversion
@@ -68,9 +67,10 @@ Interval
Indexing
^^^^^^^^
-
+- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
-
-
+- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
Missing
^^^^^^^
@@ -87,7 +87,6 @@ MultiIndex
I/O
^^^
-- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
-
-
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 491bcb21f245d..d526531b159b2 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -35,8 +35,7 @@
raise ImportError(
f"C extension: {module} not built. If you want to import "
"pandas from the source directory, you may need to run "
- "'python setup.py build_ext --inplace --force' to build "
- "the C extensions first."
+ "'python setup.py build_ext --inplace --force' to build the C extensions first."
)
from pandas._config import (
@@ -198,8 +197,7 @@ def __getattr__(name):
warnings.warn(
"The Panel class is removed from pandas. Accessing it "
- "from the top-level namespace will also be removed in "
- "the next version",
+ "from the top-level namespace will also be removed in the next version",
FutureWarning,
stacklevel=2,
)
@@ -238,8 +236,7 @@ class Panel:
elif name in {"SparseSeries", "SparseDataFrame"}:
warnings.warn(
f"The {name} class is removed from pandas. Accessing it from "
- "the top-level namespace will also be removed in the next "
- "version",
+ "the top-level namespace will also be removed in the next version",
FutureWarning,
stacklevel=2,
)
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 0a3009f74492f..42df8a84a8c77 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -165,8 +165,7 @@ def _reset_option(pat, silent=False):
raise ValueError(
"You must specify at least 4 characters when "
"resetting multiple keys, use the special keyword "
- '"all" to reset all the options to their default '
- "value"
+ '"all" to reset all the options to their default value'
)
for k in keys:
diff --git a/pandas/_config/display.py b/pandas/_config/display.py
index 067b7c503baab..ef319f4447565 100644
--- a/pandas/_config/display.py
+++ b/pandas/_config/display.py
@@ -1,6 +1,7 @@
"""
Unopinionated display configuration.
"""
+
import locale
import sys
@@ -11,7 +12,7 @@
_initial_defencoding = None
-def detect_console_encoding():
+def detect_console_encoding() -> str:
"""
Try to find the most capable encoding supported by the console.
slightly modified from the way IPython handles the same issue.
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index dd1d4948aa6e3..0d68e78372d8a 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -12,7 +12,7 @@
@contextmanager
-def set_locale(new_locale, lc_var=locale.LC_ALL):
+def set_locale(new_locale, lc_var: int = locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
@@ -44,7 +44,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL):
locale.setlocale(lc_var, current_locale)
-def can_set_locale(lc, lc_var=locale.LC_ALL):
+def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
@@ -58,7 +58,7 @@ def can_set_locale(lc, lc_var=locale.LC_ALL):
Returns
-------
- is_valid : bool
+ bool
Whether the passed locale can be set
"""
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 7a2fc9dc7845a..dd1f38ce3a842 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -914,8 +914,7 @@ def rank_1d(rank_t[:] in_arr, ties_method='average',
ranks[argsorted[j]] = i + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported for '
- 'non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = j + 1
@@ -971,8 +970,7 @@ def rank_1d(rank_t[:] in_arr, ties_method='average',
ranks[argsorted[j]] = i + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported for '
- 'non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = j + 1
@@ -1137,8 +1135,7 @@ def rank_2d(rank_t[:, :] in_arr, axis=0, ties_method='average',
ranks[i, argsorted[i, z]] = j + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported '
- 'for non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for z in range(j - dups + 1, j + 1):
ranks[i, argsorted[i, z]] = z + 1
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index abb8a6d388d26..93ea94f7b18fc 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -686,8 +686,7 @@ def _group_ohlc(floating[:, :] out,
raise ValueError('Output array must have 4 columns')
if K > 1:
- raise NotImplementedError("Argument 'values' must have only "
- "one dimension")
+ raise NotImplementedError("Argument 'values' must have only one dimension")
out[:] = np.nan
with nogil:
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 5298d8c5ed34e..878da670b2f68 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -51,8 +51,9 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
k = <bytes>key.encode(encoding)
kb = <uint8_t *>k
if len(k) != 16:
- raise ValueError("key should be a 16-byte string encoded, "
- f"got {k} (len {len(k)})")
+ raise ValueError(
+ f"key should be a 16-byte string encoded, got {k} (len {len(k)})"
+ )
n = len(arr)
@@ -77,8 +78,10 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
hash(val)
data = <bytes>str(val).encode(encoding)
else:
- raise TypeError(f"{val} of type {type(val)} is not a valid type "
- "for hashing, must be string or null")
+ raise TypeError(
+ f"{val} of type {type(val)} is not a valid type for hashing, "
+ "must be string or null"
+ )
l = len(data)
lens[i] = l
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index ac8172146d351..ce6d12d61c521 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -85,7 +85,6 @@ cdef class IndexEngine:
"""
cdef:
object loc
- void* data_ptr
loc = self.get_loc(key)
if isinstance(loc, slice) or util.is_array(loc):
@@ -101,7 +100,6 @@ cdef class IndexEngine:
"""
cdef:
object loc
- void* data_ptr
loc = self.get_loc(key)
value = convert_scalar(arr, value)
@@ -447,7 +445,6 @@ cdef class DatetimeEngine(Int64Engine):
conv = maybe_datetimelike_to_i8(val)
loc = values.searchsorted(conv, side='left')
except TypeError:
- self._date_check_type(val)
raise KeyError(val)
if loc == len(values) or values[loc] != conv:
@@ -470,12 +467,6 @@ cdef class DatetimeEngine(Int64Engine):
val = maybe_datetimelike_to_i8(val)
return self.mapping.get_item(val)
except (TypeError, ValueError):
- self._date_check_type(val)
- raise KeyError(val)
-
- cdef inline _date_check_type(self, object val):
- hash(val)
- if not util.is_integer_object(val):
raise KeyError(val)
def get_indexer(self, values):
diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx
index 01f4fb060d982..cdccdb504571c 100644
--- a/pandas/_libs/indexing.pyx
+++ b/pandas/_libs/indexing.pyx
@@ -18,6 +18,7 @@ cdef class _NDFrameIndexerBase:
if ndim is None:
ndim = self._ndim = self.obj.ndim
if ndim > 2:
- raise ValueError("NDFrameIndexer does not support "
- "NDFrame objects with ndim > 2")
+ raise ValueError(
+ "NDFrameIndexer does not support NDFrame objects with ndim > 2"
+ )
return ndim
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index ee83901040b36..3a6dd506b2428 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -72,9 +72,9 @@ cdef class IntIndex(SparseIndex):
"""
if self.npoints > self.length:
- msg = (f"Too many indices. Expected "
- f"{self.length} but found {self.npoints}")
- raise ValueError(msg)
+ raise ValueError(
+ f"Too many indices. Expected {self.length} but found {self.npoints}"
+ )
# Indices are vacuously ordered and non-negative
# if the sequence of indices is empty.
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 5a30b71a6fea1..0e57b563d4d25 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -127,9 +127,9 @@ cpdef assert_almost_equal(a, b,
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
- assert has_length(a) and has_length(b), ("Can't compare objects without "
- "length, one or both is invalid: "
- f"({a}, {b})")
+ assert has_length(a) and has_length(b), (
+ f"Can't compare objects without length, one or both is invalid: ({a}, {b})"
+ )
if a_is_ndarray and b_is_ndarray:
na, nb = a.size, b.size
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 2988d7bae9a5e..a2b433c2007ff 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -99,6 +99,11 @@ def ensure_datetime64ns(arr: ndarray, copy: bool=True):
shape = (<object>arr).shape
+ if (<object>arr).dtype.byteorder == ">":
+ # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap
+ dtype = arr.dtype
+ arr = arr.astype(dtype.newbyteorder("<"))
+
ivalues = arr.view(np.int64).ravel()
result = np.empty(shape, dtype=NS_DTYPE)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index abe7f9e5b4105..36566b55e74ad 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -161,8 +161,7 @@ def round_nsint64(values, mode, freq):
# if/elif above should catch all rounding modes defined in enum 'RoundTo':
# if flow of control arrives here, it is a bug
- raise ValueError("round_nsint64 called with an unrecognized "
- "rounding mode")
+ raise ValueError("round_nsint64 called with an unrecognized rounding mode")
# ----------------------------------------------------------------------
@@ -324,8 +323,10 @@ class Timestamp(_Timestamp):
Function is not implemented. Use pd.to_datetime().
"""
- raise NotImplementedError("Timestamp.strptime() is not implemented."
- "Use to_datetime() to parse date strings.")
+ raise NotImplementedError(
+ "Timestamp.strptime() is not implemented. "
+ "Use to_datetime() to parse date strings."
+ )
@classmethod
def combine(cls, date, time):
@@ -381,8 +382,9 @@ class Timestamp(_Timestamp):
if tzinfo is not None:
if not PyTZInfo_Check(tzinfo):
# tzinfo must be a datetime.tzinfo object, GH#17690
- raise TypeError(f'tzinfo must be a datetime.tzinfo object, '
- f'not {type(tzinfo)}')
+ raise TypeError(
+ f"tzinfo must be a datetime.tzinfo object, not {type(tzinfo)}"
+ )
elif tz is not None:
raise ValueError('Can provide at most one of tz, tzinfo')
@@ -393,8 +395,10 @@ class Timestamp(_Timestamp):
# User passed a date string to parse.
# Check that the user didn't also pass a date attribute kwarg.
if any(arg is not None for arg in _date_attributes):
- raise ValueError('Cannot pass a date attribute keyword '
- 'argument when passing a date string')
+ raise ValueError(
+ "Cannot pass a date attribute keyword "
+ "argument when passing a date string"
+ )
elif ts_input is _no_input:
# User passed keyword arguments.
@@ -578,8 +582,10 @@ timedelta}, default 'raise'
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
- raise AttributeError("Cannot directly set timezone. Use tz_localize() "
- "or tz_convert() as appropriate")
+ raise AttributeError(
+ "Cannot directly set timezone. "
+ "Use tz_localize() or tz_convert() as appropriate"
+ )
def __setstate__(self, state):
self.value = state[0]
@@ -598,9 +604,10 @@ timedelta}, default 'raise'
if self.tz is not None:
# GH#21333
- warnings.warn("Converting to Period representation will "
- "drop timezone information.",
- UserWarning)
+ warnings.warn(
+ "Converting to Period representation will drop timezone information.",
+ UserWarning,
+ )
if freq is None:
freq = self.freq
@@ -810,13 +817,13 @@ default 'raise'
if ambiguous == 'infer':
raise ValueError('Cannot infer offset with only one time.')
- nonexistent_options = ('raise', 'NaT', 'shift_forward',
- 'shift_backward')
+ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta):
- raise ValueError("The nonexistent argument must be one of 'raise', "
- "'NaT', 'shift_forward', 'shift_backward' or "
- "a timedelta object")
+ raise ValueError(
+ "The nonexistent argument must be one of 'raise', "
+ "'NaT', 'shift_forward', 'shift_backward' or a timedelta object"
+ )
if self.tzinfo is None:
# tz naive, localize
@@ -833,8 +840,9 @@ default 'raise'
value = tz_convert_single(self.value, UTC, self.tz)
return Timestamp(value, tz=tz, freq=self.freq)
else:
- raise TypeError('Cannot localize tz-aware Timestamp, use '
- 'tz_convert for conversions')
+ raise TypeError(
+ "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
+ )
def tz_convert(self, tz):
"""
@@ -857,17 +865,28 @@ default 'raise'
"""
if self.tzinfo is None:
# tz naive, use tz_localize
- raise TypeError('Cannot convert tz-naive Timestamp, use '
- 'tz_localize to localize')
+ raise TypeError(
+ "Cannot convert tz-naive Timestamp, use tz_localize to localize"
+ )
else:
# Same UTC timestamp, different time zone
return Timestamp(self.value, tz=tz, freq=self.freq)
astimezone = tz_convert
- def replace(self, year=None, month=None, day=None,
- hour=None, minute=None, second=None, microsecond=None,
- nanosecond=None, tzinfo=object, fold=0):
+ def replace(
+ self,
+ year=None,
+ month=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+ microsecond=None,
+ nanosecond=None,
+ tzinfo=object,
+ fold=0,
+ ):
"""
implements datetime.replace, handles nanoseconds.
@@ -910,8 +929,9 @@ default 'raise'
def validate(k, v):
""" validate integers """
if not is_integer_object(v):
- raise ValueError(f"value must be an integer, received "
- f"{type(v)} for {k}")
+ raise ValueError(
+ f"value must be an integer, received {type(v)} for {k}"
+ )
return v
if year is not None:
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 0348843abc129..fe74d701ef00f 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1871,8 +1871,7 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y,
bint is_observation
if <Py_ssize_t>len(input_y) != N:
- raise ValueError(f"arrays are of different lengths "
- f"({N} and {len(input_y)})")
+ raise ValueError(f"arrays are of different lengths ({N} and {len(input_y)})")
output = np.empty(N, dtype=float)
if N == 0:
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 60cfecd5804ac..3547a33ea357b 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -110,8 +110,7 @@ def _import_lzma():
return lzma
except ImportError:
msg = (
- "Could not import the lzma module. "
- "Your installed Python is incomplete. "
+ "Could not import the lzma module. Your installed Python is incomplete. "
"Attempting to use lzma compression will result in a RuntimeError."
)
warnings.warn(msg)
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 27f1c32058941..6c9ac5944e6a1 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -18,11 +18,9 @@
if _nlv < "1.13.3":
raise ImportError(
- f"this version of pandas is incompatible with "
- f"numpy < 1.13.3\n"
+ "this version of pandas is incompatible with numpy < 1.13.3\n"
f"your numpy version is {_np_version}.\n"
- f"Please upgrade numpy to >= 1.13.3 to use "
- f"this pandas version"
+ "Please upgrade numpy to >= 1.13.3 to use this pandas version"
)
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 7158f251ad805..05ecccc67daef 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -33,13 +33,26 @@
class CompatValidator:
- def __init__(self, defaults, fname=None, method=None, max_fname_arg_count=None):
+ def __init__(
+ self,
+ defaults,
+ fname=None,
+ method: Optional[str] = None,
+ max_fname_arg_count=None,
+ ):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
- def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=None):
+ def __call__(
+ self,
+ args,
+ kwargs,
+ fname=None,
+ max_fname_arg_count=None,
+ method: Optional[str] = None,
+ ) -> None:
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (
@@ -300,7 +313,7 @@ def validate_take_with_convert(convert, args, kwargs):
)
-def validate_window_func(name, args, kwargs):
+def validate_window_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -315,7 +328,7 @@ def validate_window_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_rolling_func(name, args, kwargs):
+def validate_rolling_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -330,7 +343,7 @@ def validate_rolling_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_expanding_func(name, args, kwargs):
+def validate_expanding_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -345,7 +358,7 @@ def validate_expanding_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_groupby_func(name, args, kwargs, allowed=None):
+def validate_groupby_func(name, args, kwargs, allowed=None) -> None:
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
@@ -359,16 +372,15 @@ def validate_groupby_func(name, args, kwargs, allowed=None):
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall(
- f"numpy operations are not valid with "
- f"groupby. Use .groupby(...).{name}() "
- f"instead"
+ "numpy operations are not valid with groupby. "
+ f"Use .groupby(...).{name}() instead"
)
RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
-def validate_resampler_func(method, args, kwargs):
+def validate_resampler_func(method: str, args, kwargs) -> None:
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
@@ -377,15 +389,14 @@ def validate_resampler_func(method, args, kwargs):
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall(
- f"numpy operations are not "
- f"valid with resample. Use "
- f".resample(...).{method}() instead"
+ "numpy operations are not valid with resample. "
+ f"Use .resample(...).{method}() instead"
)
else:
raise TypeError("too many arguments passed in")
-def validate_minmax_axis(axis):
+def validate_minmax_axis(axis: Optional[int]) -> None:
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3eab2186ccb94..0c964452df5da 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -65,25 +65,28 @@ def pytest_runtest_setup(item):
pytest.skip("skipping high memory test since --run-high-memory was not set")
-# Configurations for all tests and all test modules
-
-
@pytest.fixture(autouse=True)
def configure_tests():
+ """
+ Configure settings for all tests and test modules.
+ """
pd.set_option("chained_assignment", "raise")
-# For running doctests: make np and pd names available
-
-
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
+ """
+ Make `np` and `pd` names available for doctests.
+ """
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
+ """
+ Yields scipy sparse matrix classes.
+ """
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@@ -92,8 +95,8 @@ def spmatrix(request):
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
"""
- Fixture for returning the axis numbers of a DataFrame.
- """
+ Fixture for returning the axis numbers of a DataFrame.
+ """
return request.param
@@ -237,6 +240,10 @@ def all_boolean_reductions(request):
@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
+ """
+ Yields a tuple of a function and its corresponding name. Correspond to
+ the list of aggregator "Cython functions" used on selected table items.
+ """
return request.param
@@ -337,6 +344,9 @@ def writable(request):
@pytest.fixture(scope="module")
def datetime_tz_utc():
+ """
+ Yields the UTC timezone object from the datetime module.
+ """
return timezone.utc
@@ -358,6 +368,9 @@ def join_type(request):
@pytest.fixture
def strict_data_files(pytestconfig):
+ """
+ Returns the configuration for the test setting `--strict-data-files`.
+ """
return pytestconfig.getoption("--strict-data-files")
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 7b12f3348e7e7..9eeed42124f2a 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -26,6 +26,7 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from .masked import BaseMaskedArray
@@ -369,6 +370,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 105b14aa3c3b7..3a6662d3e3ae2 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2073,6 +2073,8 @@ def __setitem__(self, key, value):
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
+
+ key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
@@ -2401,8 +2403,8 @@ def isin(self, values):
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
- "only list-like objects are allowed to be passed"
- f" to isin(), you passed a [{values_type}]"
+ "only list-like objects are allowed to be passed "
+ f"to isin(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 82fa9197b39eb..8dd9868c32495 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -601,6 +601,8 @@ def __setitem__(
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
self._maybe_clear_freq()
@@ -929,7 +931,7 @@ def freq(self, value):
@property
def freqstr(self):
"""
- Return the frequency object as a string if its set, otherwise None
+ Return the frequency object as a string if its set, otherwise None.
"""
if self.freq is None:
return None
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e42402b307f28..1988b2e9e33f2 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -234,11 +234,10 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
values = values._data
if not isinstance(values, np.ndarray):
- msg = (
+ raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray ndarray, or Series or Index containing one of those."
)
- raise ValueError(msg)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
@@ -249,20 +248,18 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
values = values.view(_NS_DTYPE)
if values.dtype != _NS_DTYPE:
- msg = (
- "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'."
- f" Got {values.dtype} instead."
+ raise ValueError(
+ "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
+ f"Got {values.dtype} instead."
)
- raise ValueError(msg)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
- msg = (
+ raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
- raise ValueError(msg)
if copy:
values = values.copy()
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 022e6a7322872..9a0f5794e7607 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -25,6 +25,7 @@
from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -414,6 +415,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index d890c0c16aecc..23cf5f317ac7d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -541,6 +541,7 @@ def __setitem__(self, key, value):
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg)
+ key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 8b1d1e58dc36c..57cc52ce24f8c 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -243,6 +243,7 @@ def __getitem__(self, item):
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8b49c2186dde0..1e2a02e988fdd 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -297,12 +297,12 @@ def __arrow_array__(self, type=None):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
- "Not supported to convert PeriodArray to array with different"
- " 'freq' ({0} vs {1})".format(self.freqstr, type.freq)
+ "Not supported to convert PeriodArray to array with different "
+ f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
- "Not supported to convert PeriodArray to '{0}' type".format(type)
+ f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index c485d1f50dc9d..b53484e1892f9 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -15,6 +15,7 @@
from pandas.core import ops
from pandas.core.arrays import PandasArray
from pandas.core.construction import extract_array
+from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
@@ -224,6 +225,7 @@ def __setitem__(self, key, value):
# extract_array doesn't extract PandasArray subclasses
value = value._ndarray
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
if scalar_key and not scalar_value:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index c34d14f15075c..516a271042c9b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -43,8 +43,6 @@
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
-_BAD_DTYPE = "dtype {dtype} cannot be converted to timedelta64[ns]"
-
def _is_convertible_to_td(key):
return isinstance(key, (Tick, timedelta, np.timedelta64, str))
@@ -1064,7 +1062,7 @@ def _validate_td64_dtype(dtype):
raise ValueError(msg)
if not is_dtype_equal(dtype, _TD_DTYPE):
- raise ValueError(_BAD_DTYPE.format(dtype=dtype))
+ raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]")
return dtype
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 1350587b5ca90..d91586e6c9b81 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -466,8 +466,8 @@ def _maybe_evaluate_binop(
if res.has_invalid_return_type:
raise TypeError(
- f"unsupported operand type(s) for {res.op}:"
- f" '{lhs.type}' and '{rhs.type}'"
+ f"unsupported operand type(s) for {res.op}: "
+ f"'{lhs.type}' and '{rhs.type}'"
)
if self.engine != "pytables":
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 5a007f28d63cb..f62f03be9b732 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -194,12 +194,11 @@ def ensure_python_int(value: Union[int, np.integer]) -> int:
"""
if not is_scalar(value):
raise TypeError(f"Value needs to be a scalar value, was type {type(value)}")
- msg = "Wrong type {} for value {}"
try:
new_value = int(value)
assert new_value == value
except (TypeError, ValueError, AssertionError):
- raise TypeError(msg.format(type(value), value))
+ raise TypeError(f"Wrong type {type(value)} for value {value}")
return new_value
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 466ed815e8e5a..93522abc3a48f 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -435,12 +435,11 @@ def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def __repr__(self) -> str_type:
- tpl = "CategoricalDtype(categories={data}ordered={ordered})"
if self.categories is None:
data = "None, "
else:
data = self.categories._format_data(name=type(self).__name__)
- return tpl.format(data=data, ordered=self.ordered)
+ return f"CategoricalDtype(categories={data}ordered={self.ordered})"
@staticmethod
def _hash_categories(categories, ordered: Ordered = True) -> int:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cfd37ac961413..c523433f8b4ce 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -102,7 +102,6 @@
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
-from pandas.core.groupby import generic as groupby_generic
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.datetimes import DatetimeIndex
@@ -129,6 +128,7 @@
import pandas.plotting
if TYPE_CHECKING:
+ from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
@@ -2443,7 +2443,7 @@ def _verbose_repr():
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
- line_no = _put_str(" {num}".format(num=i), space_num)
+ line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
count = counts.iloc[i]
@@ -5789,13 +5789,14 @@ def groupby(
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
- ) -> "groupby_generic.DataFrameGroupBy":
+ ) -> "DataFrameGroupBy":
+ from pandas.core.groupby.generic import DataFrameGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- return groupby_generic.DataFrameGroupBy(
+ return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 18e6b913cc10d..5392f322819b5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8,6 +8,7 @@
import re
from textwrap import dedent
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Dict,
@@ -101,6 +102,9 @@
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
+if TYPE_CHECKING:
+ from pandas.core.resample import Resampler
+
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
@@ -177,7 +181,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset(["get_values", "ix"])
+ _deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
@@ -261,8 +265,8 @@ def _validate_dtype(self, dtype):
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
- "compound dtypes are not implemented"
- f" in the {type(self).__name__} constructor"
+ "compound dtypes are not implemented "
+ f"in the {type(self).__name__} constructor"
)
return dtype
@@ -1698,8 +1702,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
multi_message = (
"\n"
"For a multi-index, the label must be a "
- "tuple with elements corresponding to "
- "each level."
+ "tuple with elements corresponding to each level."
)
else:
multi_message = ""
@@ -4132,7 +4135,6 @@ def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
def sort_values(
self,
- by=None,
axis=0,
ascending=True,
inplace: bool_t = False,
@@ -7833,7 +7835,7 @@ def resample(
base: int = 0,
on=None,
level=None,
- ):
+ ) -> "Resampler":
"""
Resample time-series data.
@@ -8098,10 +8100,10 @@ def resample(
2000-01-04 36 90
"""
- from pandas.core.resample import resample
+ from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
- return resample(
+ return get_resampler(
self,
freq=rule,
label=label,
@@ -9138,11 +9140,10 @@ def tshift(
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
- msg = (
- f"Given freq {freq.rule_code} does not match"
- f" PeriodIndex freq {orig_freq.rule_code}"
+ raise ValueError(
+ f"Given freq {freq.rule_code} does not match "
+ f"PeriodIndex freq {orig_freq.rule_code}"
)
- raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4fcddb5cade4a..c87e84fa1ebdd 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2847,12 +2847,12 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
if kind == "iloc":
return self._validate_indexer("positional", key, kind)
@@ -2860,11 +2860,11 @@ def _convert_scalar_indexer(self, key, kind=None):
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
- # is positional indexing (eg. .ix on with a float)
+ # is positional indexing (eg. .loc with a float)
# or label indexing if we are using a type able
# to be represented in the index
- if kind in ["getitem", "ix"] and is_float(key):
+ if kind in ["getitem"] and is_float(key):
if not self.is_floating():
return self._invalid_indexer("label", key)
@@ -2900,12 +2900,12 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs["_convert_slice_indexer"])
def _convert_slice_indexer(self, key: slice, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# validate iloc
if kind == "iloc":
@@ -3044,7 +3044,7 @@ def _convert_index_indexer(self, keyarr):
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
if (
- kind in [None, "iloc", "ix"]
+ kind in [None, "iloc"]
and is_integer_dtype(keyarr)
and not self.is_floating()
and not isinstance(keyarr, ABCPeriodIndex)
@@ -4740,7 +4740,7 @@ def _validate_indexer(self, form, key, kind):
If we are positional indexer, validate that we have appropriate
typed bounds must be an integer.
"""
- assert kind in ["ix", "loc", "getitem", "iloc"]
+ assert kind in ["loc", "getitem", "iloc"]
if key is None:
pass
@@ -4761,7 +4761,7 @@ def _validate_indexer(self, form, key, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
@@ -4774,15 +4774,14 @@ def _validate_indexer(self, form, key, kind):
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
- if not (kind in ["ix"] and (self.holds_integer() or self.is_floating())):
- self._invalid_indexer("slice", label)
+ self._invalid_indexer("slice", label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
@@ -4816,19 +4815,19 @@ def get_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
int
Index of label.
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
- f"Invalid value for side kwarg, must be either"
- f" 'left' or 'right': {side}"
+ "Invalid value for side kwarg, must be either "
+ f"'left' or 'right': {side}"
)
original_label = label
@@ -4883,7 +4882,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
- kind : {'ix', 'loc', 'getitem'} or None
+ kind : {'loc', 'getitem'} or None
Returns
-------
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 512013678593e..5c1ff63e9eb44 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -435,19 +435,6 @@ def _engine(self):
codes = self.codes
return self._engine_type(lambda: codes, len(self))
- # introspection
- @cache_readonly
- def is_unique(self) -> bool:
- return self._engine.is_unique
-
- @property
- def is_monotonic_increasing(self):
- return self._engine.is_monotonic_increasing
-
- @property
- def is_monotonic_decreasing(self) -> bool:
- return self._engine.is_monotonic_decreasing
-
@Appender(_index_shared_docs["index_unique"] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c98b4f21dbb92..7435980323c74 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -156,13 +156,11 @@ def equals(self, other) -> bool:
def __contains__(self, key):
try:
res = self.get_loc(key)
- return (
- is_scalar(res)
- or isinstance(res, slice)
- or (is_list_like(res) and len(res))
- )
except (KeyError, TypeError, ValueError):
return False
+ return bool(
+ is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
+ )
def sort_values(self, return_indexer=False, ascending=True):
"""
@@ -372,10 +370,10 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
@@ -384,7 +382,7 @@ def _convert_scalar_indexer(self, key, kind=None):
is_flt = is_float(key)
if kind in ["loc"] and (is_int or is_flt):
self._invalid_indexer("index", key)
- elif kind in ["ix", "getitem"] and is_flt:
+ elif kind in ["getitem"] and is_flt:
self._invalid_indexer("index", key)
return super()._convert_scalar_indexer(key, kind=kind)
@@ -859,11 +857,7 @@ def _is_convertible_to_index_for_join(cls, other: Index) -> bool:
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
- if (
- isinstance(other, type(self))
- and self.freq == other.freq
- and self._can_fast_union(other)
- ):
+ if self._can_fast_union(other):
joined = self._shallow_copy(joined)
joined.name = name
return joined
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 2241921e94694..75515949d1855 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -742,7 +742,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
@@ -752,7 +752,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
-----
Value of `side` parameter should be validated in caller.
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer("slice", label)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index a5ab7cbacea93..0718d5596ec46 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -435,22 +435,8 @@ def memory_usage(self, deep: bool = False) -> int:
# so return the bytes here
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
- @cache_readonly
- def is_monotonic(self) -> bool:
- """
- Return True if the IntervalIndex is monotonic increasing (only equal or
- increasing values), else False
- """
- return self.is_monotonic_increasing
-
- @cache_readonly
- def is_monotonic_increasing(self) -> bool:
- """
- Return True if the IntervalIndex is monotonic increasing (only equal or
- increasing values), else False
- """
- return self._engine.is_monotonic_increasing
-
+ # IntervalTree doesn't have an is_monotonic_decreasing, so have to override
+ # the Index implementation
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 75b96666080aa..515480ed70bc0 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1292,8 +1292,8 @@ def _get_level_number(self, level) -> int:
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
- f"Too many levels: Index has only {self.nlevels} levels,"
- f" {orig_level} is not a valid level number"
+ f"Too many levels: Index has only {self.nlevels} levels, "
+ f"{orig_level} is not a valid level number"
)
# Note: levels are zero-based
elif level >= self.nlevels:
@@ -2180,8 +2180,8 @@ def reorder_levels(self, order):
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
- f"Length of order must be same as number of levels ({self.nlevels}),"
- f" got {len(order)}"
+ f"Length of order must be same as number of levels ({self.nlevels}), "
+ f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
@@ -2536,8 +2536,8 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
- f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth"
- f" ({self.lexsort_depth})"
+ f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
+ f"({self.lexsort_depth})"
)
n = len(tup)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index b9b44284edaa9..9a3a021bd801a 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -99,7 +99,7 @@ def _validate_dtype(cls, dtype: Dtype) -> None:
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@@ -260,7 +260,7 @@ def asi8(self) -> np.ndarray:
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# don't coerce ilocs to integers
if kind != "iloc":
@@ -317,7 +317,7 @@ def asi8(self) -> np.ndarray:
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# don't coerce ilocs to integers
if kind != "iloc":
@@ -404,7 +404,7 @@ def astype(self, dtype, copy=True):
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
if kind == "iloc":
return self._validate_indexer("positional", key, kind)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 6877cf029ed0c..5e96b9a8f4ca2 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -630,7 +630,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'}
Returns
-------
@@ -641,7 +641,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
Value of `side` parameter should be validated in caller.
"""
- assert kind in ["ix", "loc", "getitem"]
+ assert kind in ["loc", "getitem"]
if isinstance(label, datetime):
return Period(label, freq=self.freq)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index c78020fba70c5..2cc2f7a0054c1 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -309,13 +309,13 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
label : object
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if isinstance(label, str):
parsed = Timedelta(label)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5624bb5799104..f3707553da363 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -26,7 +26,7 @@
is_list_like_indexer,
length_of_indexer,
)
-from pandas.core.indexes.api import Index, InvalidIndexError
+from pandas.core.indexes.api import Index
# "null slice"
_NS = slice(None, None)
@@ -578,39 +578,6 @@ def __call__(self, axis=None):
new_self.axis = axis
return new_self
- # TODO: remove once geopandas no longer needs this
- def __getitem__(self, key):
- # Used in ix and downstream in geopandas _CoordinateIndexer
- if type(key) is tuple:
- # Note: we check the type exactly instead of with isinstance
- # because NamedTuple is checked separately.
- key = tuple(com.apply_if_callable(x, self.obj) for x in key)
- try:
- values = self.obj._get_value(*key)
- except (KeyError, TypeError, InvalidIndexError, AttributeError):
- # TypeError occurs here if the key has non-hashable entries,
- # generally slice or list.
- # TODO(ix): most/all of the TypeError cases here are for ix,
- # so this check can be removed once ix is removed.
- # The InvalidIndexError is only catched for compatibility
- # with geopandas, see
- # https://github.com/pandas-dev/pandas/issues/27258
- # TODO: The AttributeError is for IntervalIndex which
- # incorrectly implements get_value, see
- # https://github.com/pandas-dev/pandas/issues/27865
- pass
- else:
- if is_scalar(values):
- return values
-
- return self._getitem_tuple(key)
- else:
- # we by definition only have the 0th axis
- axis = self.axis or 0
-
- key = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(key, axis=axis)
-
def _get_label(self, label, axis: int):
if self.ndim == 1:
# for perf reasons we want to try _xs first
@@ -1339,9 +1306,6 @@ def _multi_take(self, tup: Tuple):
}
return o._reindex_with_indexers(d, copy=True, allow_dups=True)
- def _convert_for_reindex(self, key, axis: int):
- return key
-
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
@@ -1462,42 +1426,6 @@ def _getitem_nested_tuple(self, tup: Tuple):
return obj
- # TODO: remove once geopandas no longer needs __getitem__
- def _getitem_axis(self, key, axis: int):
- if is_iterator(key):
- key = list(key)
- self._validate_key(key, axis)
-
- labels = self.obj._get_axis(axis)
- if isinstance(key, slice):
- return self._get_slice_axis(key, axis=axis)
- elif is_list_like_indexer(key) and not (
- isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)
- ):
-
- if hasattr(key, "ndim") and key.ndim > 1:
- raise ValueError("Cannot index with multidimensional key")
-
- return self._getitem_iterable(key, axis=axis)
- else:
-
- # maybe coerce a float scalar to integer
- key = labels._maybe_cast_indexer(key)
-
- if is_integer(key):
- if axis == 0 and isinstance(labels, ABCMultiIndex):
- try:
- return self._get_label(key, axis=axis)
- except (KeyError, TypeError):
- if self.obj.index.levels[0].is_integer():
- raise
-
- # this is the fallback! (for a non-float, non-integer index)
- if not labels.is_floating() and not labels.is_integer():
- return self._get_loc(key, axis=axis)
-
- return self._get_label(key, axis=axis)
-
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
"""
Transform a list-like of keys into a new index and an indexer.
@@ -1538,10 +1466,6 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
return ax[indexer], indexer
if ax.is_unique and not getattr(ax, "is_overlapping", False):
- # If we are trying to get actual keys from empty Series, we
- # patiently wait for a KeyError later on - otherwise, convert
- if len(ax) or not len(key):
- key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
else:
@@ -1749,13 +1673,16 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
class _LocationIndexer(_NDFrameIndexer):
+ _takeable: bool = False
+
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
if self._is_scalar_access(key):
try:
- return self._getitem_scalar(key)
+ return self.obj._get_value(*key, takeable=self._takeable)
except (KeyError, IndexError, AttributeError):
+ # AttributeError for IntervalTree get_value
pass
return self._getitem_tuple(key)
else:
@@ -1768,9 +1695,6 @@ def __getitem__(self, key):
def _is_scalar_access(self, key: Tuple):
raise NotImplementedError()
- def _getitem_scalar(self, key):
- raise NotImplementedError()
-
def _getitem_axis(self, key, axis: int):
raise NotImplementedError()
@@ -1859,12 +1783,6 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
- def _getitem_scalar(self, key):
- # a fast-path to scalar access
- # if not, raise
- values = self.obj._get_value(*key)
- return values
-
def _get_partial_string_timestamp_match_key(self, key, labels):
"""
Translate any partial string timestamp matches in key, returning the
@@ -1970,6 +1888,7 @@ class _iLocIndexer(_LocationIndexer):
"point is EXCLUDED), listlike of integers, boolean array"
)
_get_slice_axis = _NDFrameIndexer._get_slice_axis
+ _takeable = True
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
@@ -2034,12 +1953,6 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
- def _getitem_scalar(self, key):
- # a fast-path to scalar access
- # if not, raise
- values = self.obj._get_value(*key, takeable=True)
- return values
-
def _validate_integer(self, key: int, axis: int) -> None:
"""
Check that 'key' is a valid position in the desired axis.
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index f51d71d5507a0..1355060efd097 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -664,8 +664,7 @@ def to_series(right):
elif right.ndim > 2:
raise ValueError(
- "Unable to coerce to Series/DataFrame, dim "
- f"must be <= 2: {right.shape}"
+ f"Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0e43880dfda07..fb837409a00f5 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1262,7 +1262,7 @@ def _constructor(self):
return TimedeltaIndexResampler
-def resample(obj, kind=None, **kwds):
+def get_resampler(obj, kind=None, **kwds):
"""
Create a TimeGrouper and return our resampler.
"""
@@ -1270,7 +1270,7 @@ def resample(obj, kind=None, **kwds):
return tg._get_resampler(obj, kind=kind)
-resample.__doc__ = Resampler.__doc__
+get_resampler.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 502b8d1941fdf..449f70b2be2fd 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -305,8 +305,7 @@ def __init__(
if isinstance(objs, (NDFrame, str)):
raise TypeError(
"first argument must be an iterable of pandas "
- "objects, you passed an object of type "
- '"{name}"'.format(name=type(objs).__name__)
+ f'objects, you passed an object of type "{type(objs).__name__}"'
)
if join == "outer":
@@ -577,10 +576,7 @@ def _maybe_check_integrity(self, concat_index: Index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index[concat_index.duplicated()].unique()
- raise ValueError(
- "Indexes have overlapping values: "
- "{overlap!s}".format(overlap=overlap)
- )
+ raise ValueError(f"Indexes have overlapping values: {overlap}")
def _concat_indexes(indexes) -> Index:
@@ -648,8 +644,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
# make sure that all of the passed indices have the same nlevels
if not len({idx.nlevels for idx in indexes}) == 1:
raise AssertionError(
- "Cannot concat indices that do "
- "not have the same number of levels"
+ "Cannot concat indices that do not have the same number of levels"
)
# also copies
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index d4ccb19fc0dda..d04287e1e9088 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -52,8 +52,7 @@ def melt(
if not missing.empty:
raise KeyError(
"The following 'id_vars' are not present "
- "in the DataFrame: {missing}"
- "".format(missing=list(missing))
+ f"in the DataFrame: {list(missing)}"
)
else:
id_vars = []
@@ -74,8 +73,7 @@ def melt(
if not missing.empty:
raise KeyError(
"The following 'value_vars' are not present in "
- "the DataFrame: {missing}"
- "".format(missing=list(missing))
+ f"the DataFrame: {list(missing)}"
)
frame = frame.loc[:, id_vars + value_vars]
else:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 5f92e4a88b568..ceee2f66dba42 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -600,13 +600,11 @@ def __init__(
if not is_bool(left_index):
raise ValueError(
- "left_index parameter must be of type bool, not "
- "{left_index}".format(left_index=type(left_index))
+ f"left_index parameter must be of type bool, not {type(left_index)}"
)
if not is_bool(right_index):
raise ValueError(
- "right_index parameter must be of type bool, not "
- "{right_index}".format(right_index=type(right_index))
+ f"right_index parameter must be of type bool, not {type(right_index)}"
)
# warn user when merging between different levels
@@ -1073,9 +1071,8 @@ def _maybe_coerce_merge_keys(self):
continue
msg = (
- "You are trying to merge on {lk_dtype} and "
- "{rk_dtype} columns. If you wish to proceed "
- "you should use pd.concat".format(lk_dtype=lk.dtype, rk_dtype=rk.dtype)
+ f"You are trying to merge on {lk.dtype} and "
+ f"{rk.dtype} columns. If you wish to proceed you should use pd.concat"
)
# if we are numeric, then allow differing
@@ -1092,8 +1089,7 @@ def _maybe_coerce_merge_keys(self):
warnings.warn(
"You are merging on int and float "
"columns where the float values "
- "are not equal to their int "
- "representation",
+ "are not equal to their int representation",
UserWarning,
)
continue
@@ -1103,8 +1099,7 @@ def _maybe_coerce_merge_keys(self):
warnings.warn(
"You are merging on int and float "
"columns where the float values "
- "are not equal to their int "
- "representation",
+ "are not equal to their int representation",
UserWarning,
)
continue
@@ -1251,20 +1246,17 @@ def _validate(self, validate: str):
)
elif not left_unique:
raise MergeError(
- "Merge keys are not unique in left dataset; "
- "not a one-to-one merge"
+ "Merge keys are not unique in left dataset; not a one-to-one merge"
)
elif not right_unique:
raise MergeError(
- "Merge keys are not unique in right dataset; "
- "not a one-to-one merge"
+ "Merge keys are not unique in right dataset; not a one-to-one merge"
)
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError(
- "Merge keys are not unique in left dataset; "
- "not a one-to-many merge"
+ "Merge keys are not unique in left dataset; not a one-to-many merge"
)
elif validate in ["many_to_one", "m:1"]:
@@ -1833,8 +1825,7 @@ def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool =
raise AssertionError(
"If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
- "number of join keys must be the number of "
- "levels in right_ax"
+ "number of join keys must be the number of levels in right_ax"
)
left_indexer, right_indexer = _get_multiindex_indexer(
@@ -2004,8 +1995,7 @@ def _validate_operand(obj: FrameOrSeries) -> "DataFrame":
return obj.to_frame()
else:
raise TypeError(
- "Can only merge Series or DataFrame objects, "
- "a {obj} was passed".format(obj=type(obj))
+ f"Can only merge Series or DataFrame objects, a {type(obj)} was passed"
)
@@ -2021,10 +2011,7 @@ def _items_overlap_with_suffix(left: Index, lsuffix, right: Index, rsuffix):
return left, right
if not lsuffix and not rsuffix:
- raise ValueError(
- "columns overlap but no suffix specified: "
- "{rename}".format(rename=to_rename)
- )
+ raise ValueError(f"columns overlap but no suffix specified: {to_rename}")
def renamer(x, suffix):
"""
@@ -2043,7 +2030,7 @@ def renamer(x, suffix):
x : renamed column name
"""
if x in to_rename and suffix is not None:
- return "{x}{suffix}".format(x=x, suffix=suffix)
+ return f"{x}{suffix}"
return x
lrenamer = partial(renamer, suffix=lsuffix)
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 15e6aaeaa5e9d..00a7645d0c7a5 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -371,8 +371,7 @@ def _bins_to_cuts(
if duplicates not in ["raise", "drop"]:
raise ValueError(
- "invalid value for 'duplicates' parameter, "
- "valid options are: raise, drop"
+ "invalid value for 'duplicates' parameter, valid options are: raise, drop"
)
if isinstance(bins, IntervalIndex):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6a2a30a3efa17..a98c5ecc6c973 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4,7 +4,18 @@
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
-from typing import IO, Any, Callable, Hashable, List, Optional
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Hashable,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Type,
+)
import warnings
import numpy as np
@@ -12,6 +23,7 @@
from pandas._config import get_option
from pandas._libs import index as libindex, lib, reshape, tslibs
+from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg, validate_percentile
@@ -59,7 +71,6 @@
is_empty_data,
sanitize_array,
)
-from pandas.core.groupby import generic as groupby_generic
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import (
@@ -81,6 +92,10 @@
import pandas.io.formats.format as fmt
import pandas.plotting
+if TYPE_CHECKING:
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby.generic import SeriesGroupBy
+
__all__ = ["Series"]
_shared_doc_kwargs = dict(
@@ -357,11 +372,11 @@ def _init_dict(self, data, index=None, dtype=None):
# ----------------------------------------------------------------------
@property
- def _constructor(self):
+ def _constructor(self) -> Type["Series"]:
return Series
@property
- def _constructor_expanddim(self):
+ def _constructor_expanddim(self) -> Type["DataFrame"]:
from pandas.core.frame import DataFrame
return DataFrame
@@ -373,7 +388,7 @@ def _can_hold_na(self):
_index = None
- def _set_axis(self, axis, labels, fastpath=False):
+ def _set_axis(self, axis, labels, fastpath=False) -> None:
"""
Override generic, we want to set the _typ here.
"""
@@ -551,7 +566,7 @@ def __len__(self) -> int:
"""
return len(self._data)
- def view(self, dtype=None):
+ def view(self, dtype=None) -> "Series":
"""
Create a new view of the Series.
@@ -763,7 +778,7 @@ def __array__(self, dtype=None) -> np.ndarray:
# ----------------------------------------------------------------------
- def _unpickle_series_compat(self, state):
+ def _unpickle_series_compat(self, state) -> None:
if isinstance(state, dict):
self._data = state["_data"]
self.name = state["name"]
@@ -794,7 +809,7 @@ def _unpickle_series_compat(self, state):
# indexers
@property
- def axes(self):
+ def axes(self) -> List[Index]:
"""
Return a list of the row axis labels.
"""
@@ -861,7 +876,7 @@ def _ixs(self, i: int, axis: int = 0):
else:
return values[i]
- def _slice(self, slobj: slice, axis: int = 0, kind=None):
+ def _slice(self, slobj: slice, axis: int = 0, kind=None) -> "Series":
slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
return self._get_values(slobj)
@@ -1151,7 +1166,7 @@ def _set_value(self, label, value, takeable: bool = False):
def _is_mixed_type(self):
return False
- def repeat(self, repeats, axis=None):
+ def repeat(self, repeats, axis=None) -> "Series":
"""
Repeat elements of a Series.
@@ -1440,8 +1455,8 @@ def to_string(
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
- "result must be of type str, type"
- f" of result is {repr(type(result).__name__)}"
+ "result must be of type str, type "
+ f"of result is {repr(type(result).__name__)}"
)
if buf is None:
@@ -1476,7 +1491,7 @@ def to_markdown(
# ----------------------------------------------------------------------
- def items(self):
+ def items(self) -> Iterable[Tuple[Label, Any]]:
"""
Lazily iterate over (index, value) tuples.
@@ -1506,13 +1521,13 @@ def items(self):
return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
- def iteritems(self):
+ def iteritems(self) -> Iterable[Tuple[Label, Any]]:
return self.items()
# ----------------------------------------------------------------------
# Misc public methods
- def keys(self):
+ def keys(self) -> Index:
"""
Return alias for index.
@@ -1558,7 +1573,7 @@ def to_dict(self, into=dict):
into_c = com.standardize_mapping(into)
return into_c(self.items())
- def to_frame(self, name=None):
+ def to_frame(self, name=None) -> "DataFrame":
"""
Convert Series to DataFrame.
@@ -1590,7 +1605,7 @@ def to_frame(self, name=None):
return df
- def _set_name(self, name, inplace=False):
+ def _set_name(self, name, inplace=False) -> "Series":
"""
Set the Series name.
@@ -1670,13 +1685,14 @@ def groupby(
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
- ) -> "groupby_generic.SeriesGroupBy":
+ ) -> "SeriesGroupBy":
+ from pandas.core.groupby.generic import SeriesGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- return groupby_generic.SeriesGroupBy(
+ return SeriesGroupBy(
obj=self,
keys=by,
axis=axis,
@@ -1732,7 +1748,7 @@ def count(self, level=None):
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev, dtype="int64").__finalize__(self)
- def mode(self, dropna=True):
+ def mode(self, dropna=True) -> "Series":
"""
Return the mode(s) of the dataset.
@@ -1817,7 +1833,7 @@ def unique(self):
result = super().unique()
return result
- def drop_duplicates(self, keep="first", inplace=False):
+ def drop_duplicates(self, keep="first", inplace=False) -> "Series":
"""
Return Series with duplicate values removed.
@@ -1894,7 +1910,7 @@ def drop_duplicates(self, keep="first", inplace=False):
"""
return super().drop_duplicates(keep=keep, inplace=inplace)
- def duplicated(self, keep="first"):
+ def duplicated(self, keep="first") -> "Series":
"""
Indicate duplicate Series values.
@@ -2113,7 +2129,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
return np.nan
return self.index[i]
- def round(self, decimals=0, *args, **kwargs):
+ def round(self, decimals=0, *args, **kwargs) -> "Series":
"""
Round each value in a Series to the given number of decimals.
@@ -2208,7 +2224,7 @@ def quantile(self, q=0.5, interpolation="linear"):
# scalar
return result.iloc[0]
- def corr(self, other, method="pearson", min_periods=None):
+ def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
@@ -2261,7 +2277,7 @@ def corr(self, other, method="pearson", min_periods=None):
f"'{method}' was supplied"
)
- def cov(self, other, min_periods=None):
+ def cov(self, other, min_periods=None) -> float:
"""
Compute covariance with Series, excluding missing values.
@@ -2290,7 +2306,7 @@ def cov(self, other, min_periods=None):
return np.nan
return nanops.nancov(this.values, other.values, min_periods=min_periods)
- def diff(self, periods=1):
+ def diff(self, periods=1) -> "Series":
"""
First discrete difference of element.
@@ -2359,7 +2375,7 @@ def diff(self, periods=1):
result = algorithms.diff(self.array, periods)
return self._constructor(result, index=self.index).__finalize__(self)
- def autocorr(self, lag=1):
+ def autocorr(self, lag=1) -> float:
"""
Compute the lag-N autocorrelation.
@@ -2502,7 +2518,7 @@ def searchsorted(self, value, side="left", sorter=None):
# -------------------------------------------------------------------
# Combination
- def append(self, to_append, ignore_index=False, verify_integrity=False):
+ def append(self, to_append, ignore_index=False, verify_integrity=False) -> "Series":
"""
Concatenate two or more Series.
@@ -2579,8 +2595,10 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
to_concat.extend(to_append)
else:
to_concat = [self, to_append]
- return concat(
- to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
+ return self._ensure_type(
+ concat(
+ to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
+ )
)
def _binop(self, other, func, level=None, fill_value=None):
@@ -2622,7 +2640,7 @@ def _binop(self, other, func, level=None, fill_value=None):
ret = ops._construct_result(self, result, new_index, name)
return ret
- def combine(self, other, func, fill_value=None):
+ def combine(self, other, func, fill_value=None) -> "Series":
"""
Combine the Series with a Series or scalar according to `func`.
@@ -2719,7 +2737,7 @@ def combine(self, other, func, fill_value=None):
new_values = try_cast_to_ea(self._values, new_values)
return self._constructor(new_values, index=new_index, name=new_name)
- def combine_first(self, other):
+ def combine_first(self, other) -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
@@ -2759,7 +2777,7 @@ def combine_first(self, other):
return this.where(notna(this), other)
- def update(self, other):
+ def update(self, other) -> None:
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
@@ -2818,10 +2836,10 @@ def sort_values(
self,
axis=0,
ascending=True,
- inplace=False,
- kind="quicksort",
- na_position="last",
- ignore_index=False,
+ inplace: bool = False,
+ kind: str = "quicksort",
+ na_position: str = "last",
+ ignore_index: bool = False,
):
"""
Sort by the values.
@@ -3173,7 +3191,7 @@ def sort_index(
else:
return result.__finalize__(self)
- def argsort(self, axis=0, kind="quicksort", order=None):
+ def argsort(self, axis=0, kind="quicksort", order=None) -> "Series":
"""
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
@@ -3211,7 +3229,7 @@ def argsort(self, axis=0, kind="quicksort", order=None):
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self)
- def nlargest(self, n=5, keep="first"):
+ def nlargest(self, n=5, keep="first") -> "Series":
"""
Return the largest `n` elements.
@@ -3309,7 +3327,7 @@ def nlargest(self, n=5, keep="first"):
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
- def nsmallest(self, n=5, keep="first"):
+ def nsmallest(self, n=5, keep="first") -> "Series":
"""
Return the smallest `n` elements.
@@ -3406,7 +3424,7 @@ def nsmallest(self, n=5, keep="first"):
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
- def swaplevel(self, i=-2, j=-1, copy=True):
+ def swaplevel(self, i=-2, j=-1, copy=True) -> "Series":
"""
Swap levels i and j in a :class:`MultiIndex`.
@@ -3429,7 +3447,7 @@ def swaplevel(self, i=-2, j=-1, copy=True):
self
)
- def reorder_levels(self, order):
+ def reorder_levels(self, order) -> "Series":
"""
Rearrange index levels using input order.
@@ -3553,7 +3571,7 @@ def unstack(self, level=-1, fill_value=None):
# ----------------------------------------------------------------------
# function application
- def map(self, arg, na_action=None):
+ def map(self, arg, na_action=None) -> "Series":
"""
Map values of Series according to input correspondence.
@@ -3631,7 +3649,7 @@ def map(self, arg, na_action=None):
new_values = super()._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(self)
- def _gotitem(self, key, ndim, subset=None):
+ def _gotitem(self, key, ndim, subset=None) -> "Series":
"""
Sub-classes to define. Return a sliced object.
@@ -4039,7 +4057,7 @@ def drop(
level=None,
inplace=False,
errors="raise",
- ):
+ ) -> "Series":
"""
Return Series with specified index labels removed.
@@ -4180,7 +4198,7 @@ def replace(
)
@Appender(generic._shared_docs["shift"] % _shared_doc_kwargs)
- def shift(self, periods=1, freq=None, axis=0, fill_value=None):
+ def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "Series":
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
@@ -4239,7 +4257,7 @@ def memory_usage(self, index=True, deep=False):
v += self.index.memory_usage(deep=deep)
return v
- def isin(self, values):
+ def isin(self, values) -> "Series":
"""
Check whether `values` are contained in Series.
@@ -4295,7 +4313,7 @@ def isin(self, values):
result = algorithms.isin(self, values)
return self._constructor(result, index=self.index).__finalize__(self)
- def between(self, left, right, inclusive=True):
+ def between(self, left, right, inclusive=True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
@@ -4399,19 +4417,19 @@ def _convert_dtypes(
return result
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isna(self):
+ def isna(self) -> "Series":
return super().isna()
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isnull(self):
+ def isnull(self) -> "Series":
return super().isnull()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notna(self):
+ def notna(self) -> "Series":
return super().notna()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notnull(self):
+ def notnull(self) -> "Series":
return super().notnull()
def dropna(self, axis=0, inplace=False, how=None):
@@ -4505,7 +4523,7 @@ def dropna(self, axis=0, inplace=False, how=None):
# ----------------------------------------------------------------------
# Time series-oriented methods
- def to_timestamp(self, freq=None, how="start", copy=True):
+ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series":
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
@@ -4530,7 +4548,7 @@ def to_timestamp(self, freq=None, how="start", copy=True):
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values, index=new_index).__finalize__(self)
- def to_period(self, freq=None, copy=True):
+ def to_period(self, freq=None, copy=True) -> "Series":
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index de52a1e46c33c..7ffb13f00848f 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -231,9 +231,7 @@ def _return_parsed_timezone_results(result, timezones, tz, name):
"""
if tz is not None:
raise ValueError(
- "Cannot pass a tz argument when "
- "parsing strings with timezone "
- "information."
+ "Cannot pass a tz argument when parsing strings with timezone information."
)
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
@@ -830,8 +828,7 @@ def f(value):
required = ",".join(req)
raise ValueError(
"to assemble mappings requires at least that "
- f"[year, month, day] be specified: [{required}] "
- "is missing"
+ f"[year, month, day] be specified: [{required}] is missing"
)
# keys we don't recognize
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 64ec0e68e11b0..ed0b816f64800 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -98,8 +98,7 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False):
and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
):
raise TypeError(
- "arguments to moment function must be of type "
- "np.ndarray/Series/DataFrame"
+ "arguments to moment function must be of type np.ndarray/Series/DataFrame"
)
if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 0fa24a0ba1b5a..921cdb3c2523f 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -32,7 +32,7 @@
class BaseIndexer:
- """Base class for window bounds calculations"""
+ """Base class for window bounds calculations."""
def __init__(
self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs,
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index c1e34757b45d4..382f001a74f4a 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1820,8 +1820,7 @@ def _on(self) -> Index:
else:
raise ValueError(
f"invalid on specified as {self.on}, "
- "must be a column (of DataFrame), an Index "
- "or None"
+ "must be a column (of DataFrame), an Index or None"
)
def validate(self):
@@ -1838,9 +1837,8 @@ def validate(self):
# we don't allow center
if self.center:
raise NotImplementedError(
- "center is not implemented "
- "for datetimelike and offset "
- "based windows"
+ "center is not implemented for "
+ "datetimelike and offset based windows"
)
# this will raise ValueError on non-fixed freqs
@@ -1886,8 +1884,7 @@ def _validate_freq(self):
except (TypeError, ValueError):
raise ValueError(
f"passed window {self.window} is not "
- "compatible with a datetimelike "
- "index"
+ "compatible with a datetimelike index"
)
_agg_see_also_doc = dedent(
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 34e8e03d8771e..97178261bdf72 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -69,8 +69,7 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
kwargs["engine"] = "python"
elif len(sep) > 1 and kwargs.get("engine") == "c":
warnings.warn(
- "read_clipboard with regex separator does not work "
- "properly with c engine"
+ "read_clipboard with regex separator does not work properly with c engine"
)
return read_csv(StringIO(text), sep=sep, **kwargs)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 771a302d647ec..cf19169214c35 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -78,8 +78,7 @@ def _expand_user(
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
- "Passing a bool to header is invalid. "
- "Use header=None for no header or "
+ "Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
@@ -407,8 +406,8 @@ def get_handle(
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
- "Multiple files found in ZIP file."
- f" Only one file per ZIP: {zip_names}"
+ "Multiple files found in ZIP file. "
+ f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 7fdca2d65b05d..07919dbda63ae 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -57,8 +57,7 @@ def _check_columns(cols):
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError(
- f"All columns must have the same length: {N}; "
- f"column {i} has length {n}"
+ f"All columns must have the same length: {N}; column {i} has length {n}"
)
return N
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index a084be54dfa10..9d284c8031840 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -136,8 +136,7 @@ def _maybe_convert_usecols(usecols):
if is_integer(usecols):
raise ValueError(
"Passing an integer for `usecols` is no longer supported. "
- "Please pass in a list of int from 0 to `usecols` "
- "inclusive instead."
+ "Please pass in a list of int from 0 to `usecols` inclusive instead."
)
if isinstance(usecols, str):
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index eb05004d9137c..5d4925620e75f 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -37,16 +37,13 @@ def to_feather(df: DataFrame, path):
typ = type(df.index)
raise ValueError(
f"feather does not support serializing {typ} "
- "for the index; you can .reset_index() "
- "to make the index into column(s)"
+ "for the index; you can .reset_index() to make the index into column(s)"
)
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError(
- "feather does not support serializing a "
- "non-default index for the index; you "
- "can .reset_index() to make the index "
- "into column(s)"
+ "feather does not support serializing a non-default index for the index; "
+ "you can .reset_index() to make the index into column(s)"
)
if df.index.name is not None:
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 6adf69a922000..296b305f41dd2 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -737,12 +737,8 @@ def _to_str_columns(self) -> List[List[str]]:
self.header = cast(List[str], self.header)
if len(self.header) != len(self.columns):
raise ValueError(
- (
- "Writing {ncols} cols but got {nalias} "
- "aliases".format(
- ncols=len(self.columns), nalias=len(self.header)
- )
- )
+ f"Writing {len(self.columns)} cols "
+ f"but got {len(self.header)} aliases"
)
str_columns = [[label] for label in self.header]
else:
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index b46b2f6c671d6..e3161415fe2bc 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -216,8 +216,8 @@ def _write_table(self, indent: int = 0) -> None:
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
- "classes must be a string, list, or tuple, "
- "not {typ}".format(typ=type(self.classes))
+ "classes must be a string, list, "
+ f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 008a99427f3c7..8ab56437d5c05 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -114,8 +114,7 @@ def pad_empties(x):
column_format = index_format + column_format
elif not isinstance(self.column_format, str): # pragma: no cover
raise AssertionError(
- "column_format must be str or unicode, "
- "not {typ}".format(typ=type(column_format))
+ f"column_format must be str or unicode, not {type(column_format)}"
)
else:
column_format = self.column_format
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 04f9f317d7dae..75cb0fafaa6b3 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -904,8 +904,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
f"The flavor {flav} failed to parse your input. "
"Since you passed a non-rewindable file "
"object, we can't rewind it to try "
- "another parser. Try read_html() with a "
- "different flavor."
+ "another parser. Try read_html() with a different flavor."
)
retained = caught
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index c0596c984575a..cf292a13fed7f 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -317,8 +317,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
meta_val = np.nan
else:
raise KeyError(
- "Try running with "
- "errors='ignore' as key "
+ "Try running with errors='ignore' as key "
f"{e} is not always present"
)
meta_vals[key].append(meta_val)
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 3a686a1a3b122..4be62b886f076 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -32,8 +32,7 @@ def get_engine(engine: str) -> "BaseImpl":
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
- "pyarrow or fastparquet is required for parquet "
- "support"
+ "pyarrow or fastparquet is required for parquet support"
)
if engine == "pyarrow":
@@ -156,8 +155,7 @@ def write(
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
- "partition_cols. Use partition_cols for "
- "partitioning data"
+ "partition_cols. Use partition_cols for partitioning data"
)
elif "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index b4eb2fb1411d0..41db6ed0ef503 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -612,8 +612,7 @@ def parser_f(
if delim_whitespace and delimiter != default_sep:
raise ValueError(
"Specified a delimiter with both sep and "
- "delim_whitespace=True; you can only "
- "specify one."
+ "delim_whitespace=True; you can only specify one."
)
if engine is not None:
@@ -907,8 +906,8 @@ def _get_options_with_defaults(self, engine):
pass
else:
raise ValueError(
- f"The {repr(argname)} option is not supported with the"
- f" {repr(engine)} engine"
+ f"The {repr(argname)} option is not supported with the "
+ f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
@@ -968,8 +967,7 @@ def _clean_options(self, options, engine):
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
- r"different from '\s+' are "
- "interpreted as regex)"
+ r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
@@ -1000,8 +998,7 @@ def _clean_options(self, options, engine):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
- "and the 'c' engine does not support "
- "such quotechars"
+ "and the 'c' engine does not support such quotechars"
)
engine = "python"
@@ -1119,9 +1116,8 @@ def _make_engine(self, engine="c"):
klass = FixedWidthFieldParser
else:
raise ValueError(
- f"Unknown engine: {engine} (valid options are "
- '"c", "python", or '
- '"python-fwf")'
+ f"Unknown engine: {engine} (valid options "
+ 'are "c", "python", or "python-fwf")'
)
self._engine = klass(self.f, **self.options)
@@ -1230,8 +1226,7 @@ def _validate_usecols_names(usecols, names):
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
- "Usecols do not match columns, "
- f"columns expected but not found: {missing}"
+ f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
@@ -1325,8 +1320,7 @@ def _validate_parse_dates_arg(parse_dates):
that is the case.
"""
msg = (
- "Only booleans, lists, and "
- "dictionaries are accepted "
+ "Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
@@ -1680,8 +1674,7 @@ def _convert_to_ndarrays(
warnings.warn(
(
"Both a converter and dtype were specified "
- f"for column {c} - only the converter will "
- "be used"
+ f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
@@ -1826,8 +1819,7 @@ def _cast_types(self, values, cast_type, column):
except NotImplementedError:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
- "_from_sequence_of_strings in order "
- "to be used in parser methods"
+ "_from_sequence_of_strings in order to be used in parser methods"
)
else:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4f12c0225bd2d..c36c9eb730c7c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -413,8 +413,8 @@ def read_hdf(
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError(
- "key must be provided when HDF5 file "
- "contains multiple datasets."
+ "key must be provided when HDF5 "
+ "file contains multiple datasets."
)
key = candidate_only_group._v_pathname
return store.select(
@@ -1240,8 +1240,7 @@ def append_to_multiple(
if v is None:
if remain_key is not None:
raise ValueError(
- "append_to_multiple can only have one value in d that "
- "is None"
+ "append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
@@ -2313,8 +2312,7 @@ def validate_attr(self, append):
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
raise ValueError(
- "appended items dtype do not match existing "
- "items dtype in table!"
+ "appended items dtype do not match existing items dtype in table!"
)
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
@@ -2681,14 +2679,12 @@ def validate_read(self, columns, where):
if columns is not None:
raise TypeError(
"cannot pass a column specification when reading "
- "a Fixed format store. this store must be "
- "selected in its entirety"
+ "a Fixed format store. this store must be selected in its entirety"
)
if where is not None:
raise TypeError(
"cannot pass a where specification when reading "
- "from a Fixed format store. this store must be "
- "selected in its entirety"
+ "from a Fixed format store. this store must be selected in its entirety"
)
@property
@@ -2909,8 +2905,7 @@ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None)
if is_categorical_dtype(value):
raise NotImplementedError(
- "Cannot store a category dtype in "
- "a HDF5 dataset that uses format="
+ "Cannot store a category dtype in a HDF5 dataset that uses format="
'"fixed". Use format="table".'
)
if not empty_array:
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index bb5bce96bc64b..211935009d2e5 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -267,8 +267,9 @@ cdef class Parser:
elif column_types[j] == b's':
self.column_types[j] = column_type_string
else:
- raise ValueError("unknown column type: "
- f"{self.parser.columns[j].ctype}")
+ raise ValueError(
+ f"unknown column type: {self.parser.columns[j].ctype}"
+ )
# compression
if parser.compression == const.rle_compression:
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index f917477b81489..9b40778dbcfdf 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -459,8 +459,7 @@ def _process_columnsize_subheader(self, offset, length):
if self.col_count_p1 + self.col_count_p2 != self.column_count:
print(
f"Warning: column count mismatch ({self.col_count_p1} + "
- f"{self.col_count_p2} != "
- f"{self.column_count})\n"
+ f"{self.col_count_p2} != {self.column_count})\n"
)
# Unknown purpose
@@ -672,8 +671,7 @@ def _read_next_page(self):
self.close()
msg = (
"failed to read complete page from file (read "
- f"{len(self._cached_page):d} of "
- f"{self._page_length:d} bytes)"
+ f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
)
raise ValueError(msg)
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 56ebb583bc2f9..27d56d4ede403 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -49,8 +49,7 @@ def read_sas(
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
- "than a string name, you must specify "
- "a format string"
+ "than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index f4527994db0d2..58fed0d18dd4a 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -977,8 +977,7 @@ def _sqlalchemy_type(self, col):
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
- "written as integer values (ns frequency) to the "
- "database.",
+ "written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
@@ -1413,8 +1412,7 @@ def _get_valid_sqlite_name(name):
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
- "In pandas versions < 0.14, spaces were converted to "
- "underscores."
+ "In pandas versions < 0.14, spaces were converted to underscores."
)
@@ -1528,8 +1526,7 @@ def _sql_type_name(self, col):
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
- "written as integer values (ns frequency) to the "
- "database.",
+ "written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index a7246655f490a..b8e04ad55dde1 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -641,8 +641,7 @@ def __init__(self, catarray, encoding="latin-1"):
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
- "have a combined length less than 32,000 "
- "characters."
+ "have a combined length less than 32,000 characters."
)
# Ensure int32
@@ -1731,9 +1730,10 @@ def _do_select_columns(self, data, columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
+ joined = ", ".join(list(unmatched))
raise ValueError(
- "The following columns were not found in the "
- "Stata data set: " + ", ".join(list(unmatched))
+ "The following columns were not "
+ f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 5b37ebb42aecc..a1035fd0823bb 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -421,8 +421,7 @@ def __call__(self):
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
"MillisecondLocator estimated to generate "
- f"{estimate:d} ticks from {dmin} to {dmax}: "
- "exceeds Locator.MAXTICKS"
+ f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS"
f"* 2 ({self.MAXTICKS * 2:d}) "
)
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index f8b2c7ab123d0..d54fc73b495ba 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -318,8 +318,7 @@ def hist_series(
if "figure" in kwds:
raise ValueError(
"Cannot pass 'figure' when using the "
- "'by' argument, since a new 'Figure' instance "
- "will be created"
+ "'by' argument, since a new 'Figure' instance will be created"
)
axes = _grouped_hist(
self,
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index dd4034a97f58e..d7732c86911b8 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -190,8 +190,7 @@ def _subplots(
if sharex or sharey:
warnings.warn(
"When passing multiple axes, sharex and sharey "
- "are ignored. These settings must be specified "
- "when creating axes",
+ "are ignored. These settings must be specified when creating axes",
UserWarning,
stacklevel=4,
)
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 8643e7f6f89c1..0c830c65e0f8b 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -97,8 +97,8 @@ def test_comparisons(self):
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
msg = (
- "Cannot compare a Categorical for op __gt__ with type"
- r" <class 'numpy\.ndarray'>"
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
@@ -265,8 +265,8 @@ def test_comparisons(self, data, reverse, base):
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = (
- "Cannot compare a Categorical for op __gt__ with type"
- r" <class 'numpy\.ndarray'>"
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 0c5ae506ae0ce..c165910777649 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1072,6 +1072,23 @@ def test_cut(bins, right, include_lowest):
tm.assert_categorical_equal(result, expected)
+def test_array_setitem_nullable_boolean_mask():
+ # GH 31446
+ ser = pd.Series([1, 2], dtype="Int64")
+ result = ser.where(ser > 1)
+ expected = pd.Series([pd.NA, 2], dtype="Int64")
+ tm.assert_series_equal(result, expected)
+
+
+def test_array_setitem():
+ # GH 31446
+ arr = pd.Series([1, 2], dtype="Int64").array
+ arr[arr > 1] = 1
+
+ expected = pd.array([1, 1], dtype="Int64")
+ tm.assert_extension_array_equal(arr, expected)
+
+
# TODO(jreback) - these need testing / are broken
# shift
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 7f68abb92ba43..656b274aa1a9e 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -274,9 +274,9 @@ def check_operands(left, right, cmp_op):
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = f"lhs {cmp1} rhs"
msg = (
- r"only list-like( or dict-like)? objects are allowed to be"
- r" passed to (DataFrame\.)?isin\(\), you passed a"
- r" (\[|')bool(\]|')|"
+ r"only list-like( or dict-like)? objects are allowed to be "
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
+ r"(\[|')bool(\]|')|"
"argument of type 'bool' is not iterable"
)
if cmp1 in ("in", "not in") and not is_list_like(rhs):
@@ -408,9 +408,9 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
ex = f"~(lhs {cmp1} rhs)"
msg = (
- r"only list-like( or dict-like)? objects are allowed to be"
- r" passed to (DataFrame\.)?isin\(\), you passed a"
- r" (\[|')float(\]|')|"
+ r"only list-like( or dict-like)? objects are allowed to be "
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
+ r"(\[|')float(\]|')|"
"argument of type 'float' is not iterable"
)
if is_scalar(rhs) and cmp1 in skip_these:
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 0bb8aede6298c..e0ca603aaa0ed 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -4,6 +4,7 @@
import pytest
import pandas as pd
+from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
@@ -195,3 +196,14 @@ def test_setitem_preserves_views(self, data):
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
+
+ def test_setitem_nullable_mask(self, data):
+ # GH 31446
+ # TODO: there is some issue with PandasArray, therefore,
+ # TODO: skip the setitem test for now, and fix it later
+ if data.dtype != PandasDtype("object"):
+ arr = data[:5]
+ expected = data.take([0, 0, 0, 3, 4])
+ mask = pd.array([True, True, True, False, False])
+ arr[mask] = data[0]
+ self.assert_extension_array_equal(expected, arr)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 743852c35dbd8..8fd4a0171a222 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas.api.extensions import no_default, register_extension_dtype
from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
+from pandas.core.indexers import check_array_indexer
@register_extension_dtype
@@ -144,6 +145,8 @@ def __setitem__(self, key, value):
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
def __len__(self) -> int:
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 33c0e92845484..40ecda7d74952 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -447,8 +447,8 @@ def test_setitem(self, float_frame):
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
msg = (
- r"\"None of \[Float64Index\(\[.*dtype='float64'\)\] are in the"
- r" \[columns\]\""
+ r"\"None of \[Float64Index\(\[.*dtype='float64'\)\] are in the "
+ r"\[columns\]\""
)
with pytest.raises(KeyError, match=msg):
float_frame[np.random.randn(len(float_frame) + 1)] = 1
@@ -1039,9 +1039,9 @@ def test_getitem_setitem_float_labels(self):
# positional slicing only via iloc!
msg = (
- "cannot do slice indexing on"
- r" <class 'pandas\.core\.indexes\.numeric\.Float64Index'> with"
- r" these indexers \[1.0\] of <class 'float'>"
+ "cannot do slice indexing on "
+ r"<class 'pandas\.core\.indexes\.numeric\.Float64Index'> with "
+ r"these indexers \[1.0\] of <class 'float'>"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 9263409f7a7f8..9de5d6fe16a0d 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -382,8 +382,8 @@ def test_swapaxes(self):
tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = (
- "No axis named 2 for object type"
- r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
+ "No axis named 2 for object type "
+ r"<class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
)
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ea1e339f44d93..a861e0eb52391 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1854,9 +1854,9 @@ def check(df):
# No NaN found -> error
if len(indexer) == 0:
msg = (
- "cannot do label indexing on"
- r" <class 'pandas\.core\.indexes\.range\.RangeIndex'>"
- r" with these indexers \[nan\] of <class 'float'>"
+ "cannot do label indexing on "
+ r"<class 'pandas\.core\.indexes\.range\.RangeIndex'> "
+ r"with these indexers \[nan\] of <class 'float'>"
)
with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 2d3db3a1eff51..966f0d416676c 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -897,15 +897,15 @@ def test_astype_to_incorrect_datetimelike(self, unit):
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
- r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
- r" \[timedelta64\[{}\]\]"
+ r"cannot astype a datetimelike from \[datetime64\[ns\]\] to "
+ r"\[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
- r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
- r" \[datetime64\[{}\]\]"
+ r"cannot astype a timedelta from \[timedelta64\[ns\]\] to "
+ r"\[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 2e6759cb1a238..ae0516dd29a1f 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -670,8 +670,8 @@ def test_fillna_invalid_value(self, float_frame):
float_frame.fillna((1, 2))
# frame with series
msg = (
- '"value" parameter must be a scalar, dict or Series, but you'
- ' passed a "DataFrame"'
+ '"value" parameter must be a scalar, dict or Series, but you '
+ 'passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
float_frame.iloc[:, 0].fillna(float_frame)
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 05bdec4a3a4d2..49e6fe4940e18 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -164,13 +164,13 @@ def test_repr_column_name_unicode_truncation_bug(self):
"Id": [7117434],
"StringCol": (
"Is it possible to modify drop plot code"
- " so that the output graph is displayed "
+ "so that the output graph is displayed "
"in iphone simulator, Is it possible to "
"modify drop plot code so that the "
"output graph is \xe2\x80\xa8displayed "
"in iphone simulator.Now we are adding "
- "the CSV file externally. I want to Call"
- " the File through the code.."
+ "the CSV file externally. I want to Call "
+ "the File through the code.."
),
}
)
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 56a0c8cf4f5bd..60b7611c8b9be 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -424,8 +424,8 @@ def test_stack_mixed_levels(self):
# When mixed types are passed and the ints are not level
# names, raise
msg = (
- "level should contain all level names or all level numbers, not"
- " a mixture of the two"
+ "level should contain all level names or all level numbers, not "
+ "a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 70ba21d89d22f..e424913804c33 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -725,10 +725,7 @@ def test_get_group(self):
g.get_group("foo")
with pytest.raises(ValueError, match=msg):
g.get_group(("foo"))
- msg = (
- "must supply a same-length tuple to get_group with multiple"
- " grouping keys"
- )
+ msg = "must supply a same-length tuple to get_group with multiple grouping keys"
with pytest.raises(ValueError, match=msg):
g.get_group(("foo", "bar", "baz"))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index cbffb9d375cb1..e6c52a9c9fb3e 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -37,8 +37,8 @@ class Base:
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
- r"Index\(\.\.\.\) must be called with a collection of some"
- r" kind, None was passed|"
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ r"kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)"
)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index ffe51dd1fb9f5..95d14ad4c86f7 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -644,8 +644,8 @@ def test_constructor_dtype(self):
)
msg = (
- "cannot supply both a tz and a timezone-naive dtype"
- r" \(i\.e\. datetime64\[ns\]\)"
+ "cannot supply both a tz and a timezone-naive dtype "
+ r"\(i\.e\. datetime64\[ns\]\)"
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(idx, dtype="datetime64[ns]")
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index f04776e531bfd..2db61d4f4b852 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -334,8 +334,8 @@ def test_numpy_ufuncs(idx, func):
else:
expected_exception = TypeError
msg = (
- "loop of ufunc does not support argument 0 of type tuple which"
- f" has no callable {func.__name__} method"
+ "loop of ufunc does not support argument 0 of type tuple which "
+ f"has no callable {func.__name__} method"
)
with pytest.raises(expected_exception, match=msg):
func(idx)
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 8a5bb2bf960ac..315b58dbe455e 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -409,8 +409,8 @@ def test_get_loc(self):
idx0.get_loc(1.1)
msg = (
- r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
- r" dtype='period\[D\]', freq='D'\)' is an invalid key"
+ r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\], "
+ r"dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
@@ -434,8 +434,8 @@ def test_get_loc(self):
idx1.get_loc(1.1)
msg = (
- r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
- r" dtype='period\[D\]', freq='D'\)' is an invalid key"
+ r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\], "
+ r"dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index f025168643ab9..582f6c619d287 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -188,8 +188,8 @@ def test_constructor_invalid(self):
# invalid
msg = (
- r"Float64Index\(\.\.\.\) must be called with a collection of"
- r" some kind, 0\.0 was passed"
+ r"Float64Index\(\.\.\.\) must be called with a collection of "
+ r"some kind, 0\.0 was passed"
)
with pytest.raises(TypeError, match=msg):
Float64Index(0.0)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 2cc8232566aa9..5530896a90941 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -123,9 +123,9 @@ def test_scalar_non_numeric(self):
# setting with a float fails with iloc
msg = (
- r"cannot do (label|index|positional) indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=type(i), kind=str(float))
+ r"cannot do (label|index|positional) indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -160,9 +160,9 @@ def test_scalar_non_numeric(self):
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
- r"cannot do (label|index) indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=type(i), kind=str(float))
+ r"cannot do (label|index) indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[3.0]
@@ -177,9 +177,9 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
- r"cannot do label indexing"
- r" on {klass} with these indexers \[1\.0\] of"
- r" {kind}|"
+ r"cannot do label indexing "
+ r"on {klass} with these indexers \[1\.0\] of "
+ r"{kind}|"
"Cannot index by location index with a non-integer key".format(
klass=str(Index), kind=str(float)
)
@@ -199,9 +199,9 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x]:
msg = (
- r"cannot do label indexing"
- r" on {klass} with these indexers \[1\.0\] of"
- r" {kind}".format(klass=str(Index), kind=str(float))
+ r"cannot do label indexing "
+ r"on {klass} with these indexers \[1\.0\] of "
+ r"{kind}".format(klass=str(Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
@@ -313,9 +313,9 @@ def test_scalar_float(self):
s.iloc[3.0]
msg = (
- r"cannot do positional indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=str(Float64Index), kind=str(float))
+ r"cannot do positional indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=str(Float64Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
@@ -379,10 +379,10 @@ def test_slice_non_numeric(self):
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing"
- r" on {klass} with these indexers"
- r" \[(3|4)(\.0)?\]"
- r" of ({kind_float}|{kind_int})".format(
+ "cannot do slice indexing "
+ r"on {klass} with these indexers "
+ r"\[(3|4)(\.0)?\] "
+ r"of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index a6bf0ef26cce0..d67259e8b7d40 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -437,9 +437,9 @@ def test_iloc_getitem_labelled_frame(self):
# trying to use a label
msg = (
- r"Location based indexing can only have \[integer, integer"
- r" slice \(START point is INCLUDED, END point is EXCLUDED\),"
- r" listlike of integers, boolean array\] types"
+ r"Location based indexing can only have \[integer, integer "
+ r"slice \(START point is INCLUDED, END point is EXCLUDED\), "
+ r"listlike of integers, boolean array\] types"
)
with pytest.raises(ValueError, match=msg):
df.iloc["j", "D"]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 448a06070c45c..1913caae93932 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -81,8 +81,8 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
nd3 = np.random.randint(5, size=(2, 2, 2))
msg = (
- r"Buffer has wrong number of dimensions \(expected 1,"
- r" got 3\)|"
+ r"Buffer has wrong number of dimensions \(expected 1, "
+ r"got 3\)|"
"Cannot index with multidimensional key|"
r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]|"
"Index data must be 1-dimensional"
@@ -134,8 +134,8 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
nd3 = np.random.randint(5, size=(2, 2, 2))
msg = (
- r"Buffer has wrong number of dimensions \(expected 1,"
- r" got 3\)|"
+ r"Buffer has wrong number of dimensions \(expected 1, "
+ r"got 3\)|"
"'pandas._libs.interval.IntervalTree' object has no attribute "
"'set_value'|" # AttributeError
"unhashable type: 'numpy.ndarray'|" # TypeError
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index e5930b25aab3c..b5569aad92ecf 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -219,8 +219,8 @@ def test_loc_to_fail(self):
# raise a KeyError?
msg = (
- r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[1, 2], [1, 2]]
@@ -236,8 +236,8 @@ def test_loc_to_fail(self):
s.loc[-1]
msg = (
- r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-1, -2]]
@@ -252,8 +252,8 @@ def test_loc_to_fail(self):
s["a"] = 2
msg = (
- r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-2]]
@@ -268,8 +268,8 @@ def test_loc_to_fail(self):
df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"])
msg = (
- r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[3], :]
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 5fda759020f1a..2ce07ec41758f 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -205,8 +205,8 @@ def test_series_partial_set(self):
# raises as nothing in in the index
msg = (
- r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
ser.loc[[3, 3, 3]]
@@ -286,8 +286,8 @@ def test_series_partial_set_with_name(self):
# raises as nothing in in the index
msg = (
- r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64',"
- r" name='idx'\)\] are in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64', "
+ r"name='idx'\)\] are in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
ser.loc[[3, 3, 3]]
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index a126f83164ce5..22aa78919ef0f 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -146,8 +146,8 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
- fr" '.+does_not_exist\.{fn_ext}'"
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
+ fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
@@ -186,8 +186,8 @@ def test_read_expands_user_home_dir(
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
- fr" '.+does_not_exist\.{fn_ext}'"
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
+ fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 8e459f0cf8298..edb766a67af89 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -748,10 +748,10 @@ def test_excessively_long_string(self):
)
original = DataFrame(s)
msg = (
- r"Fixed width strings in Stata \.dta files are limited to 244"
- r" \(or fewer\)\ncharacters\. Column 's500' does not satisfy"
- r" this restriction\. Use the\n'version=117' parameter to write"
- r" the newer \(Stata 13 and later\) format\."
+ r"Fixed width strings in Stata \.dta files are limited to 244 "
+ r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
+ r"this restriction\. Use the\n'version=117' parameter to write "
+ r"the newer \(Stata 13 and later\) format\."
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
@@ -979,8 +979,8 @@ def test_categorical_warnings_and_errors(self):
)
with tm.ensure_clean() as path:
msg = (
- "Stata value labels for a single variable must have"
- r" a combined length less than 32,000 characters\."
+ "Stata value labels for a single variable must have "
+ r"a combined length less than 32,000 characters\."
)
with pytest.raises(ValueError, match=msg):
original.to_stata(path)
@@ -1708,12 +1708,12 @@ def test_invalid_file_not_written(self, version):
df = DataFrame([content], columns=["invalid"])
with tm.ensure_clean() as path:
msg1 = (
- r"'latin-1' codec can't encode character '\\ufffd'"
- r" in position 14: ordinal not in range\(256\)"
+ r"'latin-1' codec can't encode character '\\ufffd' "
+ r"in position 14: ordinal not in range\(256\)"
)
msg2 = (
- "'ascii' codec can't decode byte 0xef in position 14:"
- r" ordinal not in range\(128\)"
+ "'ascii' codec can't decode byte 0xef in position 14: "
+ r"ordinal not in range\(128\)"
)
with pytest.raises(UnicodeEncodeError, match=r"{}|{}".format(msg1, msg2)):
with tm.assert_produces_warning(ResourceWarning):
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 8f855fd0c6cff..fb86b600d3d3c 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -121,8 +121,8 @@ def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
msg = (
"Cannot pass 'style' string with a color symbol and 'color' "
- "keyword argument. Please use one or the other or pass 'style'"
- " without a color symbol"
+ "keyword argument. Please use one or the other or pass 'style' "
+ "without a color symbol"
)
with pytest.raises(ValueError, match=msg):
ts.plot(style="b-", color="#000099")
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index c8aa1f23ccf1f..228c84528e882 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -319,8 +319,8 @@ def test_subplot_titles(self, iris):
# Case len(title) > len(df)
msg = (
- "The length of `title` must equal the number of columns if"
- " using `title` of type `list` and `subplots=True`"
+ "The length of `title` must equal the number of columns if "
+ "using `title` of type `list` and `subplots=True`"
)
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, title=title + ["kittens > puppies"])
@@ -331,8 +331,8 @@ def test_subplot_titles(self, iris):
# Case subplots=False and title is of type list
msg = (
- "Using `title` of type `list` is not supported unless"
- " `subplots=True` is passed"
+ "Using `title` of type `list` is not supported unless "
+ "`subplots=True` is passed"
)
with pytest.raises(ValueError, match=msg):
df.plot(subplots=False, title=title)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 170201b4f8e5c..d552241f9126f 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -257,8 +257,8 @@ def test_fillna():
tm.assert_series_equal(result, expected)
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\), backfill"
- r" \(bfill\) or nearest\. Got 0"
+ r"Invalid fill method\. Expecting pad \(ffill\), backfill "
+ r"\(bfill\) or nearest\. Got 0"
)
with pytest.raises(ValueError, match=msg):
r.fillna(0)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index a660acb143433..7020d373caf82 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -212,8 +212,8 @@ def test_join_on(self):
source_copy = source.copy()
source_copy["A"] = 0
msg = (
- "You are trying to merge on float64 and object columns. If"
- " you wish to proceed you should use pd.concat"
+ "You are trying to merge on float64 and object columns. If "
+ "you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on="A")
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 8465e2ca49d67..f9acf5b60a3cd 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -201,8 +201,8 @@ def test_merge_misspecified(self):
merge(self.left, self.right, right_index=True)
msg = (
- 'Can only pass argument "on" OR "left_on" and "right_on", not'
- " a combination of both"
+ 'Can only pass argument "on" OR "left_on" and "right_on", not '
+ "a combination of both"
)
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.left, left_on="key", on="key")
@@ -1013,10 +1013,9 @@ def test_indicator(self):
df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
msg = (
- "Cannot use `indicator=True` option when data contains a"
- " column named {}|"
- "Cannot use name of an existing column for indicator"
- " column"
+ "Cannot use `indicator=True` option when data contains a "
+ "column named {}|"
+ "Cannot use name of an existing column for indicator column"
).format(i)
with pytest.raises(ValueError, match=msg):
merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
@@ -1235,8 +1234,8 @@ def test_validation(self):
)
msg = (
- "Merge keys are not unique in either left or right dataset;"
- " not a one-to-one merge"
+ "Merge keys are not unique in either left or right dataset; "
+ "not a one-to-one merge"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, on="a", validate="1:1")
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 990669f1ae13a..b3b2c5a05c6ad 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -198,8 +198,8 @@ def test_concatlike_same_dtypes(self):
# cannot append non-index
msg = (
- r"cannot concatenate object of type '.+';"
- " only Series and DataFrame objs are valid"
+ r"cannot concatenate object of type '.+'; "
+ "only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
pd.Series(vals1).append(vals2)
@@ -1866,8 +1866,8 @@ def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
msg = (
- "cannot concatenate object of type '{}';"
- " only Series and DataFrame objs are valid"
+ "cannot concatenate object of type '{}'; "
+ "only Series and DataFrame objs are valid"
)
for obj in [1, dict(), [1, 2], (1, 2)]:
with pytest.raises(TypeError, match=msg.format(type(obj))):
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 6a9ef86c11292..5fc991df49424 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -271,8 +271,8 @@ def test_ops_ndarray(self):
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
- "ufunc '?multiply'? cannot use operands with types"
- r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
+ "ufunc '?multiply'? cannot use operands with types "
+ r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index 47f40e24e1637..dc8b91de3d09b 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -243,8 +243,8 @@ def test_reindex_corner(datetime_series):
# bad fill method
ts = datetime_series[::2]
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\), backfill"
- r" \(bfill\) or nearest\. Got foo"
+ r"Invalid fill method\. Expecting pad \(ffill\), backfill "
+ r"\(bfill\) or nearest\. Got foo"
)
with pytest.raises(ValueError, match=msg):
ts.reindex(datetime_series.index, method="foo")
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index d75efcf52c271..16a29d10eb414 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -44,8 +44,8 @@ def test_getitem_boolean_empty():
# invalid because of the boolean indexer
# that's empty or not-aligned
msg = (
- r"Unalignable boolean Series provided as indexer \(index of"
- r" the boolean Series and of the indexed object do not match"
+ r"Unalignable boolean Series provided as indexer \(index of "
+ r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
s[Series([], dtype=bool)]
@@ -89,8 +89,8 @@ def test_getitem_setitem_boolean_corner(datetime_series):
# these used to raise...??
msg = (
- r"Unalignable boolean Series provided as indexer \(index of"
- r" the boolean Series and of the indexed object do not match"
+ r"Unalignable boolean Series provided as indexer \(index of "
+ r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 4fa70793e2815..18dbd22b73b35 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -393,8 +393,8 @@ def test_2d_to_1d_assignment_raises():
y = pd.Series(range(2))
msg = (
- r"shape mismatch: value array of shape \(2,2\) could not be"
- r" broadcast to indexing result of shape \(2,\)"
+ r"shape mismatch: value array of shape \(2,2\) could not be "
+ r"broadcast to indexing result of shape \(2,\)"
)
with pytest.raises(ValueError, match=msg):
y.loc[range(2)] = x
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index ce0d04ff99077..3684ca00c2f17 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -261,8 +261,8 @@ def test_setitem_float_labels():
def test_slice_float_get_set(datetime_series):
msg = (
r"cannot do slice indexing on <class 'pandas\.core\.indexes"
- r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\]"
- r" of <class 'float'>"
+ r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
+ r"of <class 'float'>"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py
index 1fc98ded0d3d2..62273e2d363fb 100644
--- a/pandas/tests/series/methods/test_argsort.py
+++ b/pandas/tests/series/methods/test_argsort.py
@@ -52,8 +52,8 @@ def test_argsort_stable(self):
tm.assert_series_equal(mindexer, Series(mexpected), check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected), check_dtype=False)
msg = (
- r"ndarray Expected type <class 'numpy\.ndarray'>,"
- r" found <class 'pandas\.core\.series\.Series'> instead"
+ r"ndarray Expected type <class 'numpy\.ndarray'>, "
+ r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index ca93e989ba6b5..3836c1d56bf87 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -29,8 +29,8 @@ def test_isin_with_string_scalar(self):
# GH#4763
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
msg = (
- r"only list-like objects are allowed to be passed to isin\(\),"
- r" you passed a \[str\]"
+ r"only list-like objects are allowed to be passed to isin\(\), "
+ r"you passed a \[str\]"
)
with pytest.raises(TypeError, match=msg):
s.isin("a")
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index b20baa2836363..770ad38b0215e 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -120,8 +120,8 @@ def test_replace_with_single_list(self):
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\) or backfill"
- r" \(bfill\)\. Got crash_cymbal"
+ r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
+ r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 628c66583535d..71f6681e8c955 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -11,8 +11,8 @@ class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
- r"Index\(\.\.\.\) must be called with a collection of some"
- r" kind, None was passed"
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ r"kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c38e5708be09b..d760939657d47 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -967,6 +967,15 @@ def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
+ def test_constructor_datetime64_bigendian(self):
+ # GH#30976
+ ms = np.datetime64(1, "ms")
+ arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
+
+ result = Series(arr)
+ expected = Series([Timestamp(ms)])
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index a57ec2ba05d54..1fc582156a884 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -193,8 +193,8 @@ def test_astype_dict_like(self, dtype_class):
dt3 = dtype_class({"abc": str, "def": str})
msg = (
- "Only the Series name can be used for the key in Series dtype"
- r" mappings\."
+ "Only the Series name can be used for the key in Series dtype "
+ r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
@@ -410,8 +410,8 @@ def test_arg_for_errors_in_astype(self):
s = Series([1, 2, 3])
msg = (
- r"Expected value of kwarg 'errors' to be one of \['raise',"
- r" 'ignore'\]\. Supplied value is 'False'"
+ r"Expected value of kwarg 'errors' to be one of \['raise', "
+ r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
s.astype(np.float64, errors=False)
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 7b6d9210ed3d9..6b7d9e00a5228 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -1324,8 +1324,8 @@ def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
- r"Invalid limit_direction: expecting one of \['forward',"
- r" 'backward', 'both'\], got 'abc'"
+ r"Invalid limit_direction: expecting one of \['forward', "
+ r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
@@ -1347,6 +1347,7 @@ def test_interp_limit_area(self):
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
+ tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
@@ -1362,6 +1363,7 @@ def test_interp_limit_area(self):
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
+ tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
@@ -1371,8 +1373,9 @@ def test_interp_limit_area(self):
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
- method="linear", limit_area="outside", direction="backward"
+ method="linear", limit_area="outside", limit_direction="backward"
)
+ tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 57ee3bedd4d9f..a1de9c435c9ba 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -665,8 +665,8 @@ class TestIsin:
def test_invalid(self):
msg = (
- r"only list-like objects are allowed to be passed to isin\(\),"
- r" you passed a \[int\]"
+ r"only list-like objects are allowed to be passed to isin\(\), "
+ r"you passed a \[int\]"
)
with pytest.raises(TypeError, match=msg):
algos.isin(1, 1)
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index ee006233c4c1b..8edd9f20ec63c 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -8,7 +8,7 @@
import numpy as np # noqa
import pytest
-from pandas import DataFrame, Series
+from pandas import DataFrame
import pandas._testing as tm
@@ -114,26 +114,6 @@ def test_geopandas():
assert geopandas.read_file(fp) is not None
-def test_geopandas_coordinate_indexer():
- # this test is included to have coverage of one case in the indexing.py
- # code that is only kept for compatibility with geopandas, see
- # https://github.com/pandas-dev/pandas/issues/27258
- # We should be able to remove this after some time when its usage is
- # removed in geopandas
- from pandas.core.indexing import _NDFrameIndexer
-
- class _CoordinateIndexer(_NDFrameIndexer):
- def _getitem_tuple(self, tup):
- obj = self.obj
- xs, ys = tup
- return obj[xs][ys]
-
- Series._create_indexer("cx", _CoordinateIndexer)
- s = Series(range(5))
- res = s.cx[:, :]
- tm.assert_series_equal(s, res)
-
-
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 568b3917ba4cb..62d26dacde67b 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -3392,8 +3392,8 @@ def test_encode_decode_errors(self):
encodeBase = Series(["a", "b", "a\x9d"])
msg = (
- r"'charmap' codec can't encode character '\\x9d' in position 1:"
- " character maps to <undefined>"
+ r"'charmap' codec can't encode character '\\x9d' in position 1: "
+ "character maps to <undefined>"
)
with pytest.raises(UnicodeEncodeError, match=msg):
encodeBase.str.encode("cp1252")
@@ -3406,8 +3406,8 @@ def test_encode_decode_errors(self):
decodeBase = Series([b"a", b"b", b"a\x9d"])
msg = (
- "'charmap' codec can't decode byte 0x9d in position 1:"
- " character maps to <undefined>"
+ "'charmap' codec can't decode byte 0x9d in position 1: "
+ "character maps to <undefined>"
)
with pytest.raises(UnicodeDecodeError, match=msg):
decodeBase.str.decode("cp1252")
diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py
index 2beeae85de683..96c2d6bbd8106 100644
--- a/pandas/tests/tslibs/test_conversion.py
+++ b/pandas/tests/tslibs/test_conversion.py
@@ -72,6 +72,15 @@ def test_length_zero_copy(dtype, copy):
assert result.base is (None if copy else arr)
+def test_ensure_datetime64ns_bigendian():
+ # GH#29684
+ arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
+ result = conversion.ensure_datetime64ns(arr)
+
+ expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")
+ tm.assert_numpy_array_equal(result, expected)
+
+
class SubDatetime(datetime):
pass
diff --git a/pandas/tests/util/test_validate_kwargs.py b/pandas/tests/util/test_validate_kwargs.py
index a7b6d8f98cc60..8fe2a3712bf49 100644
--- a/pandas/tests/util/test_validate_kwargs.py
+++ b/pandas/tests/util/test_validate_kwargs.py
@@ -49,8 +49,8 @@ def test_validation():
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = (
- f'For argument "{name}" expected type bool,'
- f" received type {type(value).__name__}"
+ f'For argument "{name}" expected type bool, '
+ f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8bb98a271bce8..d31c23c7ccf1d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -365,7 +365,7 @@ def apply_index(self, i):
"applied vectorized"
)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
# TODO: Does this make sense for the general case? It would help
# if there were a canonical docstring for what is_anchored means.
return self.n == 1
@@ -378,7 +378,7 @@ def onOffset(self, dt):
)
return self.is_on_offset(dt)
- def isAnchored(self):
+ def isAnchored(self) -> bool:
warnings.warn(
"isAnchored is a deprecated, use is_anchored instead",
FutureWarning,
@@ -389,7 +389,7 @@ def isAnchored(self):
# TODO: Combine this with BusinessMixin version by defining a whitelisted
# set of attributes on each object rather than the existing behavior of
# iterating over internal ``__dict__``
- def _repr_attrs(self):
+ def _repr_attrs(self) -> str:
exclude = {"n", "inc", "normalize"}
attrs = []
for attr in sorted(self.__dict__):
@@ -405,7 +405,7 @@ def _repr_attrs(self):
return out
@property
- def name(self):
+ def name(self) -> str:
return self.rule_code
def rollback(self, dt):
@@ -452,15 +452,15 @@ def is_on_offset(self, dt):
# way to get around weirdness with rule_code
@property
- def _prefix(self):
+ def _prefix(self) -> str:
raise NotImplementedError("Prefix not defined")
@property
- def rule_code(self):
+ def rule_code(self) -> str:
return self._prefix
@cache_readonly
- def freqstr(self):
+ def freqstr(self) -> str:
try:
code = self.rule_code
except NotImplementedError:
@@ -480,7 +480,7 @@ def freqstr(self):
return fstr
- def _offset_str(self):
+ def _offset_str(self) -> str:
return ""
@property
@@ -529,11 +529,11 @@ def offset(self):
# Alias for backward compat
return self._offset
- def _repr_attrs(self):
+ def _repr_attrs(self) -> str:
if self.offset:
attrs = [f"offset={repr(self.offset)}"]
else:
- attrs = None
+ attrs = []
out = ""
if attrs:
out += ": " + ", ".join(attrs)
@@ -553,7 +553,7 @@ def __init__(self, n=1, normalize=False, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
- def _offset_str(self):
+ def _offset_str(self) -> str:
def get_str(td):
off_str = ""
if td.days > 0:
@@ -649,7 +649,7 @@ def apply_index(self, i):
result = shifted.to_timestamp() + time
return result
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
@@ -1087,7 +1087,7 @@ def apply(self, other):
def apply_index(self, i):
raise NotImplementedError
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
day64 = _to_dt64(dt, "datetime64[D]")
@@ -1134,14 +1134,14 @@ class MonthOffset(SingleConstructorOffset):
__init__ = BaseOffset.__init__
@property
- def name(self):
+ def name(self) -> str:
if self.is_anchored:
return self.rule_code
else:
month = ccalendar.MONTH_ALIASES[self.n]
return f"{self.code_rule}-{month}"
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
@@ -1333,7 +1333,7 @@ def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = f"-{self.day_of_month}"
return self._prefix + suffix
@@ -1429,7 +1429,7 @@ class SemiMonthEnd(SemiMonthOffset):
_prefix = "SM"
_min_day_of_month = 1
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
days_in_month = ccalendar.get_days_in_month(dt.year, dt.month)
@@ -1487,7 +1487,7 @@ class SemiMonthBegin(SemiMonthOffset):
_prefix = "SMS"
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.day in (1, self.day_of_month)
@@ -1556,7 +1556,7 @@ def __init__(self, n=1, normalize=False, weekday=None):
if self.weekday < 0 or self.weekday > 6:
raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self.weekday is not None
@apply_wraps
@@ -1632,7 +1632,7 @@ def _end_apply_index(self, dtindex):
return base + off + Timedelta(1, "ns") - Timedelta(1, "D")
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
elif self.weekday is None:
@@ -1640,7 +1640,7 @@ def is_on_offset(self, dt):
return dt.weekday() == self.weekday
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = ""
if self.weekday is not None:
weekday = ccalendar.int_to_weekday[self.weekday]
@@ -1717,7 +1717,7 @@ def __init__(self, n=1, normalize=False, week=0, weekday=0):
if self.week < 0 or self.week > 3:
raise ValueError(f"Week must be 0<=week<=3, got {self.week}")
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
@@ -1736,7 +1736,7 @@ def _get_offset_day(self, other):
return 1 + shift_days + self.week * 7
@property
- def rule_code(self):
+ def rule_code(self) -> str:
weekday = ccalendar.int_to_weekday.get(self.weekday, "")
return f"{self._prefix}-{self.week + 1}{weekday}"
@@ -1785,7 +1785,7 @@ def __init__(self, n=1, normalize=False, weekday=0):
if self.weekday < 0 or self.weekday > 6:
raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}")
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.
@@ -1805,7 +1805,7 @@ def _get_offset_day(self, other):
return dim - shift_days
@property
- def rule_code(self):
+ def rule_code(self) -> str:
weekday = ccalendar.int_to_weekday.get(self.weekday, "")
return f"{self._prefix}-{weekday}"
@@ -1842,7 +1842,7 @@ def __init__(self, n=1, normalize=False, startingMonth=None):
startingMonth = self._default_startingMonth
object.__setattr__(self, "startingMonth", startingMonth)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self.startingMonth is not None
@classmethod
@@ -1856,7 +1856,7 @@ def _from_name(cls, suffix=None):
return cls(**kwargs)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
month = ccalendar.MONTH_ALIASES[self.startingMonth]
return f"{self._prefix}-{month}"
@@ -1874,7 +1874,7 @@ def apply(self, other):
months = qtrs * 3 - months_since
return shift_month(other, months, self._day_opt)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
mod_month = (dt.month - self.startingMonth) % 3
@@ -1953,7 +1953,7 @@ class YearOffset(DateOffset):
_adjust_dst = True
_attributes = frozenset(["n", "normalize", "month"])
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
# override BaseOffset method to use self.month instead of other.month
# TODO: there may be a more performant way to do this
return liboffsets.get_day_of_month(
@@ -1977,7 +1977,7 @@ def apply_index(self, dtindex):
shifted, freq=dtindex.freq, dtype=dtindex.dtype
)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == self._get_offset_day(dt)
@@ -1999,7 +1999,7 @@ def _from_name(cls, suffix=None):
return cls(**kwargs)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
month = ccalendar.MONTH_ALIASES[self.month]
return f"{self._prefix}-{month}"
@@ -2117,12 +2117,12 @@ def __init__(
if self.variation not in ["nearest", "last"]:
raise ValueError(f"{self.variation} is not a valid variation")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return (
self.n == 1 and self.startingMonth is not None and self.weekday is not None
)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
@@ -2217,18 +2217,18 @@ def get_year_end(self, dt):
return target_date + timedelta(days_forward - 7)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
prefix = self._prefix
suffix = self.get_rule_code_suffix()
return f"{prefix}-{suffix}"
- def _get_suffix_prefix(self):
+ def _get_suffix_prefix(self) -> str:
if self.variation == "nearest":
return "N"
else:
return "L"
- def get_rule_code_suffix(self):
+ def get_rule_code_suffix(self) -> str:
prefix = self._get_suffix_prefix()
month = ccalendar.MONTH_ALIASES[self.startingMonth]
weekday = ccalendar.int_to_weekday[self.weekday]
@@ -2346,7 +2346,7 @@ def _offset(self):
variation=self.variation,
)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self._offset.is_anchored()
def _rollback_to_year(self, other):
@@ -2434,7 +2434,7 @@ def get_weeks(self, dt):
return ret
- def year_has_extra_week(self, dt):
+ def year_has_extra_week(self, dt: datetime) -> bool:
# Avoid round-down errors --> normalize to get
# e.g. '370D' instead of '360D23H'
norm = Timestamp(dt).normalize().tz_localize(None)
@@ -2445,7 +2445,7 @@ def year_has_extra_week(self, dt):
assert weeks_in_year in [52, 53], weeks_in_year
return weeks_in_year == 53
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
if self._offset.is_on_offset(dt):
@@ -2463,7 +2463,7 @@ def is_on_offset(self, dt):
return False
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = self._offset.get_rule_code_suffix()
qtr = self.qtr_with_extra_week
return f"{self._prefix}-{suffix}-{qtr}"
@@ -2516,7 +2516,7 @@ def apply(self, other):
)
return new
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
@@ -2596,7 +2596,7 @@ def __eq__(self, other: Any) -> bool:
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(self._params)
def __ne__(self, other):
@@ -2617,7 +2617,7 @@ def __ne__(self, other):
return True
@property
- def delta(self):
+ def delta(self) -> Timedelta:
return self.n * self._inc
@property
@@ -2648,11 +2648,11 @@ def apply(self, other):
raise ApplyTypeError(f"Unhandled type: {type(other).__name__}")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return False
-def _delta_to_tick(delta):
+def _delta_to_tick(delta: timedelta) -> Tick:
if delta.microseconds == 0 and getattr(delta, "nanoseconds", 0) == 0:
# nanoseconds only for pd.Timedelta
if delta.seconds == 0:
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index b69c974661f89..a715094e65e98 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -297,7 +297,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
- " a 'TypeError'."
+ "a 'TypeError'."
)
warnings.warn(msg.format(method_name=method_name), FutureWarning, stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 53a27e8782ad7..9e0ec4df02edf 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -132,8 +132,7 @@ def main(conda_fname, pip_fname, compare=False):
)
if args.azure:
msg = (
- "##vso[task.logissue type=error;"
- f"sourcepath=requirements-dev.txt]{msg}"
+ f"##vso[task.logissue type=error;sourcepath=requirements-dev.txt]{msg}"
)
sys.stderr.write(msg)
sys.exit(res)
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index 120058afd1190..92126a7b5a2f2 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -84,11 +84,6 @@
<i class="fab fa-stack-overflow"></i>
</a>
</li>
- <li class="list-inline-item">
- <a href="https://pandas.discourse.group">
- <i class="fab fa-discourse"></i>
- </a>
- </li>
</ul>
<p>
pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
index 8a5c2735b3d93..35a6b3361f32e 100644
--- a/web/pandas/about/roadmap.md
+++ b/web/pandas/about/roadmap.md
@@ -134,19 +134,6 @@ pandas documentation. Some specific goals include
subsections of the documentation to make navigation and finding
content easier.
-## Package docstring validation
-
-To improve the quality and consistency of pandas docstrings, we've
-developed tooling to check docstrings in a variety of ways.
-<https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py>
-contains the checks.
-
-Like many other projects, pandas uses the
-[numpydoc](https://numpydoc.readthedocs.io/en/latest/) style for writing
-docstrings. With the collaboration of the numpydoc maintainers, we'd
-like to move the checks to a package other than pandas so that other
-projects can easily use them as well.
-
## Performance monitoring
Pandas uses [airspeed velocity](https://asv.readthedocs.io/en/stable/)
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index e2a95a5039884..d1fb7ba0f7b86 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -50,8 +50,6 @@ navbar:
target: /community/blog.html
- name: "Ask a question (StackOverflow)"
target: https://stackoverflow.com/questions/tagged/pandas
- - name: "Discuss"
- target: https://pandas.discourse.group
- name: "Code of conduct"
target: /community/coc.html
- name: "Ecosystem"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31561 | 2020-02-02T07:26:29Z | 2020-02-02T07:36:33Z | null | 2020-02-02T07:36:33Z |
REF: CategoricalIndex indexing tests | diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index d870259c2539b..c18cd1f252c83 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -146,76 +146,6 @@ def test_contains_list(self):
with pytest.raises(TypeError, match="unhashable type"):
["a", "b"] in idx
- def test_map(self):
- ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
- result = ci.map(lambda x: x.lower())
- exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
- tm.assert_index_equal(result, exp)
-
- ci = pd.CategoricalIndex(
- list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
- )
- result = ci.map(lambda x: x.lower())
- exp = pd.CategoricalIndex(
- list("ababc"), categories=list("bac"), ordered=False, name="XXX"
- )
- tm.assert_index_equal(result, exp)
-
- # GH 12766: Return an index not an array
- tm.assert_index_equal(
- ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
- )
-
- # change categories dtype
- ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
-
- def f(x):
- return {"A": 10, "B": 20, "C": 30}.get(x)
-
- result = ci.map(f)
- exp = pd.CategoricalIndex(
- [10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
- )
- tm.assert_index_equal(result, exp)
-
- result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
- tm.assert_index_equal(result, exp)
-
- result = ci.map({"A": 10, "B": 20, "C": 30})
- tm.assert_index_equal(result, exp)
-
- def test_map_with_categorical_series(self):
- # GH 12756
- a = pd.Index([1, 2, 3, 4])
- b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
- c = pd.Series(["even", "odd", "even", "odd"])
-
- exp = CategoricalIndex(["odd", "even", "odd", np.nan])
- tm.assert_index_equal(a.map(b), exp)
- exp = pd.Index(["odd", "even", "odd", np.nan])
- tm.assert_index_equal(a.map(c), exp)
-
- @pytest.mark.parametrize(
- ("data", "f"),
- (
- ([1, 1, np.nan], pd.isna),
- ([1, 2, np.nan], pd.isna),
- ([1, 1, np.nan], {1: False}),
- ([1, 2, np.nan], {1: False, 2: False}),
- ([1, 1, np.nan], pd.Series([False, False])),
- ([1, 2, np.nan], pd.Series([False, False, False])),
- ),
- )
- def test_map_with_nan(self, data, f): # GH 24241
- values = pd.Categorical(data)
- result = values.map(f)
- if data[1] == 1:
- expected = pd.Categorical([False, False, np.nan])
- tm.assert_categorical_equal(result, expected)
- else:
- expected = pd.Index([False, False, np.nan])
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_where(self, klass):
i = self.create_index()
@@ -384,89 +314,6 @@ def test_astype_category(self, name, dtype_ordered, index_ordered):
expected = index
tm.assert_index_equal(result, expected)
- def test_reindex_base(self):
- # Determined by cat ordering.
- idx = CategoricalIndex(list("cab"), categories=list("cab"))
- expected = np.arange(len(idx), dtype=np.intp)
-
- actual = idx.get_indexer(idx)
- tm.assert_numpy_array_equal(expected, actual)
-
- with pytest.raises(ValueError, match="Invalid fill method"):
- idx.get_indexer(idx, method="invalid")
-
- def test_reindexing(self):
- np.random.seed(123456789)
-
- ci = self.create_index()
- oidx = Index(np.array(ci))
-
- for n in [1, 2, 5, len(ci)]:
- finder = oidx[np.random.randint(0, len(ci), size=n)]
- expected = oidx.get_indexer_non_unique(finder)[0]
-
- actual = ci.get_indexer(finder)
- tm.assert_numpy_array_equal(expected, actual)
-
- # see gh-17323
- #
- # Even when indexer is equal to the
- # members in the index, we should
- # respect duplicates instead of taking
- # the fast-track path.
- for finder in [list("aabbca"), list("aababca")]:
- expected = oidx.get_indexer_non_unique(finder)[0]
-
- actual = ci.get_indexer(finder)
- tm.assert_numpy_array_equal(expected, actual)
-
- def test_reindex_dtype(self):
- c = CategoricalIndex(["a", "b", "c", "a"])
- res, indexer = c.reindex(["a", "c"])
- tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"])
- res, indexer = c.reindex(Categorical(["a", "c"]))
-
- exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- res, indexer = c.reindex(["a", "c"])
- exp = Index(["a", "a", "c"], dtype="object")
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- res, indexer = c.reindex(Categorical(["a", "c"]))
- exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- def test_reindex_duplicate_target(self):
- # See GH25459
- cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
- res, indexer = cat.reindex(["a", "c", "c"])
- exp = Index(["a", "c", "c"], dtype="object")
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
-
- res, indexer = cat.reindex(
- CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
- )
- exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
-
- def test_reindex_empty_index(self):
- # See GH16770
- c = CategoricalIndex([])
- res, indexer = c.reindex(["a", "b"])
- tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
-
@pytest.mark.parametrize(
"data, non_lexsorted_data",
[[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]],
@@ -518,75 +365,6 @@ def test_drop_duplicates(self):
tm.assert_index_equal(idx.drop_duplicates(), expected)
tm.assert_index_equal(idx.unique(), expected)
- def test_get_indexer(self):
-
- idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))
- idx2 = CategoricalIndex(list("abf"))
-
- for indexer in [idx2, list("abf"), Index(list("abf"))]:
- r1 = idx1.get_indexer(idx2)
- tm.assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
-
- msg = (
- "method='pad' and method='backfill' not implemented yet for "
- "CategoricalIndex"
- )
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="pad")
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="backfill")
-
- msg = "method='nearest' not implemented yet for CategoricalIndex"
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="nearest")
-
- def test_get_loc(self):
- # GH 12531
- cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc"))
- idx1 = Index(list("abcde"))
- assert cidx1.get_loc("a") == idx1.get_loc("a")
- assert cidx1.get_loc("e") == idx1.get_loc("e")
-
- for i in [cidx1, idx1]:
- with pytest.raises(KeyError, match="'NOT-EXIST'"):
- i.get_loc("NOT-EXIST")
-
- # non-unique
- cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc"))
- idx2 = Index(list("aacded"))
-
- # results in bool array
- res = cidx2.get_loc("d")
- tm.assert_numpy_array_equal(res, idx2.get_loc("d"))
- tm.assert_numpy_array_equal(
- res, np.array([False, False, False, True, False, True])
- )
- # unique element results in scalar
- res = cidx2.get_loc("e")
- assert res == idx2.get_loc("e")
- assert res == 4
-
- for i in [cidx2, idx2]:
- with pytest.raises(KeyError, match="'NOT-EXIST'"):
- i.get_loc("NOT-EXIST")
-
- # non-unique, sliceable
- cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc"))
- idx3 = Index(list("aabbb"))
-
- # results in slice
- res = cidx3.get_loc("a")
- assert res == idx3.get_loc("a")
- assert res == slice(0, 2, None)
-
- res = cidx3.get_loc("b")
- assert res == idx3.get_loc("b")
- assert res == slice(2, 5, None)
-
- for i in [cidx3, idx3]:
- with pytest.raises(KeyError, match="'c'"):
- i.get_loc("c")
-
def test_repr_roundtrip(self):
ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
@@ -837,122 +615,6 @@ def test_fillna_categorical(self):
with pytest.raises(ValueError, match=msg):
idx.fillna(2.0)
- def test_take_fill_value(self):
- # GH 12631
-
- # numeric category
- idx = pd.CategoricalIndex([1, 2, 3], name="xxx")
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # object category
- idx = pd.CategoricalIndex(
- list("CBA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.CategoricalIndex(
- list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.CategoricalIndex(
- ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.CategoricalIndex(
- list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- msg = (
- "When allow_fill=True and fill_value is not None, "
- "all indices must be >= -1"
- )
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- with pytest.raises(IndexError):
- idx.take(np.array([1, -5]))
-
- def test_take_fill_value_datetime(self):
-
- # datetime category
- idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
- idx = pd.CategoricalIndex(idx)
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.DatetimeIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
- )
- expected = pd.CategoricalIndex(expected)
- tm.assert_index_equal(result, expected)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
- exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
- expected = pd.CategoricalIndex(expected, categories=exp_cats)
- tm.assert_index_equal(result, expected)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.DatetimeIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
- )
- expected = pd.CategoricalIndex(expected)
- tm.assert_index_equal(result, expected)
-
- msg = (
- "When allow_fill=True and fill_value is not None, "
- "all indices must be >= -1"
- )
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- with pytest.raises(IndexError):
- idx.take(np.array([1, -5]))
-
- def test_take_invalid_kwargs(self):
- idx = pd.CategoricalIndex([1, 2, 3], name="foo")
- indices = [1, 0, -1]
-
- msg = r"take\(\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=msg):
- idx.take(indices, foo=2)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, out=indices)
-
- msg = "the 'mode' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, mode="clip")
-
@pytest.mark.parametrize(
"dtype, engine_type",
[
@@ -976,19 +638,10 @@ def test_engine_type(self, dtype, engine_type):
assert np.issubdtype(ci.codes.dtype, dtype)
assert isinstance(ci._engine, engine_type)
- @pytest.mark.parametrize(
- "data, categories",
- [
- (list("abcbca"), list("cab")),
- (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
- ],
- ids=["string", "interval"],
- )
- def test_map_str(self, data, categories, ordered_fixture):
- # GH 31202 - override base class since we want to maintain categorical/ordered
- index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
- result = index.map(str)
- expected = CategoricalIndex(
- map(str, data), categories=map(str, categories), ordered=ordered_fixture
- )
- tm.assert_index_equal(result, expected)
+ def test_reindex_base(self):
+ # See test_reindex.py
+ pass
+
+ def test_map_str(self):
+ # See test_map.py
+ pass
diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py
new file mode 100644
index 0000000000000..6fce6542d228e
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_indexing.py
@@ -0,0 +1,233 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestTake:
+ def test_take_fill_value(self):
+ # GH 12631
+
+ # numeric category
+ idx = pd.CategoricalIndex([1, 2, 3], name="xxx")
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # object category
+ idx = pd.CategoricalIndex(
+ list("CBA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.CategoricalIndex(
+ list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.CategoricalIndex(
+ ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.CategoricalIndex(
+ list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ msg = (
+ "When allow_fill=True and fill_value is not None, "
+ "all indices must be >= -1"
+ )
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ with pytest.raises(IndexError):
+ idx.take(np.array([1, -5]))
+
+ def test_take_fill_value_datetime(self):
+
+ # datetime category
+ idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
+ idx = pd.CategoricalIndex(idx)
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.DatetimeIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
+ )
+ expected = pd.CategoricalIndex(expected)
+ tm.assert_index_equal(result, expected)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
+ exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
+ expected = pd.CategoricalIndex(expected, categories=exp_cats)
+ tm.assert_index_equal(result, expected)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.DatetimeIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
+ )
+ expected = pd.CategoricalIndex(expected)
+ tm.assert_index_equal(result, expected)
+
+ msg = (
+ "When allow_fill=True and fill_value is not None, "
+ "all indices must be >= -1"
+ )
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ with pytest.raises(IndexError):
+ idx.take(np.array([1, -5]))
+
+ def test_take_invalid_kwargs(self):
+ idx = pd.CategoricalIndex([1, 2, 3], name="foo")
+ indices = [1, 0, -1]
+
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
+ idx.take(indices, foo=2)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, out=indices)
+
+ msg = "the 'mode' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, mode="clip")
+
+
+class TestGetLoc:
+ def test_get_loc(self):
+ # GH 12531
+ cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc"))
+ idx1 = Index(list("abcde"))
+ assert cidx1.get_loc("a") == idx1.get_loc("a")
+ assert cidx1.get_loc("e") == idx1.get_loc("e")
+
+ for i in [cidx1, idx1]:
+ with pytest.raises(KeyError, match="'NOT-EXIST'"):
+ i.get_loc("NOT-EXIST")
+
+ # non-unique
+ cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc"))
+ idx2 = Index(list("aacded"))
+
+ # results in bool array
+ res = cidx2.get_loc("d")
+ tm.assert_numpy_array_equal(res, idx2.get_loc("d"))
+ tm.assert_numpy_array_equal(
+ res, np.array([False, False, False, True, False, True])
+ )
+ # unique element results in scalar
+ res = cidx2.get_loc("e")
+ assert res == idx2.get_loc("e")
+ assert res == 4
+
+ for i in [cidx2, idx2]:
+ with pytest.raises(KeyError, match="'NOT-EXIST'"):
+ i.get_loc("NOT-EXIST")
+
+ # non-unique, sliceable
+ cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc"))
+ idx3 = Index(list("aabbb"))
+
+ # results in slice
+ res = cidx3.get_loc("a")
+ assert res == idx3.get_loc("a")
+ assert res == slice(0, 2, None)
+
+ res = cidx3.get_loc("b")
+ assert res == idx3.get_loc("b")
+ assert res == slice(2, 5, None)
+
+ for i in [cidx3, idx3]:
+ with pytest.raises(KeyError, match="'c'"):
+ i.get_loc("c")
+
+
+class TestGetIndexer:
+ def test_get_indexer_base(self):
+ # Determined by cat ordering.
+ idx = CategoricalIndex(list("cab"), categories=list("cab"))
+ expected = np.arange(len(idx), dtype=np.intp)
+
+ actual = idx.get_indexer(idx)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ with pytest.raises(ValueError, match="Invalid fill method"):
+ idx.get_indexer(idx, method="invalid")
+
+ def test_get_indexer_non_unique(self):
+ np.random.seed(123456789)
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
+ oidx = Index(np.array(ci))
+
+ for n in [1, 2, 5, len(ci)]:
+ finder = oidx[np.random.randint(0, len(ci), size=n)]
+ expected = oidx.get_indexer_non_unique(finder)[0]
+
+ actual = ci.get_indexer(finder)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ # see gh-17323
+ #
+ # Even when indexer is equal to the
+ # members in the index, we should
+ # respect duplicates instead of taking
+ # the fast-track path.
+ for finder in [list("aabbca"), list("aababca")]:
+ expected = oidx.get_indexer_non_unique(finder)[0]
+
+ actual = ci.get_indexer(finder)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ def test_get_indexer(self):
+
+ idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))
+ idx2 = CategoricalIndex(list("abf"))
+
+ for indexer in [idx2, list("abf"), Index(list("abf"))]:
+ r1 = idx1.get_indexer(idx2)
+ tm.assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
+
+ msg = (
+ "method='pad' and method='backfill' not implemented yet for "
+ "CategoricalIndex"
+ )
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="pad")
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="backfill")
+
+ msg = "method='nearest' not implemented yet for CategoricalIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="nearest")
diff --git a/pandas/tests/indexes/categorical/test_map.py b/pandas/tests/indexes/categorical/test_map.py
new file mode 100644
index 0000000000000..943359a72e971
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_map.py
@@ -0,0 +1,95 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestMap:
+ @pytest.mark.parametrize(
+ "data, categories",
+ [
+ (list("abcbca"), list("cab")),
+ (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
+ ],
+ ids=["string", "interval"],
+ )
+ def test_map_str(self, data, categories, ordered_fixture):
+ # GH 31202 - override base class since we want to maintain categorical/ordered
+ index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
+ result = index.map(str)
+ expected = CategoricalIndex(
+ map(str, data), categories=map(str, categories), ordered=ordered_fixture
+ )
+ tm.assert_index_equal(result, expected)
+
+ def test_map(self):
+ ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
+ result = ci.map(lambda x: x.lower())
+ exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
+ tm.assert_index_equal(result, exp)
+
+ ci = pd.CategoricalIndex(
+ list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
+ )
+ result = ci.map(lambda x: x.lower())
+ exp = pd.CategoricalIndex(
+ list("ababc"), categories=list("bac"), ordered=False, name="XXX"
+ )
+ tm.assert_index_equal(result, exp)
+
+ # GH 12766: Return an index not an array
+ tm.assert_index_equal(
+ ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
+ )
+
+ # change categories dtype
+ ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
+
+ def f(x):
+ return {"A": 10, "B": 20, "C": 30}.get(x)
+
+ result = ci.map(f)
+ exp = pd.CategoricalIndex(
+ [10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
+ )
+ tm.assert_index_equal(result, exp)
+
+ result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
+ tm.assert_index_equal(result, exp)
+
+ result = ci.map({"A": 10, "B": 20, "C": 30})
+ tm.assert_index_equal(result, exp)
+
+ def test_map_with_categorical_series(self):
+ # GH 12756
+ a = pd.Index([1, 2, 3, 4])
+ b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
+ c = pd.Series(["even", "odd", "even", "odd"])
+
+ exp = CategoricalIndex(["odd", "even", "odd", np.nan])
+ tm.assert_index_equal(a.map(b), exp)
+ exp = pd.Index(["odd", "even", "odd", np.nan])
+ tm.assert_index_equal(a.map(c), exp)
+
+ @pytest.mark.parametrize(
+ ("data", "f"),
+ (
+ ([1, 1, np.nan], pd.isna),
+ ([1, 2, np.nan], pd.isna),
+ ([1, 1, np.nan], {1: False}),
+ ([1, 2, np.nan], {1: False, 2: False}),
+ ([1, 1, np.nan], pd.Series([False, False])),
+ ([1, 2, np.nan], pd.Series([False, False, False])),
+ ),
+ )
+ def test_map_with_nan(self, data, f): # GH 24241
+ values = pd.Categorical(data)
+ result = values.map(f)
+ if data[1] == 1:
+ expected = pd.Categorical([False, False, np.nan])
+ tm.assert_categorical_equal(result, expected)
+ else:
+ expected = pd.Index([False, False, np.nan])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py
new file mode 100644
index 0000000000000..f59ddc42ce4e4
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_reindex.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+from pandas import Categorical, CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestReindex:
+ def test_reindex_dtype(self):
+ c = CategoricalIndex(["a", "b", "c", "a"])
+ res, indexer = c.reindex(["a", "c"])
+ tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"])
+ res, indexer = c.reindex(Categorical(["a", "c"]))
+
+ exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ res, indexer = c.reindex(["a", "c"])
+ exp = Index(["a", "a", "c"], dtype="object")
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ res, indexer = c.reindex(Categorical(["a", "c"]))
+ exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ def test_reindex_duplicate_target(self):
+ # See GH25459
+ cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
+ res, indexer = cat.reindex(["a", "c", "c"])
+ exp = Index(["a", "c", "c"], dtype="object")
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
+
+ res, indexer = cat.reindex(
+ CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
+ )
+ exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
+
+ def test_reindex_empty_index(self):
+ # See GH16770
+ c = CategoricalIndex([])
+ res, indexer = c.reindex(["a", "b"])
+ tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
| https://api.github.com/repos/pandas-dev/pandas/pulls/31559 | 2020-02-02T02:56:45Z | 2020-02-02T18:35:58Z | 2020-02-02T18:35:58Z | 2020-02-02T18:41:49Z | |
REF: organize DataFrame tests for indexing, by-method | diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py
index a1c12be2b0180..6bfcac3793584 100644
--- a/pandas/tests/frame/indexing/test_datetime.py
+++ b/pandas/tests/frame/indexing/test_datetime.py
@@ -45,13 +45,6 @@ def test_set_reset(self):
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
- def test_transpose(self, timezone_frame):
-
- result = timezone_frame.T
- expected = DataFrame(timezone_frame.values.T)
- expected.index = ["A", "B", "C"]
- tm.assert_frame_equal(result, expected)
-
def test_scalar_assignment(self):
# issue #19843
df = pd.DataFrame(index=(0, 1, 2))
diff --git a/pandas/tests/frame/indexing/test_iat.py b/pandas/tests/frame/indexing/test_iat.py
new file mode 100644
index 0000000000000..23e3392251a3a
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_iat.py
@@ -0,0 +1,7 @@
+def test_iat(float_frame):
+
+ for i, row in enumerate(float_frame.index):
+ for j, col in enumerate(float_frame.columns):
+ result = float_frame.iat[i, j]
+ expected = float_frame.at[row, col]
+ assert result == expected
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 64d0f9ee2b062..2e86acf4f789a 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -28,6 +28,29 @@
from pandas.tseries.offsets import BDay
+class TestGet:
+ def test_get(self, float_frame):
+ b = float_frame.get("B")
+ tm.assert_series_equal(b, float_frame["B"])
+
+ assert float_frame.get("foo") is None
+ tm.assert_series_equal(
+ float_frame.get("foo", float_frame["B"]), float_frame["B"]
+ )
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame(),
+ DataFrame(columns=list("AB")),
+ DataFrame(columns=list("AB"), index=range(3)),
+ ],
+ )
+ def test_get_none(self, df):
+ # see gh-5652
+ assert df.get(None) is None
+
+
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
@@ -64,27 +87,6 @@ def test_getitem_dupe_cols(self):
with pytest.raises(KeyError, match=re.escape(msg)):
df[["baf"]]
- def test_get(self, float_frame):
- b = float_frame.get("B")
- tm.assert_series_equal(b, float_frame["B"])
-
- assert float_frame.get("foo") is None
- tm.assert_series_equal(
- float_frame.get("foo", float_frame["B"]), float_frame["B"]
- )
-
- @pytest.mark.parametrize(
- "df",
- [
- DataFrame(),
- DataFrame(columns=list("AB")),
- DataFrame(columns=list("AB"), index=range(3)),
- ],
- )
- def test_get_none(self, df):
- # see gh-5652
- assert df.get(None) is None
-
@pytest.mark.parametrize("key_type", [iter, np.array, Series, Index])
def test_loc_iterable(self, float_frame, key_type):
idx = key_type(["A", "B", "C"])
@@ -1547,14 +1549,6 @@ def test_loc_duplicates(self):
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
- def test_iat(self, float_frame):
-
- for i, row in enumerate(float_frame.index):
- for j, col in enumerate(float_frame.columns):
- result = float_frame.iat[i, j]
- expected = float_frame.at[row, col]
- assert result == expected
-
@pytest.mark.parametrize(
"method,expected_values",
[
@@ -1916,89 +1910,6 @@ def test_at_time_between_time_datetimeindex(self):
result.loc[bkey] = df.iloc[binds]
tm.assert_frame_equal(result, df)
- def test_xs(self, float_frame, datetime_frame):
- idx = float_frame.index[5]
- xs = float_frame.xs(idx)
- for item, value in xs.items():
- if np.isnan(value):
- assert np.isnan(float_frame[item][idx])
- else:
- assert value == float_frame[item][idx]
-
- # mixed-type xs
- test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
- frame = DataFrame(test_data)
- xs = frame.xs("1")
- assert xs.dtype == np.object_
- assert xs["A"] == 1
- assert xs["B"] == "1"
-
- with pytest.raises(
- KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
- ):
- datetime_frame.xs(datetime_frame.index[0] - BDay())
-
- # xs get column
- series = float_frame.xs("A", axis=1)
- expected = float_frame["A"]
- tm.assert_series_equal(series, expected)
-
- # view is returned if possible
- series = float_frame.xs("A", axis=1)
- series[:] = 5
- assert (expected == 5).all()
-
- def test_xs_corner(self):
- # pathological mixed-type reordering case
- df = DataFrame(index=[0])
- df["A"] = 1.0
- df["B"] = "foo"
- df["C"] = 2.0
- df["D"] = "bar"
- df["E"] = 3.0
-
- xs = df.xs(0)
- exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
- tm.assert_series_equal(xs, exp)
-
- # no columns but Index(dtype=object)
- df = DataFrame(index=["a", "b", "c"])
- result = df.xs("a")
- expected = Series([], name="a", index=pd.Index([]), dtype=np.float64)
- tm.assert_series_equal(result, expected)
-
- def test_xs_duplicates(self):
- df = DataFrame(np.random.randn(5, 2), index=["b", "b", "c", "b", "a"])
-
- cross = df.xs("c")
- exp = df.iloc[2]
- tm.assert_series_equal(cross, exp)
-
- def test_xs_keep_level(self):
- df = DataFrame(
- {
- "day": {0: "sat", 1: "sun"},
- "flavour": {0: "strawberry", 1: "strawberry"},
- "sales": {0: 10, 1: 12},
- "year": {0: 2008, 1: 2008},
- }
- ).set_index(["year", "flavour", "day"])
- result = df.xs("sat", level="day", drop_level=False)
- expected = df[:1]
- tm.assert_frame_equal(result, expected)
-
- result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
- tm.assert_frame_equal(result, expected)
-
- def test_xs_view(self):
- # in 0.14 this will return a view if possible a copy otherwise, but
- # this is numpy dependent
-
- dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
-
- dm.xs(2)[:] = 10
- assert (dm.xs(2) == 10).all()
-
def test_index_namedtuple(self):
from collections import namedtuple
@@ -2154,31 +2065,6 @@ def test_mask_callable(self):
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10))
- def test_head_tail(self, float_frame):
- tm.assert_frame_equal(float_frame.head(), float_frame[:5])
- tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
-
- tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
- tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
-
- tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
- tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
- tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
- tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
- # with a float index
- df = float_frame.copy()
- df.index = np.arange(len(float_frame)) + 0.1
- tm.assert_frame_equal(df.head(), df.iloc[:5])
- tm.assert_frame_equal(df.tail(), df.iloc[-5:])
- tm.assert_frame_equal(df.head(0), df[0:0])
- tm.assert_frame_equal(df.tail(0), df[0:0])
- tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
- tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
- # test empty dataframe
- empty_df = DataFrame()
- tm.assert_frame_equal(empty_df.tail(), empty_df)
- tm.assert_frame_equal(empty_df.head(), empty_df)
-
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(
@@ -2269,10 +2155,3 @@ def test_set_reset(self):
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
-
- def test_transpose(self, uint64_frame):
-
- result = uint64_frame.T
- expected = DataFrame(uint64_frame.values.T)
- expected.index = ["A", "B"]
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index df1b128dcd227..507b2e9cd237b 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -10,22 +10,30 @@
import pandas._testing as tm
-class TestDataFrameIndexingWhere:
- def test_where(self, float_string_frame, mixed_float_frame, mixed_int_frame):
- default_frame = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
-
- def _safe_add(df):
- # only add to the numeric items
- def is_ok(s):
- return (
- issubclass(s.dtype.type, (np.integer, np.floating))
- and s.dtype != "uint8"
- )
-
- return DataFrame(
- dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items())
- )
+@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
+def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame):
+ if request.param == "default":
+ return DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
+ if request.param == "float_string":
+ return float_string_frame
+ if request.param == "mixed_float":
+ return mixed_float_frame
+ if request.param == "mixed_int":
+ return mixed_int_frame
+
+
+def _safe_add(df):
+ # only add to the numeric items
+ def is_ok(s):
+ return (
+ issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8"
+ )
+
+ return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items()))
+
+class TestDataFrameIndexingWhere:
+ def test_where_get(self, where_frame, float_string_frame):
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
@@ -40,19 +48,15 @@ def _check_get(df, cond, check_dtypes=True):
assert (rs.dtypes == df.dtypes).all()
# check getting
- for df in [
- default_frame,
- float_string_frame,
- mixed_float_frame,
- mixed_int_frame,
- ]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
- cond = df > 0
- _check_get(df, cond)
-
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
+ cond = df > 0
+ _check_get(df, cond)
+
+ def test_where_upcasting(self):
# upcasting case (GH # 2794)
df = DataFrame(
{
@@ -78,6 +82,7 @@ def _check_get(df, cond, check_dtypes=True):
tm.assert_series_equal(result, expected)
+ def test_where_alignment(self, where_frame, float_string_frame):
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
@@ -107,27 +112,30 @@ def _check_align(df, cond, other, check_dtypes=True):
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
- for df in [float_string_frame, mixed_float_frame, mixed_int_frame]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
- # other is a frame
- cond = (df > 0)[1:]
- _check_align(df, cond, _safe_add(df))
+ # other is a frame
+ cond = (df > 0)[1:]
+ _check_align(df, cond, _safe_add(df))
- # check other is ndarray
- cond = df > 0
- _check_align(df, cond, (_safe_add(df).values))
+ # check other is ndarray
+ cond = df > 0
+ _check_align(df, cond, (_safe_add(df).values))
- # integers are upcast, so don't check the dtypes
- cond = df > 0
- check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
- _check_align(df, cond, np.nan, check_dtypes=check_dtypes)
+ # integers are upcast, so don't check the dtypes
+ cond = df > 0
+ check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
+ _check_align(df, cond, np.nan, check_dtypes=check_dtypes)
+ def test_where_invalid(self):
# invalid conditions
- df = default_frame
+ df = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
+ cond = df > 0
+
err1 = (df + 1).values[0:2, :]
msg = "other must be the same shape as self when an ndarray"
with pytest.raises(ValueError, match=msg):
@@ -144,7 +152,9 @@ def _check_align(df, cond, other, check_dtypes=True):
with pytest.raises(ValueError, match=msg):
df.mask(0)
+ def test_where_set(self, where_frame, float_string_frame):
# where inplace
+
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
@@ -160,27 +170,23 @@ def _check_set(df, cond, check_dtypes=True):
v = np.dtype("float64")
assert dfi[k].dtype == v
- for df in [
- default_frame,
- float_string_frame,
- mixed_float_frame,
- mixed_int_frame,
- ]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
- cond = df > 0
- _check_set(df, cond)
+ cond = df > 0
+ _check_set(df, cond)
- cond = df >= 0
- _check_set(df, cond)
+ cond = df >= 0
+ _check_set(df, cond)
- # aligning
- cond = (df >= 0)[1:]
- _check_set(df, cond)
+ # aligning
+ cond = (df >= 0)[1:]
+ _check_set(df, cond)
+ def test_where_series_slicing(self):
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({"a": range(3), "b": range(4, 7)})
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
new file mode 100644
index 0000000000000..71b40585f0c2f
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -0,0 +1,95 @@
+import re
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+from pandas.tseries.offsets import BDay
+
+
+class TestXS:
+ def test_xs(self, float_frame, datetime_frame):
+ idx = float_frame.index[5]
+ xs = float_frame.xs(idx)
+ for item, value in xs.items():
+ if np.isnan(value):
+ assert np.isnan(float_frame[item][idx])
+ else:
+ assert value == float_frame[item][idx]
+
+ # mixed-type xs
+ test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
+ frame = DataFrame(test_data)
+ xs = frame.xs("1")
+ assert xs.dtype == np.object_
+ assert xs["A"] == 1
+ assert xs["B"] == "1"
+
+ with pytest.raises(
+ KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
+ ):
+ datetime_frame.xs(datetime_frame.index[0] - BDay())
+
+ # xs get column
+ series = float_frame.xs("A", axis=1)
+ expected = float_frame["A"]
+ tm.assert_series_equal(series, expected)
+
+ # view is returned if possible
+ series = float_frame.xs("A", axis=1)
+ series[:] = 5
+ assert (expected == 5).all()
+
+ def test_xs_corner(self):
+ # pathological mixed-type reordering case
+ df = DataFrame(index=[0])
+ df["A"] = 1.0
+ df["B"] = "foo"
+ df["C"] = 2.0
+ df["D"] = "bar"
+ df["E"] = 3.0
+
+ xs = df.xs(0)
+ exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
+ tm.assert_series_equal(xs, exp)
+
+ # no columns but Index(dtype=object)
+ df = DataFrame(index=["a", "b", "c"])
+ result = df.xs("a")
+ expected = Series([], name="a", index=pd.Index([]), dtype=np.float64)
+ tm.assert_series_equal(result, expected)
+
+ def test_xs_duplicates(self):
+ df = DataFrame(np.random.randn(5, 2), index=["b", "b", "c", "b", "a"])
+
+ cross = df.xs("c")
+ exp = df.iloc[2]
+ tm.assert_series_equal(cross, exp)
+
+ def test_xs_keep_level(self):
+ df = DataFrame(
+ {
+ "day": {0: "sat", 1: "sun"},
+ "flavour": {0: "strawberry", 1: "strawberry"},
+ "sales": {0: 10, 1: 12},
+ "year": {0: 2008, 1: 2008},
+ }
+ ).set_index(["year", "flavour", "day"])
+ result = df.xs("sat", level="day", drop_level=False)
+ expected = df[:1]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_view(self):
+ # in 0.14 this will return a view if possible a copy otherwise, but
+ # this is numpy dependent
+
+ dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
+
+ dm.xs(2)[:] = 10
+ assert (dm.xs(2) == 10).all()
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
new file mode 100644
index 0000000000000..7715cb1cb6eec
--- /dev/null
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -0,0 +1,349 @@
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Index, Series
+import pandas._testing as tm
+
+
+class TestDataFrameCombineFirst:
+ def test_combine_first_mixed(self):
+ a = Series(["a", "b"], index=range(2))
+ b = Series(range(2), index=range(2))
+ f = DataFrame({"A": a, "B": b})
+
+ a = Series(["a", "b"], index=range(5, 7))
+ b = Series(range(2), index=range(5, 7))
+ g = DataFrame({"A": a, "B": b})
+
+ exp = pd.DataFrame(
+ {"A": list("abab"), "B": [0.0, 1.0, 0.0, 1.0]}, index=[0, 1, 5, 6]
+ )
+ combined = f.combine_first(g)
+ tm.assert_frame_equal(combined, exp)
+
+ def test_combine_first(self, float_frame):
+ # disjoint
+ head, tail = float_frame[:5], float_frame[5:]
+
+ combined = head.combine_first(tail)
+ reordered_frame = float_frame.reindex(combined.index)
+ tm.assert_frame_equal(combined, reordered_frame)
+ assert tm.equalContents(combined.columns, float_frame.columns)
+ tm.assert_series_equal(combined["A"], reordered_frame["A"])
+
+ # same index
+ fcopy = float_frame.copy()
+ fcopy["A"] = 1
+ del fcopy["C"]
+
+ fcopy2 = float_frame.copy()
+ fcopy2["B"] = 0
+ del fcopy2["D"]
+
+ combined = fcopy.combine_first(fcopy2)
+
+ assert (combined["A"] == 1).all()
+ tm.assert_series_equal(combined["B"], fcopy["B"])
+ tm.assert_series_equal(combined["C"], fcopy2["C"])
+ tm.assert_series_equal(combined["D"], fcopy["D"])
+
+ # overlap
+ head, tail = reordered_frame[:10].copy(), reordered_frame
+ head["A"] = 1
+
+ combined = head.combine_first(tail)
+ assert (combined["A"][:10] == 1).all()
+
+ # reverse overlap
+ tail["A"][:10] = 0
+ combined = tail.combine_first(head)
+ assert (combined["A"][:10] == 0).all()
+
+ # no overlap
+ f = float_frame[:10]
+ g = float_frame[10:]
+ combined = f.combine_first(g)
+ tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
+ tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
+
+ # corner cases
+ comb = float_frame.combine_first(DataFrame())
+ tm.assert_frame_equal(comb, float_frame)
+
+ comb = DataFrame().combine_first(float_frame)
+ tm.assert_frame_equal(comb, float_frame)
+
+ comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
+ assert "faz" in comb.index
+
+ # #2525
+ df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
+ df2 = DataFrame(columns=["b"])
+ result = df.combine_first(df2)
+ assert "b" in result
+
+ def test_combine_first_mixed_bug(self):
+ idx = Index(["a", "b", "c", "e"])
+ ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
+ ser2 = Series(["a", "b", "c", "e"], index=idx)
+ ser3 = Series([12, 4, 5, 97], index=idx)
+
+ frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
+
+ idx = Index(["a", "b", "c", "f"])
+ ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
+ ser2 = Series(["a", "b", "c", "f"], index=idx)
+ ser3 = Series([12, 4, 5, 97], index=idx)
+
+ frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
+
+ combined = frame1.combine_first(frame2)
+ assert len(combined.columns) == 5
+
+ # gh 3016 (same as in update)
+ df = DataFrame(
+ [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+
+ other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
+ result = df.combine_first(other)
+ tm.assert_frame_equal(result, df)
+
+ df.loc[0, "A"] = np.nan
+ result = df.combine_first(other)
+ df.loc[0, "A"] = 45
+ tm.assert_frame_equal(result, df)
+
+ # doc example
+ df1 = DataFrame(
+ {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
+ )
+
+ df2 = DataFrame(
+ {
+ "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
+ "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
+ }
+ )
+
+ result = df1.combine_first(df2)
+ expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
+ tm.assert_frame_equal(result, expected)
+
+ # GH3552, return object dtype with bools
+ df1 = DataFrame(
+ [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
+ )
+ df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
+
+ result = df1.combine_first(df2)[2]
+ expected = Series([True, True, False], name=2)
+ tm.assert_series_equal(result, expected)
+
+ # GH 3593, converting datetime64[ns] incorrectly
+ df0 = DataFrame(
+ {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
+ )
+ df1 = DataFrame({"a": [None, None, None]})
+ df2 = df1.combine_first(df0)
+ tm.assert_frame_equal(df2, df0)
+
+ df2 = df0.combine_first(df1)
+ tm.assert_frame_equal(df2, df0)
+
+ df0 = DataFrame(
+ {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
+ )
+ df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
+ df2 = df1.combine_first(df0)
+ result = df0.copy()
+ result.iloc[0, :] = df1.iloc[0, :]
+ tm.assert_frame_equal(df2, result)
+
+ df2 = df0.combine_first(df1)
+ tm.assert_frame_equal(df2, df0)
+
+ def test_combine_first_align_nan(self):
+ # GH 7509 (not fixed)
+ dfa = pd.DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
+ dfb = pd.DataFrame([[4], [5]], columns=["b"])
+ assert dfa["a"].dtype == "datetime64[ns]"
+ assert dfa["b"].dtype == "int64"
+
+ res = dfa.combine_first(dfb)
+ exp = pd.DataFrame(
+ {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2.0, 5.0]},
+ columns=["a", "b"],
+ )
+ tm.assert_frame_equal(res, exp)
+ assert res["a"].dtype == "datetime64[ns]"
+ # ToDo: this must be int64
+ assert res["b"].dtype == "float64"
+
+ res = dfa.iloc[:0].combine_first(dfb)
+ exp = pd.DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
+ tm.assert_frame_equal(res, exp)
+ # ToDo: this must be datetime64
+ assert res["a"].dtype == "float64"
+ # ToDo: this must be int64
+ assert res["b"].dtype == "int64"
+
+ def test_combine_first_timezone(self):
+ # see gh-7630
+ data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
+ df1 = pd.DataFrame(
+ columns=["UTCdatetime", "abc"],
+ data=data1,
+ index=pd.date_range("20140627", periods=1),
+ )
+ data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
+ df2 = pd.DataFrame(
+ columns=["UTCdatetime", "xyz"],
+ data=data2,
+ index=pd.date_range("20140628", periods=1),
+ )
+ res = df2[["UTCdatetime"]].combine_first(df1)
+ exp = pd.DataFrame(
+ {
+ "UTCdatetime": [
+ pd.Timestamp("2010-01-01 01:01", tz="UTC"),
+ pd.Timestamp("2012-12-12 12:12", tz="UTC"),
+ ],
+ "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
+ },
+ columns=["UTCdatetime", "abc"],
+ index=pd.date_range("20140627", periods=2, freq="D"),
+ )
+ tm.assert_frame_equal(res, exp)
+ assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
+ assert res["abc"].dtype == "datetime64[ns, UTC]"
+
+ # see gh-10567
+ dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["DATE"].dtype == "datetime64[ns, UTC]"
+
+ dts1 = pd.DatetimeIndex(
+ ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
+ )
+ df1 = pd.DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
+ dts2 = pd.DatetimeIndex(
+ ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
+ )
+ df2 = pd.DataFrame({"DATE": dts2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.DatetimeIndex(
+ [
+ "2011-01-01",
+ "2012-01-01",
+ "NaT",
+ "2012-01-02",
+ "2011-01-03",
+ "2011-01-04",
+ ],
+ tz="US/Eastern",
+ )
+ exp = pd.DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+
+ # different tz
+ dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-03", "2015-01-05")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ # if df1 doesn't have NaN, keep its dtype
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
+
+ dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-01", "2015-01-03")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ res = df1.combine_first(df2)
+ exp_dts = [
+ pd.Timestamp("2015-01-01", tz="US/Eastern"),
+ pd.Timestamp("2015-01-02", tz="US/Eastern"),
+ pd.Timestamp("2015-01-03"),
+ ]
+ exp = pd.DataFrame({"DATE": exp_dts})
+ tm.assert_frame_equal(res, exp)
+ assert res["DATE"].dtype == "object"
+
+ def test_combine_first_timedelta(self):
+ data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
+ df1 = pd.DataFrame({"TD": data1}, index=[1, 3, 5, 7])
+ data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
+ df2 = pd.DataFrame({"TD": data2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.TimedeltaIndex(
+ ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
+ )
+ exp = pd.DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["TD"].dtype == "timedelta64[ns]"
+
+ def test_combine_first_period(self):
+ data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
+ df1 = pd.DataFrame({"P": data1}, index=[1, 3, 5, 7])
+ data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
+ df2 = pd.DataFrame({"P": data2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.PeriodIndex(
+ ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
+ )
+ exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["P"].dtype == data1.dtype
+
+ # different freq
+ dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
+ df2 = pd.DataFrame({"P": dts2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2012-01-01", freq="D"),
+ pd.NaT,
+ pd.Period("2012-01-02", freq="D"),
+ pd.Period("2011-03", freq="M"),
+ pd.Period("2011-04", freq="M"),
+ ]
+ exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["P"].dtype == "object"
+
+ def test_combine_first_int(self):
+ # GH14687 - integer series that do no align exactly
+
+ df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
+ df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64")
+
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["a"].dtype == "int64"
+
+ @pytest.mark.parametrize("val", [1, 1.0])
+ def test_combine_first_with_asymmetric_other(self, val):
+ # see gh-20699
+ df1 = pd.DataFrame({"isNum": [val]})
+ df2 = pd.DataFrame({"isBool": [True]})
+
+ res = df1.combine_first(df2)
+ exp = pd.DataFrame({"isBool": [True], "isNum": [val]})
+
+ tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
new file mode 100644
index 0000000000000..93763bc12ce0d
--- /dev/null
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -0,0 +1,30 @@
+import numpy as np
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_head_tail(float_frame):
+ tm.assert_frame_equal(float_frame.head(), float_frame[:5])
+ tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
+
+ tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
+ tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
+
+ tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
+ tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
+ tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
+ tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
+ # with a float index
+ df = float_frame.copy()
+ df.index = np.arange(len(float_frame)) + 0.1
+ tm.assert_frame_equal(df.head(), df.iloc[:5])
+ tm.assert_frame_equal(df.tail(), df.iloc[-5:])
+ tm.assert_frame_equal(df.head(0), df[0:0])
+ tm.assert_frame_equal(df.tail(0), df[0:0])
+ tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
+ tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
+ # test empty dataframe
+ empty_df = DataFrame()
+ tm.assert_frame_equal(empty_df.tail(), empty_df)
+ tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
index 428b9e5068407..a5fe5f3a6d5e4 100644
--- a/pandas/tests/frame/methods/test_transpose.py
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -1,3 +1,5 @@
+import numpy as np
+
import pandas as pd
import pandas._testing as tm
@@ -41,3 +43,34 @@ def test_transpose_object_to_tzaware_mixed_tz(self):
assert (df2.dtypes == object).all()
res2 = df2.T
assert (res2.dtypes == [dti.dtype, dti2.dtype]).all()
+
+ def test_transpose_uint64(self, uint64_frame):
+
+ result = uint64_frame.T
+ expected = pd.DataFrame(uint64_frame.values.T)
+ expected.index = ["A", "B"]
+ tm.assert_frame_equal(result, expected)
+
+ def test_transpose_float(self, float_frame):
+ frame = float_frame
+ dft = frame.T
+ for idx, series in dft.items():
+ for col, value in series.items():
+ if np.isnan(value):
+ assert np.isnan(frame[col][idx])
+ else:
+ assert value == frame[col][idx]
+
+ # mixed type
+ index, data = tm.getMixedTypeDict()
+ mixed = pd.DataFrame(data, index=index)
+
+ mixed_T = mixed.T
+ for col, s in mixed_T.items():
+ assert s.dtype == np.object_
+
+ def test_transpose_get_view(self, float_frame):
+ dft = float_frame.T
+ dft.values[:, 5:10] = 5
+
+ assert (float_frame.values[5:10] == 5).all()
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
new file mode 100644
index 0000000000000..d9de026dbf4e9
--- /dev/null
+++ b/pandas/tests/frame/methods/test_update.py
@@ -0,0 +1,135 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series, date_range
+import pandas._testing as tm
+
+
+class TestDataFrameUpdate:
+ def test_update_nan(self):
+ # #15593 #15617
+ # test 1
+ df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
+ df2 = DataFrame({"A": [None, 2, 3]})
+ expected = df1.copy()
+ df1.update(df2, overwrite=False)
+
+ tm.assert_frame_equal(df1, expected)
+
+ # test 2
+ df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
+ df2 = DataFrame({"A": [None, 2, 3]})
+ expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
+ df1.update(df2, overwrite=False)
+
+ tm.assert_frame_equal(df1, expected)
+
+ def test_update(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_dtypes(self):
+
+ # gh 3016
+ df = DataFrame(
+ [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+
+ other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
+ df.update(other)
+
+ expected = DataFrame(
+ [[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_nooverwrite(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other, overwrite=False)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_filtered(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other, filter_func=lambda x: x > 2)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize(
+ "bad_kwarg, exception, msg",
+ [
+ # errors must be 'ignore' or 'raise'
+ ({"errors": "something"}, ValueError, "The parameter errors must.*"),
+ ({"join": "inner"}, NotImplementedError, "Only left join is supported"),
+ ],
+ )
+ def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
+ df = DataFrame([[1.5, 1, 3.0]])
+ with pytest.raises(exception, match=msg):
+ df.update(df, **bad_kwarg)
+
+ def test_update_raise_on_overlap(self):
+ df = DataFrame(
+ [[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
+ with pytest.raises(ValueError, match="Data overlaps"):
+ df.update(other, errors="raise")
+
+ def test_update_from_non_df(self):
+ d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
+ df = DataFrame(d)
+
+ d["a"] = Series([5, 6, 7, 8])
+ df.update(d)
+
+ expected = DataFrame(d)
+
+ tm.assert_frame_equal(df, expected)
+
+ d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
+ df = DataFrame(d)
+
+ d["a"] = [5, 6, 7, 8]
+ df.update(d)
+
+ expected = DataFrame(d)
+
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_datetime_tz(self):
+ # GH 25807
+ result = DataFrame([pd.Timestamp("2019", tz="UTC")])
+ result.update(result)
+ expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 9de5d6fe16a0d..17cc50661e3cb 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -14,15 +14,15 @@
class TestDataFrameMisc:
- def test_copy_index_name_checking(self, float_frame):
+ @pytest.mark.parametrize("attr", ["index", "columns"])
+ def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
- for attr in ("index", "columns"):
- ind = getattr(float_frame, attr)
- ind.name = None
- cp = float_frame.copy()
- getattr(cp, attr).name = "foo"
- assert getattr(float_frame, attr).name is None
+ ind = getattr(float_frame, attr)
+ ind.name = None
+ cp = float_frame.copy()
+ getattr(cp, attr).name = "foo"
+ assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
@@ -358,24 +358,6 @@ def test_to_numpy_copy(self):
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is None
- def test_transpose(self, float_frame):
- frame = float_frame
- dft = frame.T
- for idx, series in dft.items():
- for col, value in series.items():
- if np.isnan(value):
- assert np.isnan(frame[col][idx])
- else:
- assert value == frame[col][idx]
-
- # mixed type
- index, data = tm.getMixedTypeDict()
- mixed = DataFrame(data, index=index)
-
- mixed_T = mixed.T
- for col, s in mixed_T.items():
- assert s.dtype == np.object_
-
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
@@ -470,12 +452,6 @@ def test_deepcopy(self, float_frame):
for idx, value in series.items():
assert float_frame["A"][idx] != value
- def test_transpose_get_view(self, float_frame):
- dft = float_frame.T
- dft.values[:, 5:10] = 5
-
- assert (float_frame.values[5:10] == 5).all()
-
def test_inplace_return_self(self):
# GH 1893
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 9bad54b051d6c..36a476d195fe5 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -128,115 +128,6 @@ def test_concat_tuple_keys(self):
)
tm.assert_frame_equal(results, expected)
- def test_update(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_dtypes(self):
-
- # gh 3016
- df = DataFrame(
- [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
-
- other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
- df.update(other)
-
- expected = DataFrame(
- [[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_nooverwrite(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other, overwrite=False)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_filtered(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other, filter_func=lambda x: x > 2)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- @pytest.mark.parametrize(
- "bad_kwarg, exception, msg",
- [
- # errors must be 'ignore' or 'raise'
- ({"errors": "something"}, ValueError, "The parameter errors must.*"),
- ({"join": "inner"}, NotImplementedError, "Only left join is supported"),
- ],
- )
- def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
- df = DataFrame([[1.5, 1, 3.0]])
- with pytest.raises(exception, match=msg):
- df.update(df, **bad_kwarg)
-
- def test_update_raise_on_overlap(self):
- df = DataFrame(
- [[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
- with pytest.raises(ValueError, match="Data overlaps"):
- df.update(other, errors="raise")
-
- def test_update_from_non_df(self):
- d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
- df = DataFrame(d)
-
- d["a"] = Series([5, 6, 7, 8])
- df.update(d)
-
- expected = DataFrame(d)
-
- tm.assert_frame_equal(df, expected)
-
- d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
- df = DataFrame(d)
-
- d["a"] = [5, 6, 7, 8]
- df.update(d)
-
- expected = DataFrame(d)
-
- tm.assert_frame_equal(df, expected)
-
- def test_update_datetime_tz(self):
- # GH 25807
- result = DataFrame([pd.Timestamp("2019", tz="UTC")])
- result.update(result)
- expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
- tm.assert_frame_equal(result, expected)
-
def test_join_str_datetime(self):
str_dates = ["20120209", "20120222"]
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
@@ -422,347 +313,6 @@ def test_concat_astype_dup_col(self):
).astype("category")
tm.assert_frame_equal(result, expected)
-
-class TestDataFrameCombineFirst:
- def test_combine_first_mixed(self):
- a = Series(["a", "b"], index=range(2))
- b = Series(range(2), index=range(2))
- f = DataFrame({"A": a, "B": b})
-
- a = Series(["a", "b"], index=range(5, 7))
- b = Series(range(2), index=range(5, 7))
- g = DataFrame({"A": a, "B": b})
-
- exp = pd.DataFrame(
- {"A": list("abab"), "B": [0.0, 1.0, 0.0, 1.0]}, index=[0, 1, 5, 6]
- )
- combined = f.combine_first(g)
- tm.assert_frame_equal(combined, exp)
-
- def test_combine_first(self, float_frame):
- # disjoint
- head, tail = float_frame[:5], float_frame[5:]
-
- combined = head.combine_first(tail)
- reordered_frame = float_frame.reindex(combined.index)
- tm.assert_frame_equal(combined, reordered_frame)
- assert tm.equalContents(combined.columns, float_frame.columns)
- tm.assert_series_equal(combined["A"], reordered_frame["A"])
-
- # same index
- fcopy = float_frame.copy()
- fcopy["A"] = 1
- del fcopy["C"]
-
- fcopy2 = float_frame.copy()
- fcopy2["B"] = 0
- del fcopy2["D"]
-
- combined = fcopy.combine_first(fcopy2)
-
- assert (combined["A"] == 1).all()
- tm.assert_series_equal(combined["B"], fcopy["B"])
- tm.assert_series_equal(combined["C"], fcopy2["C"])
- tm.assert_series_equal(combined["D"], fcopy["D"])
-
- # overlap
- head, tail = reordered_frame[:10].copy(), reordered_frame
- head["A"] = 1
-
- combined = head.combine_first(tail)
- assert (combined["A"][:10] == 1).all()
-
- # reverse overlap
- tail["A"][:10] = 0
- combined = tail.combine_first(head)
- assert (combined["A"][:10] == 0).all()
-
- # no overlap
- f = float_frame[:10]
- g = float_frame[10:]
- combined = f.combine_first(g)
- tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
- tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
-
- # corner cases
- comb = float_frame.combine_first(DataFrame())
- tm.assert_frame_equal(comb, float_frame)
-
- comb = DataFrame().combine_first(float_frame)
- tm.assert_frame_equal(comb, float_frame)
-
- comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
- assert "faz" in comb.index
-
- # #2525
- df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
- df2 = DataFrame(columns=["b"])
- result = df.combine_first(df2)
- assert "b" in result
-
- def test_combine_first_mixed_bug(self):
- idx = Index(["a", "b", "c", "e"])
- ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
- ser2 = Series(["a", "b", "c", "e"], index=idx)
- ser3 = Series([12, 4, 5, 97], index=idx)
-
- frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
-
- idx = Index(["a", "b", "c", "f"])
- ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
- ser2 = Series(["a", "b", "c", "f"], index=idx)
- ser3 = Series([12, 4, 5, 97], index=idx)
-
- frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
-
- combined = frame1.combine_first(frame2)
- assert len(combined.columns) == 5
-
- # gh 3016 (same as in update)
- df = DataFrame(
- [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
-
- other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
- result = df.combine_first(other)
- tm.assert_frame_equal(result, df)
-
- df.loc[0, "A"] = np.nan
- result = df.combine_first(other)
- df.loc[0, "A"] = 45
- tm.assert_frame_equal(result, df)
-
- # doc example
- df1 = DataFrame(
- {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
- )
-
- df2 = DataFrame(
- {
- "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
- "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
- }
- )
-
- result = df1.combine_first(df2)
- expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
- tm.assert_frame_equal(result, expected)
-
- # GH3552, return object dtype with bools
- df1 = DataFrame(
- [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
- )
- df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
-
- result = df1.combine_first(df2)[2]
- expected = Series([True, True, False], name=2)
- tm.assert_series_equal(result, expected)
-
- # GH 3593, converting datetime64[ns] incorrectly
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [None, None, None]})
- df2 = df1.combine_first(df0)
- tm.assert_frame_equal(df2, df0)
-
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
-
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
- df2 = df1.combine_first(df0)
- result = df0.copy()
- result.iloc[0, :] = df1.iloc[0, :]
- tm.assert_frame_equal(df2, result)
-
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
-
- def test_combine_first_align_nan(self):
- # GH 7509 (not fixed)
- dfa = pd.DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
- dfb = pd.DataFrame([[4], [5]], columns=["b"])
- assert dfa["a"].dtype == "datetime64[ns]"
- assert dfa["b"].dtype == "int64"
-
- res = dfa.combine_first(dfb)
- exp = pd.DataFrame(
- {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2.0, 5.0]},
- columns=["a", "b"],
- )
- tm.assert_frame_equal(res, exp)
- assert res["a"].dtype == "datetime64[ns]"
- # ToDo: this must be int64
- assert res["b"].dtype == "float64"
-
- res = dfa.iloc[:0].combine_first(dfb)
- exp = pd.DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
- tm.assert_frame_equal(res, exp)
- # ToDo: this must be datetime64
- assert res["a"].dtype == "float64"
- # ToDo: this must be int64
- assert res["b"].dtype == "int64"
-
- def test_combine_first_timezone(self):
- # see gh-7630
- data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
- df1 = pd.DataFrame(
- columns=["UTCdatetime", "abc"],
- data=data1,
- index=pd.date_range("20140627", periods=1),
- )
- data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
- df2 = pd.DataFrame(
- columns=["UTCdatetime", "xyz"],
- data=data2,
- index=pd.date_range("20140628", periods=1),
- )
- res = df2[["UTCdatetime"]].combine_first(df1)
- exp = pd.DataFrame(
- {
- "UTCdatetime": [
- pd.Timestamp("2010-01-01 01:01", tz="UTC"),
- pd.Timestamp("2012-12-12 12:12", tz="UTC"),
- ],
- "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
- },
- columns=["UTCdatetime", "abc"],
- index=pd.date_range("20140627", periods=2, freq="D"),
- )
- tm.assert_frame_equal(res, exp)
- assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
- assert res["abc"].dtype == "datetime64[ns, UTC]"
-
- # see gh-10567
- dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
- df2 = pd.DataFrame({"DATE": dts2})
-
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["DATE"].dtype == "datetime64[ns, UTC]"
-
- dts1 = pd.DatetimeIndex(
- ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
- )
- df1 = pd.DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
- dts2 = pd.DatetimeIndex(
- ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
- )
- df2 = pd.DataFrame({"DATE": dts2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.DatetimeIndex(
- [
- "2011-01-01",
- "2012-01-01",
- "NaT",
- "2012-01-02",
- "2011-01-03",
- "2011-01-04",
- ],
- tz="US/Eastern",
- )
- exp = pd.DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
-
- # different tz
- dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-03", "2015-01-05")
- df2 = pd.DataFrame({"DATE": dts2})
-
- # if df1 doesn't have NaN, keep its dtype
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
-
- dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-01", "2015-01-03")
- df2 = pd.DataFrame({"DATE": dts2})
-
- res = df1.combine_first(df2)
- exp_dts = [
- pd.Timestamp("2015-01-01", tz="US/Eastern"),
- pd.Timestamp("2015-01-02", tz="US/Eastern"),
- pd.Timestamp("2015-01-03"),
- ]
- exp = pd.DataFrame({"DATE": exp_dts})
- tm.assert_frame_equal(res, exp)
- assert res["DATE"].dtype == "object"
-
- def test_combine_first_timedelta(self):
- data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
- df1 = pd.DataFrame({"TD": data1}, index=[1, 3, 5, 7])
- data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
- df2 = pd.DataFrame({"TD": data2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.TimedeltaIndex(
- ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
- )
- exp = pd.DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["TD"].dtype == "timedelta64[ns]"
-
- def test_combine_first_period(self):
- data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
- df1 = pd.DataFrame({"P": data1}, index=[1, 3, 5, 7])
- data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
- df2 = pd.DataFrame({"P": data2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.PeriodIndex(
- ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
- )
- exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["P"].dtype == data1.dtype
-
- # different freq
- dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
- df2 = pd.DataFrame({"P": dts2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = [
- pd.Period("2011-01", freq="M"),
- pd.Period("2012-01-01", freq="D"),
- pd.NaT,
- pd.Period("2012-01-02", freq="D"),
- pd.Period("2011-03", freq="M"),
- pd.Period("2011-04", freq="M"),
- ]
- exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["P"].dtype == "object"
-
- def test_combine_first_int(self):
- # GH14687 - integer series that do no align exactly
-
- df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
- df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64")
-
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["a"].dtype == "int64"
-
- @pytest.mark.parametrize("val", [1, 1.0])
- def test_combine_first_with_asymmetric_other(self, val):
- # see gh-20699
- df1 = pd.DataFrame({"isNum": [val]})
- df2 = pd.DataFrame({"isBool": [True]})
-
- res = df1.combine_first(df2)
- exp = pd.DataFrame({"isBool": [True], "isNum": [val]})
-
- tm.assert_frame_equal(res, exp)
-
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
@@ -776,23 +326,3 @@ def test_concat_datetime_datetime64_frame(self):
# it works!
pd.concat([df1, df2_obj])
-
-
-class TestDataFrameUpdate:
- def test_update_nan(self):
- # #15593 #15617
- # test 1
- df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
- df2 = DataFrame({"A": [None, 2, 3]})
- expected = df1.copy()
- df1.update(df2, overwrite=False)
-
- tm.assert_frame_equal(df1, expected)
-
- # test 2
- df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
- df2 = DataFrame({"A": [None, 2, 3]})
- expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
- df1.update(df2, overwrite=False)
-
- tm.assert_frame_equal(df1, expected)
| The only change that isnt just rearrangement is implementing where_frame fixture in test_where and using it to get rid of for-loops inside a bunch of tests in that file | https://api.github.com/repos/pandas-dev/pandas/pulls/31558 | 2020-02-02T02:32:19Z | 2020-02-02T18:33:51Z | 2020-02-02T18:33:51Z | 2020-02-02T18:44:05Z |
REF: organize Series indexing tests | diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index 16a29d10eb414..28f3c0f7429f8 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -1,10 +1,7 @@
import numpy as np
import pytest
-from pandas.core.dtypes.common import is_integer
-
-import pandas as pd
-from pandas import Index, Series, Timestamp, date_range, isna
+from pandas import Index, Series
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -136,492 +133,3 @@ def test_get_set_boolean_different_order(string_series):
sel = string_series[ordered > 0]
exp = string_series[string_series > 0]
tm.assert_series_equal(sel, exp)
-
-
-def test_where_unsafe_int(sint_dtype):
- s = Series(np.arange(10), dtype=sint_dtype)
- mask = s < 5
-
- s[mask] = range(2, 7)
- expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)
-
- tm.assert_series_equal(s, expected)
-
-
-def test_where_unsafe_float(float_dtype):
- s = Series(np.arange(10), dtype=float_dtype)
- mask = s < 5
-
- s[mask] = range(2, 7)
- data = list(range(2, 7)) + list(range(5, 10))
- expected = Series(data, dtype=float_dtype)
-
- tm.assert_series_equal(s, expected)
-
-
-@pytest.mark.parametrize(
- "dtype,expected_dtype",
- [
- (np.int8, np.float64),
- (np.int16, np.float64),
- (np.int32, np.float64),
- (np.int64, np.float64),
- (np.float32, np.float32),
- (np.float64, np.float64),
- ],
-)
-def test_where_unsafe_upcast(dtype, expected_dtype):
- # see gh-9743
- s = Series(np.arange(10), dtype=dtype)
- values = [2.5, 3.5, 4.5, 5.5, 6.5]
- mask = s < 5
- expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
- s[mask] = values
- tm.assert_series_equal(s, expected)
-
-
-def test_where_unsafe():
- # see gh-9731
- s = Series(np.arange(10), dtype="int64")
- values = [2.5, 3.5, 4.5, 5.5]
-
- mask = s > 5
- expected = Series(list(range(6)) + values, dtype="float64")
-
- s[mask] = values
- tm.assert_series_equal(s, expected)
-
- # see gh-3235
- s = Series(np.arange(10), dtype="int64")
- mask = s < 5
- s[mask] = range(2, 7)
- expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
- tm.assert_series_equal(s, expected)
- assert s.dtype == expected.dtype
-
- s = Series(np.arange(10), dtype="int64")
- mask = s > 5
- s[mask] = [0] * 4
- expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
- tm.assert_series_equal(s, expected)
-
- s = Series(np.arange(10))
- mask = s > 5
-
- msg = "cannot assign mismatch length to masked array"
- with pytest.raises(ValueError, match=msg):
- s[mask] = [5, 4, 3, 2, 1]
-
- with pytest.raises(ValueError, match=msg):
- s[mask] = [0] * 5
-
- # dtype changes
- s = Series([1, 2, 3, 4])
- result = s.where(s > 2, np.nan)
- expected = Series([np.nan, np.nan, 3, 4])
- tm.assert_series_equal(result, expected)
-
- # GH 4667
- # setting with None changes dtype
- s = Series(range(10)).astype(float)
- s[8] = None
- result = s[8]
- assert isna(result)
-
- s = Series(range(10)).astype(float)
- s[s > 8] = None
- result = s[isna(s)]
- expected = Series(np.nan, index=[9])
- tm.assert_series_equal(result, expected)
-
-
-def test_where():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.where(cond).dropna()
- rs2 = s[cond]
- tm.assert_series_equal(rs, rs2)
-
- rs = s.where(cond, -s)
- tm.assert_series_equal(rs, s.abs())
-
- rs = s.where(cond)
- assert s.shape == rs.shape
- assert rs is not s
-
- # test alignment
- cond = Series([True, False, False, True, False], index=s.index)
- s2 = -(s.abs())
-
- expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
- rs = s2.where(cond[:3])
- tm.assert_series_equal(rs, expected)
-
- expected = s2.abs()
- expected.iloc[0] = s2[0]
- rs = s2.where(cond[:3], -s2)
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_error():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.where(1)
- with pytest.raises(ValueError, match=msg):
- s.where(cond[:3].values, -s)
-
- # GH 2745
- s = Series([1, 2])
- s[[True, False]] = [0, 1]
- expected = Series([0, 2])
- tm.assert_series_equal(s, expected)
-
- # failures
- msg = "cannot assign mismatch length to masked array"
- with pytest.raises(ValueError, match=msg):
- s[[True, False]] = [0, 2, 3]
- msg = (
- "NumPy boolean array indexing assignment cannot assign 0 input "
- "values to the 1 output values where the mask is true"
- )
- with pytest.raises(ValueError, match=msg):
- s[[True, False]] = []
-
-
-@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
-def test_where_array_like(klass):
- # see gh-15414
- s = Series([1, 2, 3])
- cond = [False, True, True]
- expected = Series([np.nan, 2, 3])
-
- result = s.where(klass(cond))
- tm.assert_series_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "cond",
- [
- [1, 0, 1],
- Series([2, 5, 7]),
- ["True", "False", "True"],
- [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
- ],
-)
-def test_where_invalid_input(cond):
- # see gh-15414: only boolean arrays accepted
- s = Series([1, 2, 3])
- msg = "Boolean array expected for the condition"
-
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.where([True])
-
-
-def test_where_ndframe_align():
- msg = "Array conditional must be same shape as self"
- s = Series([1, 2, 3])
-
- cond = [True]
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- expected = Series([1, np.nan, np.nan])
-
- out = s.where(Series(cond))
- tm.assert_series_equal(out, expected)
-
- cond = np.array([False, True, False, True])
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- expected = Series([np.nan, 2, np.nan])
-
- out = s.where(Series(cond))
- tm.assert_series_equal(out, expected)
-
-
-def test_where_setitem_invalid():
- # GH 2702
- # make sure correct exceptions are raised on invalid list assignment
-
- msg = "cannot set using a {} indexer with a different length than the value"
-
- # slice
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[0:3] = list(range(27))
-
- s[0:3] = list(range(3))
- expected = Series([0, 1, 2])
- tm.assert_series_equal(s.astype(np.int64), expected)
-
- # slice with step
- s = Series(list("abcdef"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[0:4:2] = list(range(27))
-
- s = Series(list("abcdef"))
- s[0:4:2] = list(range(2))
- expected = Series([0, "b", 1, "d", "e", "f"])
- tm.assert_series_equal(s, expected)
-
- # neg slices
- s = Series(list("abcdef"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[:-1] = list(range(27))
-
- s[-3:-1] = list(range(2))
- expected = Series(["a", "b", "c", 0, 1, "f"])
- tm.assert_series_equal(s, expected)
-
- # list
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("list-like")):
- s[[0, 1, 2]] = list(range(27))
-
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("list-like")):
- s[[0, 1, 2]] = list(range(2))
-
- # scalar
- s = Series(list("abc"))
- s[0] = list(range(10))
- expected = Series([list(range(10)), "b", "c"])
- tm.assert_series_equal(s, expected)
-
-
-@pytest.mark.parametrize("size", range(2, 6))
-@pytest.mark.parametrize(
- "mask", [[True, False, False, False, False], [True, False], [False]]
-)
-@pytest.mark.parametrize(
- "item", [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]
-)
-# Test numpy arrays, lists and tuples as the input to be
-# broadcast
-@pytest.mark.parametrize(
- "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
-)
-def test_broadcast(size, mask, item, box):
- selection = np.resize(mask, size)
-
- data = np.arange(size, dtype=float)
-
- # Construct the expected series by taking the source
- # data or item based on the selection
- expected = Series(
- [item if use_item else data[i] for i, use_item in enumerate(selection)]
- )
-
- s = Series(data)
- s[selection] = box(item)
- tm.assert_series_equal(s, expected)
-
- s = Series(data)
- result = s.where(~selection, box(item))
- tm.assert_series_equal(result, expected)
-
- s = Series(data)
- result = s.mask(selection, box(item))
- tm.assert_series_equal(result, expected)
-
-
-def test_where_inplace():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.copy()
-
- rs.where(cond, inplace=True)
- tm.assert_series_equal(rs.dropna(), s[cond])
- tm.assert_series_equal(rs, s.where(cond))
-
- rs = s.copy()
- rs.where(cond, -s, inplace=True)
- tm.assert_series_equal(rs, s.where(cond, -s))
-
-
-def test_where_dups():
- # GH 4550
- # where crashes with dups in index
- s1 = Series(list(range(3)))
- s2 = Series(list(range(3)))
- comb = pd.concat([s1, s2])
- result = comb.where(comb < 2)
- expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(result, expected)
-
- # GH 4548
- # inplace updating not working with dups
- comb[comb < 1] = 5
- expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(comb, expected)
-
- comb[comb < 2] += 10
- expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(comb, expected)
-
-
-def test_where_numeric_with_string():
- # GH 9280
- s = pd.Series([1, 2, 3])
- w = s.where(s > 1, "X")
-
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
- w = s.where(s > 1, ["X", "Y", "Z"])
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
- w = s.where(s > 1, np.array(["X", "Y", "Z"]))
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
-
-def test_where_timedelta_coerce():
- s = Series([1, 2], dtype="timedelta64[ns]")
- expected = Series([10, 10])
- mask = np.array([False, False])
-
- rs = s.where(mask, [10, 10])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10.0)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, 10.0])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, np.nan])
- expected = Series([10, None], dtype="object")
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_datetime_conversion():
- s = Series(date_range("20130102", periods=2))
- expected = Series([10, 10])
- mask = np.array([False, False])
-
- rs = s.where(mask, [10, 10])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10.0)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, 10.0])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, np.nan])
- expected = Series([10, None], dtype="object")
- tm.assert_series_equal(rs, expected)
-
- # GH 15701
- timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
- s = Series([pd.Timestamp(t) for t in timestamps])
- rs = s.where(Series([False, True]))
- expected = Series([pd.NaT, s[1]])
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_dt_tz_values(tz_naive_fixture):
- ser1 = pd.Series(
- pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
- )
- ser2 = pd.Series(
- pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
- )
- mask = pd.Series([True, True, False])
- result = ser1.where(mask, ser2)
- exp = pd.Series(
- pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
- )
- tm.assert_series_equal(exp, result)
-
-
-def test_mask():
- # compare with tested results in test_where
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.where(~cond, np.nan)
- tm.assert_series_equal(rs, s.mask(cond))
-
- rs = s.where(~cond)
- rs2 = s.mask(cond)
- tm.assert_series_equal(rs, rs2)
-
- rs = s.where(~cond, -s)
- rs2 = s.mask(cond, -s)
- tm.assert_series_equal(rs, rs2)
-
- cond = Series([True, False, False, True, False], index=s.index)
- s2 = -(s.abs())
- rs = s2.where(~cond[:3])
- rs2 = s2.mask(cond[:3])
- tm.assert_series_equal(rs, rs2)
-
- rs = s2.where(~cond[:3], -s2)
- rs2 = s2.mask(cond[:3], -s2)
- tm.assert_series_equal(rs, rs2)
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.mask(1)
- with pytest.raises(ValueError, match=msg):
- s.mask(cond[:3].values, -s)
-
- # dtype changes
- s = Series([1, 2, 3, 4])
- result = s.mask(s > 2, np.nan)
- expected = Series([1, 2, np.nan, np.nan])
- tm.assert_series_equal(result, expected)
-
- # see gh-21891
- s = Series([1, 2])
- res = s.mask([True, False])
-
- exp = Series([np.nan, 2])
- tm.assert_series_equal(res, exp)
-
-
-def test_mask_inplace():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.copy()
- rs.mask(cond, inplace=True)
- tm.assert_series_equal(rs.dropna(), s[~cond])
- tm.assert_series_equal(rs, s.mask(cond))
-
- rs = s.copy()
- rs.mask(cond, -s, inplace=True)
- tm.assert_series_equal(rs, s.mask(cond, -s))
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 813c195b36f67..acaa9de88a836 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -148,7 +148,6 @@ def test_frame_datetime64_duplicated():
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
- from pandas import date_range
N = 50
# testing with timezone, GH #2785
@@ -189,8 +188,6 @@ def test_getitem_setitem_datetime_tz_dateutil():
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
- from pandas import date_range
-
N = 50
# testing with timezone, GH #2785
@@ -373,7 +370,6 @@ def test_getitem_median_slice_bug():
def test_datetime_indexing():
- from pandas import date_range
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
diff --git a/pandas/tests/series/indexing/test_get.py b/pandas/tests/series/indexing/test_get.py
new file mode 100644
index 0000000000000..438b61ed203a3
--- /dev/null
+++ b/pandas/tests/series/indexing/test_get.py
@@ -0,0 +1,134 @@
+import numpy as np
+
+import pandas as pd
+from pandas import Series
+
+
+def test_get():
+ # GH 6383
+ s = Series(
+ np.array(
+ [
+ 43,
+ 48,
+ 60,
+ 48,
+ 50,
+ 51,
+ 50,
+ 45,
+ 57,
+ 48,
+ 56,
+ 45,
+ 51,
+ 39,
+ 55,
+ 43,
+ 54,
+ 52,
+ 51,
+ 54,
+ ]
+ )
+ )
+
+ result = s.get(25, 0)
+ expected = 0
+ assert result == expected
+
+ s = Series(
+ np.array(
+ [
+ 43,
+ 48,
+ 60,
+ 48,
+ 50,
+ 51,
+ 50,
+ 45,
+ 57,
+ 48,
+ 56,
+ 45,
+ 51,
+ 39,
+ 55,
+ 43,
+ 54,
+ 52,
+ 51,
+ 54,
+ ]
+ ),
+ index=pd.Float64Index(
+ [
+ 25.0,
+ 36.0,
+ 49.0,
+ 64.0,
+ 81.0,
+ 100.0,
+ 121.0,
+ 144.0,
+ 169.0,
+ 196.0,
+ 1225.0,
+ 1296.0,
+ 1369.0,
+ 1444.0,
+ 1521.0,
+ 1600.0,
+ 1681.0,
+ 1764.0,
+ 1849.0,
+ 1936.0,
+ ]
+ ),
+ )
+
+ result = s.get(25, 0)
+ expected = 43
+ assert result == expected
+
+ # GH 7407
+ # with a boolean accessor
+ df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
+ vc = df.i.value_counts()
+ result = vc.get(99, default="Missing")
+ assert result == "Missing"
+
+ vc = df.b.value_counts()
+ result = vc.get(False, default="Missing")
+ assert result == 3
+
+ result = vc.get(True, default="Missing")
+ assert result == "Missing"
+
+
+def test_get_nan():
+ # GH 8569
+ s = pd.Float64Index(range(10)).to_series()
+ assert s.get(np.nan) is None
+ assert s.get(np.nan, default="Missing") == "Missing"
+
+
+def test_get_nan_multiple():
+ # GH 8569
+ # ensure that fixing "test_get_nan" above hasn't broken get
+ # with multiple elements
+ s = pd.Float64Index(range(10)).to_series()
+
+ idx = [2, 30]
+ assert s.get(idx) is None
+
+ idx = [2, np.nan]
+ assert s.get(idx) is None
+
+ # GH 17295 - all missing keys
+ idx = [20, 30]
+ assert s.get(idx) is None
+
+ idx = [np.nan, np.nan]
+ assert s.get(idx) is None
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 18dbd22b73b35..d2a09efd01331 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -883,41 +883,6 @@ def test_pop():
tm.assert_series_equal(k, expected)
-def test_take():
- s = Series([-1, 5, 6, 2, 4])
-
- actual = s.take([1, 3, 4])
- expected = Series([5, 2, 4], index=[1, 3, 4])
- tm.assert_series_equal(actual, expected)
-
- actual = s.take([-1, 3, 4])
- expected = Series([4, 2, 4], index=[4, 3, 4])
- tm.assert_series_equal(actual, expected)
-
- msg = "index {} is out of bounds for( axis 0 with)? size 5"
- with pytest.raises(IndexError, match=msg.format(10)):
- s.take([1, 10])
- with pytest.raises(IndexError, match=msg.format(5)):
- s.take([2, 5])
-
-
-def test_take_categorical():
- # https://github.com/pandas-dev/pandas/issues/20664
- s = Series(pd.Categorical(["a", "b", "c"]))
- result = s.take([-2, -2, 0])
- expected = Series(
- pd.Categorical(["b", "b", "a"], categories=["a", "b", "c"]), index=[1, 1, 0]
- )
- tm.assert_series_equal(result, expected)
-
-
-def test_head_tail(string_series):
- tm.assert_series_equal(string_series.head(), string_series[:5])
- tm.assert_series_equal(string_series.head(0), string_series[0:0])
- tm.assert_series_equal(string_series.tail(), string_series[-5:])
- tm.assert_series_equal(string_series.tail(0), string_series[0:0])
-
-
def test_uint_drop(any_int_dtype):
# see GH18311
# assigning series.loc[0] = 4 changed series.dtype to int
diff --git a/pandas/tests/series/indexing/test_mask.py b/pandas/tests/series/indexing/test_mask.py
new file mode 100644
index 0000000000000..dc4fb530dbb52
--- /dev/null
+++ b/pandas/tests/series/indexing/test_mask.py
@@ -0,0 +1,65 @@
+import numpy as np
+import pytest
+
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_mask():
+ # compare with tested results in test_where
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.where(~cond, np.nan)
+ tm.assert_series_equal(rs, s.mask(cond))
+
+ rs = s.where(~cond)
+ rs2 = s.mask(cond)
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s.where(~cond, -s)
+ rs2 = s.mask(cond, -s)
+ tm.assert_series_equal(rs, rs2)
+
+ cond = Series([True, False, False, True, False], index=s.index)
+ s2 = -(s.abs())
+ rs = s2.where(~cond[:3])
+ rs2 = s2.mask(cond[:3])
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s2.where(~cond[:3], -s2)
+ rs2 = s2.mask(cond[:3], -s2)
+ tm.assert_series_equal(rs, rs2)
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.mask(1)
+ with pytest.raises(ValueError, match=msg):
+ s.mask(cond[:3].values, -s)
+
+ # dtype changes
+ s = Series([1, 2, 3, 4])
+ result = s.mask(s > 2, np.nan)
+ expected = Series([1, 2, np.nan, np.nan])
+ tm.assert_series_equal(result, expected)
+
+ # see gh-21891
+ s = Series([1, 2])
+ res = s.mask([True, False])
+
+ exp = Series([np.nan, 2])
+ tm.assert_series_equal(res, exp)
+
+
+def test_mask_inplace():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.copy()
+ rs.mask(cond, inplace=True)
+ tm.assert_series_equal(rs.dropna(), s[~cond])
+ tm.assert_series_equal(rs, s.mask(cond))
+
+ rs = s.copy()
+ rs.mask(cond, -s, inplace=True)
+ tm.assert_series_equal(rs, s.mask(cond, -s))
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 3684ca00c2f17..176af6eda2d76 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -1,141 +1,10 @@
import numpy as np
import pytest
-import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
-def test_get():
- # GH 6383
- s = Series(
- np.array(
- [
- 43,
- 48,
- 60,
- 48,
- 50,
- 51,
- 50,
- 45,
- 57,
- 48,
- 56,
- 45,
- 51,
- 39,
- 55,
- 43,
- 54,
- 52,
- 51,
- 54,
- ]
- )
- )
-
- result = s.get(25, 0)
- expected = 0
- assert result == expected
-
- s = Series(
- np.array(
- [
- 43,
- 48,
- 60,
- 48,
- 50,
- 51,
- 50,
- 45,
- 57,
- 48,
- 56,
- 45,
- 51,
- 39,
- 55,
- 43,
- 54,
- 52,
- 51,
- 54,
- ]
- ),
- index=pd.Float64Index(
- [
- 25.0,
- 36.0,
- 49.0,
- 64.0,
- 81.0,
- 100.0,
- 121.0,
- 144.0,
- 169.0,
- 196.0,
- 1225.0,
- 1296.0,
- 1369.0,
- 1444.0,
- 1521.0,
- 1600.0,
- 1681.0,
- 1764.0,
- 1849.0,
- 1936.0,
- ]
- ),
- )
-
- result = s.get(25, 0)
- expected = 43
- assert result == expected
-
- # GH 7407
- # with a boolean accessor
- df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
- vc = df.i.value_counts()
- result = vc.get(99, default="Missing")
- assert result == "Missing"
-
- vc = df.b.value_counts()
- result = vc.get(False, default="Missing")
- assert result == 3
-
- result = vc.get(True, default="Missing")
- assert result == "Missing"
-
-
-def test_get_nan():
- # GH 8569
- s = pd.Float64Index(range(10)).to_series()
- assert s.get(np.nan) is None
- assert s.get(np.nan, default="Missing") == "Missing"
-
-
-def test_get_nan_multiple():
- # GH 8569
- # ensure that fixing "test_get_nan" above hasn't broken get
- # with multiple elements
- s = pd.Float64Index(range(10)).to_series()
-
- idx = [2, 30]
- assert s.get(idx) is None
-
- idx = [2, np.nan]
- assert s.get(idx) is None
-
- # GH 17295 - all missing keys
- idx = [20, 30]
- assert s.get(idx) is None
-
- idx = [np.nan, np.nan]
- assert s.get(idx) is None
-
-
def test_delitem():
# GH 5542
# should delete the item inplace
diff --git a/pandas/tests/series/indexing/test_take.py b/pandas/tests/series/indexing/test_take.py
new file mode 100644
index 0000000000000..9368d49e5ff2b
--- /dev/null
+++ b/pandas/tests/series/indexing/test_take.py
@@ -0,0 +1,33 @@
+import pytest
+
+import pandas as pd
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_take():
+ ser = Series([-1, 5, 6, 2, 4])
+
+ actual = ser.take([1, 3, 4])
+ expected = Series([5, 2, 4], index=[1, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ actual = ser.take([-1, 3, 4])
+ expected = Series([4, 2, 4], index=[4, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ msg = "index {} is out of bounds for( axis 0 with)? size 5"
+ with pytest.raises(IndexError, match=msg.format(10)):
+ ser.take([1, 10])
+ with pytest.raises(IndexError, match=msg.format(5)):
+ ser.take([2, 5])
+
+
+def test_take_categorical():
+ # https://github.com/pandas-dev/pandas/issues/20664
+ ser = Series(pd.Categorical(["a", "b", "c"]))
+ result = ser.take([-2, -2, 0])
+ expected = Series(
+ pd.Categorical(["b", "b", "a"], categories=["a", "b", "c"]), index=[1, 1, 0]
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
new file mode 100644
index 0000000000000..9703f5afaf689
--- /dev/null
+++ b/pandas/tests/series/indexing/test_where.py
@@ -0,0 +1,437 @@
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.common import is_integer
+
+import pandas as pd
+from pandas import Series, Timestamp, date_range, isna
+import pandas._testing as tm
+
+
+def test_where_unsafe_int(sint_dtype):
+ s = Series(np.arange(10), dtype=sint_dtype)
+ mask = s < 5
+
+ s[mask] = range(2, 7)
+ expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)
+
+ tm.assert_series_equal(s, expected)
+
+
+def test_where_unsafe_float(float_dtype):
+ s = Series(np.arange(10), dtype=float_dtype)
+ mask = s < 5
+
+ s[mask] = range(2, 7)
+ data = list(range(2, 7)) + list(range(5, 10))
+ expected = Series(data, dtype=float_dtype)
+
+ tm.assert_series_equal(s, expected)
+
+
+@pytest.mark.parametrize(
+ "dtype,expected_dtype",
+ [
+ (np.int8, np.float64),
+ (np.int16, np.float64),
+ (np.int32, np.float64),
+ (np.int64, np.float64),
+ (np.float32, np.float32),
+ (np.float64, np.float64),
+ ],
+)
+def test_where_unsafe_upcast(dtype, expected_dtype):
+ # see gh-9743
+ s = Series(np.arange(10), dtype=dtype)
+ values = [2.5, 3.5, 4.5, 5.5, 6.5]
+ mask = s < 5
+ expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
+ s[mask] = values
+ tm.assert_series_equal(s, expected)
+
+
+def test_where_unsafe():
+ # see gh-9731
+ s = Series(np.arange(10), dtype="int64")
+ values = [2.5, 3.5, 4.5, 5.5]
+
+ mask = s > 5
+ expected = Series(list(range(6)) + values, dtype="float64")
+
+ s[mask] = values
+ tm.assert_series_equal(s, expected)
+
+ # see gh-3235
+ s = Series(np.arange(10), dtype="int64")
+ mask = s < 5
+ s[mask] = range(2, 7)
+ expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
+ tm.assert_series_equal(s, expected)
+ assert s.dtype == expected.dtype
+
+ s = Series(np.arange(10), dtype="int64")
+ mask = s > 5
+ s[mask] = [0] * 4
+ expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
+ tm.assert_series_equal(s, expected)
+
+ s = Series(np.arange(10))
+ mask = s > 5
+
+ msg = "cannot assign mismatch length to masked array"
+ with pytest.raises(ValueError, match=msg):
+ s[mask] = [5, 4, 3, 2, 1]
+
+ with pytest.raises(ValueError, match=msg):
+ s[mask] = [0] * 5
+
+ # dtype changes
+ s = Series([1, 2, 3, 4])
+ result = s.where(s > 2, np.nan)
+ expected = Series([np.nan, np.nan, 3, 4])
+ tm.assert_series_equal(result, expected)
+
+ # GH 4667
+ # setting with None changes dtype
+ s = Series(range(10)).astype(float)
+ s[8] = None
+ result = s[8]
+ assert isna(result)
+
+ s = Series(range(10)).astype(float)
+ s[s > 8] = None
+ result = s[isna(s)]
+ expected = Series(np.nan, index=[9])
+ tm.assert_series_equal(result, expected)
+
+
+def test_where():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.where(cond).dropna()
+ rs2 = s[cond]
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s.where(cond, -s)
+ tm.assert_series_equal(rs, s.abs())
+
+ rs = s.where(cond)
+ assert s.shape == rs.shape
+ assert rs is not s
+
+ # test alignment
+ cond = Series([True, False, False, True, False], index=s.index)
+ s2 = -(s.abs())
+
+ expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
+ rs = s2.where(cond[:3])
+ tm.assert_series_equal(rs, expected)
+
+ expected = s2.abs()
+ expected.iloc[0] = s2[0]
+ rs = s2.where(cond[:3], -s2)
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_error():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.where(1)
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond[:3].values, -s)
+
+ # GH 2745
+ s = Series([1, 2])
+ s[[True, False]] = [0, 1]
+ expected = Series([0, 2])
+ tm.assert_series_equal(s, expected)
+
+ # failures
+ msg = "cannot assign mismatch length to masked array"
+ with pytest.raises(ValueError, match=msg):
+ s[[True, False]] = [0, 2, 3]
+ msg = (
+ "NumPy boolean array indexing assignment cannot assign 0 input "
+ "values to the 1 output values where the mask is true"
+ )
+ with pytest.raises(ValueError, match=msg):
+ s[[True, False]] = []
+
+
+@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+def test_where_array_like(klass):
+ # see gh-15414
+ s = Series([1, 2, 3])
+ cond = [False, True, True]
+ expected = Series([np.nan, 2, 3])
+
+ result = s.where(klass(cond))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "cond",
+ [
+ [1, 0, 1],
+ Series([2, 5, 7]),
+ ["True", "False", "True"],
+ [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
+ ],
+)
+def test_where_invalid_input(cond):
+ # see gh-15414: only boolean arrays accepted
+ s = Series([1, 2, 3])
+ msg = "Boolean array expected for the condition"
+
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.where([True])
+
+
+def test_where_ndframe_align():
+ msg = "Array conditional must be same shape as self"
+ s = Series([1, 2, 3])
+
+ cond = [True]
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ expected = Series([1, np.nan, np.nan])
+
+ out = s.where(Series(cond))
+ tm.assert_series_equal(out, expected)
+
+ cond = np.array([False, True, False, True])
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ expected = Series([np.nan, 2, np.nan])
+
+ out = s.where(Series(cond))
+ tm.assert_series_equal(out, expected)
+
+
+def test_where_setitem_invalid():
+ # GH 2702
+ # make sure correct exceptions are raised on invalid list assignment
+
+ msg = "cannot set using a {} indexer with a different length than the value"
+
+ # slice
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[0:3] = list(range(27))
+
+ s[0:3] = list(range(3))
+ expected = Series([0, 1, 2])
+ tm.assert_series_equal(s.astype(np.int64), expected)
+
+ # slice with step
+ s = Series(list("abcdef"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[0:4:2] = list(range(27))
+
+ s = Series(list("abcdef"))
+ s[0:4:2] = list(range(2))
+ expected = Series([0, "b", 1, "d", "e", "f"])
+ tm.assert_series_equal(s, expected)
+
+ # neg slices
+ s = Series(list("abcdef"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[:-1] = list(range(27))
+
+ s[-3:-1] = list(range(2))
+ expected = Series(["a", "b", "c", 0, 1, "f"])
+ tm.assert_series_equal(s, expected)
+
+ # list
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("list-like")):
+ s[[0, 1, 2]] = list(range(27))
+
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("list-like")):
+ s[[0, 1, 2]] = list(range(2))
+
+ # scalar
+ s = Series(list("abc"))
+ s[0] = list(range(10))
+ expected = Series([list(range(10)), "b", "c"])
+ tm.assert_series_equal(s, expected)
+
+
+@pytest.mark.parametrize("size", range(2, 6))
+@pytest.mark.parametrize(
+ "mask", [[True, False, False, False, False], [True, False], [False]]
+)
+@pytest.mark.parametrize(
+ "item", [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]
+)
+# Test numpy arrays, lists and tuples as the input to be
+# broadcast
+@pytest.mark.parametrize(
+ "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
+)
+def test_broadcast(size, mask, item, box):
+ selection = np.resize(mask, size)
+
+ data = np.arange(size, dtype=float)
+
+ # Construct the expected series by taking the source
+ # data or item based on the selection
+ expected = Series(
+ [item if use_item else data[i] for i, use_item in enumerate(selection)]
+ )
+
+ s = Series(data)
+ s[selection] = box(item)
+ tm.assert_series_equal(s, expected)
+
+ s = Series(data)
+ result = s.where(~selection, box(item))
+ tm.assert_series_equal(result, expected)
+
+ s = Series(data)
+ result = s.mask(selection, box(item))
+ tm.assert_series_equal(result, expected)
+
+
+def test_where_inplace():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.copy()
+
+ rs.where(cond, inplace=True)
+ tm.assert_series_equal(rs.dropna(), s[cond])
+ tm.assert_series_equal(rs, s.where(cond))
+
+ rs = s.copy()
+ rs.where(cond, -s, inplace=True)
+ tm.assert_series_equal(rs, s.where(cond, -s))
+
+
+def test_where_dups():
+ # GH 4550
+ # where crashes with dups in index
+ s1 = Series(list(range(3)))
+ s2 = Series(list(range(3)))
+ comb = pd.concat([s1, s2])
+ result = comb.where(comb < 2)
+ expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(result, expected)
+
+ # GH 4548
+ # inplace updating not working with dups
+ comb[comb < 1] = 5
+ expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(comb, expected)
+
+ comb[comb < 2] += 10
+ expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(comb, expected)
+
+
+def test_where_numeric_with_string():
+ # GH 9280
+ s = pd.Series([1, 2, 3])
+ w = s.where(s > 1, "X")
+
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+ w = s.where(s > 1, ["X", "Y", "Z"])
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+ w = s.where(s > 1, np.array(["X", "Y", "Z"]))
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+
+def test_where_timedelta_coerce():
+ s = Series([1, 2], dtype="timedelta64[ns]")
+ expected = Series([10, 10])
+ mask = np.array([False, False])
+
+ rs = s.where(mask, [10, 10])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10.0)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, 10.0])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, np.nan])
+ expected = Series([10, None], dtype="object")
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_datetime_conversion():
+ s = Series(date_range("20130102", periods=2))
+ expected = Series([10, 10])
+ mask = np.array([False, False])
+
+ rs = s.where(mask, [10, 10])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10.0)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, 10.0])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, np.nan])
+ expected = Series([10, None], dtype="object")
+ tm.assert_series_equal(rs, expected)
+
+ # GH 15701
+ timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
+ s = Series([pd.Timestamp(t) for t in timestamps])
+ rs = s.where(Series([False, True]))
+ expected = Series([pd.NaT, s[1]])
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_dt_tz_values(tz_naive_fixture):
+ ser1 = pd.Series(
+ pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
+ )
+ ser2 = pd.Series(
+ pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
+ )
+ mask = pd.Series([True, True, False])
+ result = ser1.where(mask, ser2)
+ exp = pd.Series(
+ pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
+ )
+ tm.assert_series_equal(exp, result)
diff --git a/pandas/tests/series/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
similarity index 100%
rename from pandas/tests/series/test_convert_dtypes.py
rename to pandas/tests/series/methods/test_convert_dtypes.py
diff --git a/pandas/tests/series/methods/test_head_tail.py b/pandas/tests/series/methods/test_head_tail.py
new file mode 100644
index 0000000000000..d9f8d85eda350
--- /dev/null
+++ b/pandas/tests/series/methods/test_head_tail.py
@@ -0,0 +1,8 @@
+import pandas._testing as tm
+
+
+def test_head_tail(string_series):
+ tm.assert_series_equal(string_series.head(), string_series[:5])
+ tm.assert_series_equal(string_series.head(0), string_series[0:0])
+ tm.assert_series_equal(string_series.tail(), string_series[-5:])
+ tm.assert_series_equal(string_series.tail(0), string_series[0:0])
diff --git a/pandas/tests/series/test_reshaping.py b/pandas/tests/series/methods/test_unstack.py
similarity index 100%
rename from pandas/tests/series/test_reshaping.py
rename to pandas/tests/series/methods/test_unstack.py
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 239353d3955b4..4cb471597b67a 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -4,7 +4,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, to_datetime
import pandas._testing as tm
@@ -252,7 +252,6 @@ def test_concat_empty_series_dtypes(self):
assert result.dtype == expected
def test_combine_first_dt64(self):
- from pandas.core.tools.datetimes import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
| We've got indexing tests pretty scattered; this is organizing the ones in tests.series.indexing by method | https://api.github.com/repos/pandas-dev/pandas/pulls/31557 | 2020-02-02T02:15:51Z | 2020-02-02T17:34:59Z | 2020-02-02T17:34:58Z | 2020-02-02T17:38:48Z |
[MRG] f-string updates for issue #29547 | diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 8829c242b1129..d9f21f0b274ac 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -352,8 +352,8 @@ def __init__(
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
msg = (
- "cannot concatenate object of type '{typ}'; "
- "only Series and DataFrame objs are valid".format(typ=type(obj))
+ f"cannot concatenate object of type '{type(obj)}'; "
+ "only Series and DataFrame objs are valid"
)
raise TypeError(msg)
@@ -403,8 +403,7 @@ def __init__(
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError(
- "axis must be between 0 and {ndim}, input was "
- "{axis}".format(ndim=sample.ndim, axis=axis)
+ f"axis must be between 0 and {sample.ndim}, input was {axis}"
)
# if we have mixed ndims, then convert to highest ndim
@@ -622,11 +621,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
try:
i = level.get_loc(key)
except KeyError:
- raise ValueError(
- "Key {key!s} not in level {level!s}".format(
- key=key, level=level
- )
- )
+ raise ValueError(f"Key {key} not in level {level}")
to_concat.append(np.repeat(i, len(index)))
codes_list.append(np.concatenate(to_concat))
@@ -677,11 +672,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
mask = mapped == -1
if mask.any():
- raise ValueError(
- "Values not found in passed level: {hlevel!s}".format(
- hlevel=hlevel[mask]
- )
- )
+ raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}")
new_codes.append(np.repeat(mapped, n))
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index d04287e1e9088..782b8043430e1 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -88,9 +88,7 @@ def melt(
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
- var_name = [
- "variable_{i}".format(i=i) for i in range(len(frame.columns.names))
- ]
+ var_name = [f"variable_{i}" for i in range(len(frame.columns.names))]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
@@ -417,9 +415,7 @@ def wide_to_long(
"""
def get_var_names(df, stub: str, sep: str, suffix: str) -> List[str]:
- regex = r"^{stub}{sep}{suffix}$".format(
- stub=re.escape(stub), sep=re.escape(sep), suffix=suffix
- )
+ regex = fr"^{re.escape(stub)}{re.escape(sep)}{suffix}$"
pattern = re.compile(regex)
return [col for col in df.columns if pattern.match(col)]
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index ceee2f66dba42..480c5279ad3f6 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -611,8 +611,9 @@ def __init__(
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
"merging between different levels can give an unintended "
- "result ({left} levels on the left, {right} on the right)"
- ).format(left=_left.columns.nlevels, right=_right.columns.nlevels)
+ f"result ({left.columns.nlevels} levels on the left,"
+ f"{right.columns.nlevels} on the right)"
+ )
warnings.warn(msg, UserWarning)
self._validate_specification()
@@ -679,7 +680,7 @@ def _indicator_pre_merge(
if i in columns:
raise ValueError(
"Cannot use `indicator=True` option when "
- "data contains a column named {name}".format(name=i)
+ f"data contains a column named {i}"
)
if self.indicator_name in columns:
raise ValueError(
@@ -831,7 +832,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
else:
result.index = Index(key_col, name=name)
else:
- result.insert(i, name or "key_{i}".format(i=i), key_col)
+ result.insert(i, name or f"key_{i}", key_col)
def _get_join_indexers(self):
""" return the join indexers """
@@ -1185,13 +1186,10 @@ def _validate_specification(self):
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
- "Merge options: left_on={lon}, right_on={ron}, "
- "left_index={lidx}, right_index={ridx}".format(
- lon=self.left_on,
- ron=self.right_on,
- lidx=self.left_index,
- ridx=self.right_index,
- )
+ f"Merge options: left_on={self.left_on}, "
+ f"right_on={self.right_on}, "
+ f"left_index={self.left_index}, "
+ f"right_index={self.right_index}"
)
if not common_cols.is_unique:
raise MergeError(f"Data columns not unique: {repr(common_cols)}")
@@ -1486,12 +1484,12 @@ def get_result(self):
def _asof_function(direction: str):
- name = "asof_join_{dir}".format(dir=direction)
+ name = f"asof_join_{direction}"
return getattr(libjoin, name, None)
def _asof_by_function(direction: str):
- name = "asof_join_{dir}_on_X_by_Y".format(dir=direction)
+ name = f"asof_join_{direction}_on_X_by_Y"
return getattr(libjoin, name, None)
@@ -1601,9 +1599,7 @@ def _validate_specification(self):
# check 'direction' is valid
if self.direction not in ["backward", "forward", "nearest"]:
- raise MergeError(
- "direction invalid: {direction}".format(direction=self.direction)
- )
+ raise MergeError(f"direction invalid: {self.direction}")
@property
def _asof_key(self):
@@ -1628,17 +1624,13 @@ def _get_merge_keys(self):
# later with a ValueError, so we don't *need* to check
# for them here.
msg = (
- "incompatible merge keys [{i}] {lkdtype} and "
- "{rkdtype}, both sides category, but not equal ones".format(
- i=i, lkdtype=repr(lk.dtype), rkdtype=repr(rk.dtype)
- )
+ f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
+ f"{repr(rk.dtype)}, both sides category, but not equal ones"
)
else:
msg = (
- "incompatible merge keys [{i}] {lkdtype} and "
- "{rkdtype}, must be the same type".format(
- i=i, lkdtype=repr(lk.dtype), rkdtype=repr(rk.dtype)
- )
+ f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
+ f"{repr(rk.dtype)}, must be the same type"
)
raise MergeError(msg)
@@ -1651,10 +1643,8 @@ def _get_merge_keys(self):
lt = left_join_keys[-1]
msg = (
- "incompatible tolerance {tolerance}, must be compat "
- "with type {lkdtype}".format(
- tolerance=type(self.tolerance), lkdtype=repr(lt.dtype)
- )
+ f"incompatible tolerance {self.tolerance}, must be compat "
+ f"with type {repr(lk.dtype)}"
)
if needs_i8_conversion(lt):
@@ -1680,8 +1670,11 @@ def _get_merge_keys(self):
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
- msg = "allow_exact_matches must be boolean, passed {passed}"
- raise MergeError(msg.format(passed=self.allow_exact_matches))
+ msg = (
+ "allow_exact_matches must be boolean, "
+ f"passed {self.allow_exact_matches}"
+ )
+ raise MergeError(msg)
return left_join_keys, right_join_keys, join_names
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index a5a9ec9fb79ba..053fb86836ff8 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -200,7 +200,7 @@ def _add_margins(
if not isinstance(margins_name, str):
raise ValueError("margins_name argument must be a string")
- msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
+ msg = f'Conflicting name "{margins_name}" in margins'
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
@@ -650,9 +650,7 @@ def _normalize(table, normalize, margins: bool, margins_name="All"):
if (margins_name not in table.iloc[-1, :].name) | (
margins_name != table.iloc[:, -1].name
):
- raise ValueError(
- "{mname} not in pivoted DataFrame".format(mname=margins_name)
- )
+ raise ValueError(f"{margins_name} not in pivoted DataFrame")
column_margin = table.iloc[:-1, -1]
index_margin = table.iloc[-1, :-1]
@@ -702,7 +700,7 @@ def _get_names(arrs, names, prefix: str = "row"):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
- names.append("{prefix}_{i}".format(prefix=prefix, i=i))
+ names.append(f"{prefix}_{i}")
else:
if len(names) != len(arrs):
raise AssertionError("arrays and names must have the same length")
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f00ff0d4ba5ed..359e5b956f8a5 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -873,15 +873,13 @@ def get_dummies(
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
- len_msg = (
- "Length of '{name}' ({len_item}) did not match the "
- "length of the columns being encoded ({len_enc})."
- )
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
- len_msg = len_msg.format(
- name=name, len_item=len(item), len_enc=data_to_encode.shape[1]
+ len_msg = (
+ f"Length of '{name}' ({len(item)}) did not match the "
+ "length of the columns being encoded "
+ f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
@@ -990,8 +988,7 @@ def get_empty_frame(data) -> DataFrame:
# PY2 embedded unicode, gh-22084
def _make_col_name(prefix, prefix_sep, level) -> str:
- fstr = "{prefix}{prefix_sep}{level}"
- return fstr.format(prefix=prefix, prefix_sep=prefix_sep, level=level)
+ return f"{prefix}{prefix_sep}{level}"
dummy_cols = [_make_col_name(prefix, prefix_sep, level) for level in levels]
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f9acf5b60a3cd..fd189c7435b29 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -371,10 +371,8 @@ def test_no_overlap_more_informative_error(self):
msg = (
"No common columns to perform merge on. "
- "Merge options: left_on={lon}, right_on={ron}, "
- "left_index={lidx}, right_index={ridx}".format(
- lon=None, ron=None, lidx=False, ridx=False
- )
+ f"Merge options: left_on={None}, right_on={None}, "
+ f"left_index={False}, right_index={False}"
)
with pytest.raises(MergeError, match=msg):
| Addresses, in part, https://github.com/pandas-dev/pandas/issues/29547
…ape/pivot, reshape/reshape
- [ x] xref #29547
- [n/a ] tests added / passed
- [x ] passes `black pandas`
- [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [n/a ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31556 | 2020-02-02T01:22:48Z | 2020-02-04T00:41:19Z | 2020-02-04T00:41:19Z | 2020-02-04T00:41:26Z |
Backport PR #31545 on branch 1.0.x (BUG&TST: df.replace fail after converting to new dtype) | diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index aa91e7a489356..92b74c4409d7d 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1356,3 +1356,10 @@ def test_replace_replacer_dtype(self, replacer):
result = df.replace({"a": replacer, "b": replacer})
expected = pd.DataFrame([replacer])
tm.assert_frame_equal(result, expected)
+
+ def test_replace_after_convert_dtypes(self):
+ # GH31517
+ df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
+ result = df.replace(1, 10)
+ expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
+ tm.assert_frame_equal(result, expected)
| Backport PR #31545: BUG&TST: df.replace fail after converting to new dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/31555 | 2020-02-01T23:10:51Z | 2020-02-02T19:08:28Z | null | 2020-02-02T19:16:54Z |
REF: move _convert_to_indexer to Loc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 7e56148b7569e..3d9069a5516f1 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1577,6 +1577,43 @@ def _validate_read_indexer(
"https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
+ def _convert_to_indexer(self, key, axis: int):
+ raise AbstractMethodError(self)
+
+
+class _LocationIndexer(_NDFrameIndexer):
+ _takeable: bool = False
+
+ def __getitem__(self, key):
+ if type(key) is tuple:
+ key = tuple(com.apply_if_callable(x, self.obj) for x in key)
+ if self._is_scalar_access(key):
+ try:
+ return self.obj._get_value(*key, takeable=self._takeable)
+ except (KeyError, IndexError, AttributeError):
+ # AttributeError for IntervalTree get_value
+ pass
+ return self._getitem_tuple(key)
+ else:
+ # we by definition only have the 0th axis
+ axis = self.axis or 0
+
+ maybe_callable = com.apply_if_callable(key, self.obj)
+ return self._getitem_axis(maybe_callable, axis=axis)
+
+ def _is_scalar_access(self, key: Tuple):
+ raise NotImplementedError()
+
+ def _getitem_axis(self, key, axis: int):
+ raise NotImplementedError()
+
+ def _getbool_axis(self, key, axis: int):
+ # caller is responsible for ensuring non-None axis
+ labels = self.obj._get_axis(axis)
+ key = check_bool_indexer(labels, key)
+ inds = key.nonzero()[0]
+ return self.obj._take_with_is_copy(inds, axis=axis)
+
def _convert_to_indexer(self, key, axis: int):
"""
Convert indexing key into something we can use to do actual fancy
@@ -1631,15 +1668,8 @@ def _convert_to_indexer(self, key, axis: int):
# if we are setting and its not a valid location
# its an insert which fails by definition
- if self.name == "loc":
- # always valid
- return {"key": key}
-
- if key >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex):
- # a positional
- raise ValueError("cannot set by positional indexing with enlargement")
-
- return key
+ # always valid
+ return {"key": key}
if is_nested_tuple(key, labels):
return labels.get_locs(key)
@@ -1663,40 +1693,6 @@ def _convert_to_indexer(self, key, axis: int):
raise
-class _LocationIndexer(_NDFrameIndexer):
- _takeable: bool = False
-
- def __getitem__(self, key):
- if type(key) is tuple:
- key = tuple(com.apply_if_callable(x, self.obj) for x in key)
- if self._is_scalar_access(key):
- try:
- return self.obj._get_value(*key, takeable=self._takeable)
- except (KeyError, IndexError, AttributeError):
- # AttributeError for IntervalTree get_value
- pass
- return self._getitem_tuple(key)
- else:
- # we by definition only have the 0th axis
- axis = self.axis or 0
-
- maybe_callable = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(maybe_callable, axis=axis)
-
- def _is_scalar_access(self, key: Tuple):
- raise NotImplementedError()
-
- def _getitem_axis(self, key, axis: int):
- raise NotImplementedError()
-
- def _getbool_axis(self, key, axis: int):
- # caller is responsible for ensuring non-None axis
- labels = self.obj._get_axis(axis)
- key = check_bool_indexer(labels, key)
- inds = key.nonzero()[0]
- return self.obj._take_with_is_copy(inds, axis=axis)
-
-
@Appender(IndexingMixin.loc.__doc__)
class _LocIndexer(_LocationIndexer):
_valid_types = (
| It is overriden in iLoc, so by putting it directly in Loc we can simplify bits of it that check `self.name` and keep only the `self.name == "loc"` branches. This lets us get rid of a bit of dead code (L1638-L1642).
Most of the diff is misleading; it is only the one method that is moved, with a placeholder kept in NDFrameIndexer | https://api.github.com/repos/pandas-dev/pandas/pulls/31554 | 2020-02-01T21:47:35Z | 2020-02-01T22:36:50Z | 2020-02-01T22:36:50Z | 2020-02-01T22:40:26Z |
REF: collect MultiIndex indexing, set methods | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 889622f44bbb7..4af9901d79a46 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -234,6 +234,8 @@ class MultiIndex(Index):
_comparables = ["names"]
rename = Index.set_names
+ _tuples = None
+
# --------------------------------------------------------------------
# Constructors
@@ -620,29 +622,29 @@ def from_frame(cls, df, sortorder=None, names=None):
# --------------------------------------------------------------------
- @property
- def levels(self):
- result = [
- x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
- ]
- for level in result:
- # disallow midx.levels[0].name = "foo"
- level._no_setting_name = True
- return FrozenList(result)
-
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
- def shape(self):
- """
- Return a tuple of the shape of the underlying data.
- """
- # overriding the base Index.shape definition to avoid materializing
- # the values (GH-27384, GH-27775)
- return (len(self),)
+ def values(self):
+ if self._tuples is not None:
+ return self._tuples
+
+ values = []
+
+ for i in range(self.nlevels):
+ vals = self._get_level_values(i)
+ if is_categorical_dtype(vals):
+ vals = vals._internal_get_values()
+ if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
+ vals = vals.astype(object)
+ vals = np.array(vals, copy=False)
+ values.append(vals)
+
+ self._tuples = lib.fast_zip(values)
+ return self._tuples
@property
def array(self):
@@ -659,6 +661,31 @@ def array(self):
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
+ @property
+ def shape(self):
+ """
+ Return a tuple of the shape of the underlying data.
+ """
+ # overriding the base Index.shape definition to avoid materializing
+ # the values (GH-27384, GH-27775)
+ return (len(self),)
+
+ def __len__(self) -> int:
+ return len(self.codes[0])
+
+ # --------------------------------------------------------------------
+ # Levels Methods
+
+ @property
+ def levels(self):
+ result = [
+ x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
+ ]
+ for level in result:
+ # disallow midx.levels[0].name = "foo"
+ level._no_setting_name = True
+ return FrozenList(result)
+
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
@@ -785,6 +812,23 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
if not inplace:
return idx
+ @property
+ def nlevels(self) -> int:
+ """
+ Integer number of levels in this MultiIndex.
+ """
+ return len(self._levels)
+
+ @property
+ def levshape(self):
+ """
+ A tuple with the length of each level.
+ """
+ return tuple(len(x) for x in self.levels)
+
+ # --------------------------------------------------------------------
+ # Codes Methods
+
@property
def codes(self):
return self._codes
@@ -895,6 +939,57 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
if not inplace:
return idx
+ # --------------------------------------------------------------------
+ # Index Internals
+
+ @cache_readonly
+ def _engine(self):
+ # Calculate the number of bits needed to represent labels in each
+ # level, as log2 of their sizes (including -1 for NaN):
+ sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
+
+ # Sum bit counts, starting from the _right_....
+ lev_bits = np.cumsum(sizes[::-1])[::-1]
+
+ # ... in order to obtain offsets such that sorting the combination of
+ # shifted codes (one for each level, resulting in a unique integer) is
+ # equivalent to sorting lexicographically the codes themselves. Notice
+ # that each level needs to be shifted by the number of bits needed to
+ # represent the _previous_ ones:
+ offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
+
+ # Check the total number of bits needed for our representation:
+ if lev_bits[0] > 64:
+ # The levels would overflow a 64 bit uint - use Python integers:
+ return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
+ return MultiIndexUIntEngine(self.levels, self.codes, offsets)
+
+ @property
+ def _constructor(self):
+ return MultiIndex.from_tuples
+
+ @Appender(Index._shallow_copy.__doc__)
+ def _shallow_copy(self, values=None, **kwargs):
+ if values is not None:
+ names = kwargs.pop("names", kwargs.pop("name", self.names))
+ # discards freq
+ kwargs.pop("freq", None)
+ return MultiIndex.from_tuples(values, names=names, **kwargs)
+ return self.copy(**kwargs)
+
+ def _shallow_copy_with_infer(self, values, **kwargs):
+ # On equal MultiIndexes the difference is empty.
+ # Therefore, an empty MultiIndex is returned GH13490
+ if len(values) == 0:
+ return MultiIndex(
+ levels=[[] for _ in range(self.nlevels)],
+ codes=[[] for _ in range(self.nlevels)],
+ **kwargs,
+ )
+ return self._shallow_copy(values, **kwargs)
+
+ # --------------------------------------------------------------------
+
def copy(
self,
names=None,
@@ -961,17 +1056,6 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy_with_infer(self, values, **kwargs):
- # On equal MultiIndexes the difference is empty.
- # Therefore, an empty MultiIndex is returned GH13490
- if len(values) == 0:
- return MultiIndex(
- levels=[[] for _ in range(self.nlevels)],
- codes=[[] for _ in range(self.nlevels)],
- **kwargs,
- )
- return self._shallow_copy(values, **kwargs)
-
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
@@ -981,15 +1065,6 @@ def __contains__(self, key: Any) -> bool:
except (LookupError, TypeError, ValueError):
return False
- @Appender(Index._shallow_copy.__doc__)
- def _shallow_copy(self, values=None, **kwargs):
- if values is not None:
- names = kwargs.pop("names", kwargs.pop("name", self.names))
- # discards freq
- kwargs.pop("freq", None)
- return MultiIndex.from_tuples(values, names=names, **kwargs)
- return self.copy(**kwargs)
-
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
@@ -1039,6 +1114,7 @@ def _nbytes(self, deep: bool = False) -> int:
# --------------------------------------------------------------------
# Rendering Methods
+
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
@@ -1165,9 +1241,7 @@ def format(
return result_levels
# --------------------------------------------------------------------
-
- def __len__(self) -> int:
- return len(self.codes[0])
+ # Names Methods
def _get_names(self):
return FrozenList(self._names)
@@ -1231,6 +1305,8 @@ def _set_names(self, names, level=None, validate=True):
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
+ # --------------------------------------------------------------------
+
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
@@ -1268,10 +1344,6 @@ def _get_grouper_for_level(self, mapper, level):
return grouper, codes, level_index
- @property
- def _constructor(self):
- return MultiIndex.from_tuples
-
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
@@ -1303,49 +1375,6 @@ def _get_level_number(self, level) -> int:
)
return level
- _tuples = None
-
- @cache_readonly
- def _engine(self):
- # Calculate the number of bits needed to represent labels in each
- # level, as log2 of their sizes (including -1 for NaN):
- sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
-
- # Sum bit counts, starting from the _right_....
- lev_bits = np.cumsum(sizes[::-1])[::-1]
-
- # ... in order to obtain offsets such that sorting the combination of
- # shifted codes (one for each level, resulting in a unique integer) is
- # equivalent to sorting lexicographically the codes themselves. Notice
- # that each level needs to be shifted by the number of bits needed to
- # represent the _previous_ ones:
- offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
-
- # Check the total number of bits needed for our representation:
- if lev_bits[0] > 64:
- # The levels would overflow a 64 bit uint - use Python integers:
- return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
- return MultiIndexUIntEngine(self.levels, self.codes, offsets)
-
- @property
- def values(self):
- if self._tuples is not None:
- return self._tuples
-
- values = []
-
- for i in range(self.nlevels):
- vals = self._get_level_values(i)
- if is_categorical_dtype(vals):
- vals = vals._internal_get_values()
- if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
- vals = vals.astype(object)
- vals = np.array(vals, copy=False)
- values.append(vals)
-
- self._tuples = lib.fast_zip(values)
- return self._tuples
-
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
@@ -1461,68 +1490,6 @@ def dropna(self, how="any"):
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
- def get_value(self, series, key):
- # Label-based
- s = com.values_from_object(series)
- k = com.values_from_object(key)
-
- def _try_mi(k):
- # TODO: what if a level contains tuples??
- loc = self.get_loc(k)
- new_values = series._values[loc]
- new_index = self[loc]
- new_index = maybe_droplevels(new_index, k)
- return series._constructor(
- new_values, index=new_index, name=series.name
- ).__finalize__(self)
-
- try:
- return self._engine.get_value(s, k)
- except KeyError as e1:
- try:
- return _try_mi(key)
- except KeyError:
- pass
-
- try:
- return libindex.get_value_at(s, k)
- except IndexError:
- raise
- except TypeError:
- # generator/iterator-like
- if is_iterator(key):
- raise InvalidIndexError(key)
- else:
- raise e1
- except Exception: # pragma: no cover
- raise e1
- except TypeError:
-
- # a Timestamp will raise a TypeError in a multi-index
- # rather than a KeyError, try it here
- # note that a string that 'looks' like a Timestamp will raise
- # a KeyError! (GH5725)
- if isinstance(key, (datetime.datetime, np.datetime64, str)):
- try:
- return _try_mi(key)
- except KeyError:
- raise
- except (IndexError, ValueError, TypeError):
- pass
-
- try:
- return _try_mi(Timestamp(key))
- except (
- KeyError,
- TypeError,
- IndexError,
- ValueError,
- tslibs.OutOfBoundsDatetime,
- ):
- pass
-
- raise InvalidIndexError(key)
-
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
@@ -1869,19 +1836,8 @@ def remove_unused_levels(self):
return result
- @property
- def nlevels(self) -> int:
- """
- Integer number of levels in this MultiIndex.
- """
- return len(self._levels)
-
- @property
- def levshape(self):
- """
- A tuple with the length of each level.
- """
- return tuple(len(x) for x in self.levels)
+ # --------------------------------------------------------------------
+ # Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
@@ -1915,6 +1871,8 @@ def __setstate__(self, state):
self.sortorder = sortorder
self._reset_identity()
+ # --------------------------------------------------------------------
+
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
@@ -2287,6 +2245,135 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
return new_index, indexer
+ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
+ """
+ Create index with target's values (move/add/delete values as necessary)
+
+ Returns
+ -------
+ new_index : pd.MultiIndex
+ Resulting index
+ indexer : np.ndarray or None
+ Indices of output values in original index.
+
+ """
+ # GH6552: preserve names when reindexing to non-named target
+ # (i.e. neither Index nor Series).
+ preserve_names = not hasattr(target, "names")
+
+ if level is not None:
+ if method is not None:
+ raise TypeError("Fill method not supported if level passed")
+
+ # GH7774: preserve dtype/tz if target is empty and not an Index.
+ # target may be an iterator
+ target = ibase._ensure_has_len(target)
+ if len(target) == 0 and not isinstance(target, Index):
+ idx = self.levels[level]
+ attrs = idx._get_attributes_dict()
+ attrs.pop("freq", None) # don't preserve freq
+ target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
+ else:
+ target = ensure_index(target)
+ target, indexer, _ = self._join_level(
+ target, level, how="right", return_indexers=True, keep_order=False
+ )
+ else:
+ target = ensure_index(target)
+ if self.equals(target):
+ indexer = None
+ else:
+ if self.is_unique:
+ indexer = self.get_indexer(
+ target, method=method, limit=limit, tolerance=tolerance
+ )
+ else:
+ raise ValueError("cannot handle a non-unique multi-index!")
+
+ if not isinstance(target, MultiIndex):
+ if indexer is None:
+ target = self
+ elif (indexer >= 0).all():
+ target = self.take(indexer)
+ else:
+ # hopefully?
+ target = MultiIndex.from_tuples(target)
+
+ if (
+ preserve_names
+ and target.nlevels == self.nlevels
+ and target.names != self.names
+ ):
+ target = target.copy(deep=False)
+ target.names = self.names
+
+ return target, indexer
+
+ # --------------------------------------------------------------------
+ # Indexing Methods
+
+ def get_value(self, series, key):
+ # Label-based
+ s = com.values_from_object(series)
+ k = com.values_from_object(key)
+
+ def _try_mi(k):
+ # TODO: what if a level contains tuples??
+ loc = self.get_loc(k)
+ new_values = series._values[loc]
+ new_index = self[loc]
+ new_index = maybe_droplevels(new_index, k)
+ return series._constructor(
+ new_values, index=new_index, name=series.name
+ ).__finalize__(self)
+
+ try:
+ return self._engine.get_value(s, k)
+ except KeyError as e1:
+ try:
+ return _try_mi(key)
+ except KeyError:
+ pass
+
+ try:
+ return libindex.get_value_at(s, k)
+ except IndexError:
+ raise
+ except TypeError:
+ # generator/iterator-like
+ if is_iterator(key):
+ raise InvalidIndexError(key)
+ else:
+ raise e1
+ except Exception: # pragma: no cover
+ raise e1
+ except TypeError:
+
+ # a Timestamp will raise a TypeError in a multi-index
+ # rather than a KeyError, try it here
+ # note that a string that 'looks' like a Timestamp will raise
+ # a KeyError! (GH5725)
+ if isinstance(key, (datetime.datetime, np.datetime64, str)):
+ try:
+ return _try_mi(key)
+ except KeyError:
+ raise
+ except (IndexError, ValueError, TypeError):
+ pass
+
+ try:
+ return _try_mi(Timestamp(key))
+ except (
+ KeyError,
+ TypeError,
+ IndexError,
+ ValueError,
+ tslibs.OutOfBoundsDatetime,
+ ):
+ pass
+
+ raise InvalidIndexError(key)
+
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
@@ -2361,70 +2448,6 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
- def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
- """
- Create index with target's values (move/add/delete values as necessary)
-
- Returns
- -------
- new_index : pd.MultiIndex
- Resulting index
- indexer : np.ndarray or None
- Indices of output values in original index.
-
- """
- # GH6552: preserve names when reindexing to non-named target
- # (i.e. neither Index nor Series).
- preserve_names = not hasattr(target, "names")
-
- if level is not None:
- if method is not None:
- raise TypeError("Fill method not supported if level passed")
-
- # GH7774: preserve dtype/tz if target is empty and not an Index.
- # target may be an iterator
- target = ibase._ensure_has_len(target)
- if len(target) == 0 and not isinstance(target, Index):
- idx = self.levels[level]
- attrs = idx._get_attributes_dict()
- attrs.pop("freq", None) # don't preserve freq
- target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
- else:
- target = ensure_index(target)
- target, indexer, _ = self._join_level(
- target, level, how="right", return_indexers=True, keep_order=False
- )
- else:
- target = ensure_index(target)
- if self.equals(target):
- indexer = None
- else:
- if self.is_unique:
- indexer = self.get_indexer(
- target, method=method, limit=limit, tolerance=tolerance
- )
- else:
- raise ValueError("cannot handle a non-unique multi-index!")
-
- if not isinstance(target, MultiIndex):
- if indexer is None:
- target = self
- elif (indexer >= 0).all():
- target = self.take(indexer)
- else:
- # hopefully?
- target = MultiIndex.from_tuples(target)
-
- if (
- preserve_names
- and target.nlevels == self.nlevels
- and target.names != self.names
- ):
- target = target.copy(deep=False)
- target.names = self.names
-
- return target, indexer
-
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
@@ -3060,6 +3083,8 @@ def _update_indexer(idxr, indexer=indexer):
return Int64Index([])._ndarray_values
return indexer._ndarray_values
+ # --------------------------------------------------------------------
+
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
@@ -3158,6 +3183,9 @@ def equal_levels(self, other) -> bool:
return False
return True
+ # --------------------------------------------------------------------
+ # Set Methods
+
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
@@ -3310,21 +3338,6 @@ def difference(self, other, sort=None):
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
- @Appender(Index.astype.__doc__)
- def astype(self, dtype, copy=True):
- dtype = pandas_dtype(dtype)
- if is_categorical_dtype(dtype):
- msg = "> 1 ndim Categorical are not supported at this time"
- raise NotImplementedError(msg)
- elif not is_object_dtype(dtype):
- raise TypeError(
- f"Setting {type(self)} dtype to anything other "
- "than object is not supported"
- )
- elif copy is True:
- return self._shallow_copy()
- return self
-
def _convert_can_do_setop(self, other):
result_names = self.names
@@ -3345,6 +3358,23 @@ def _convert_can_do_setop(self, other):
result_names = self.names if self.names == other.names else None
return other, result_names
+ # --------------------------------------------------------------------
+
+ @Appender(Index.astype.__doc__)
+ def astype(self, dtype, copy=True):
+ dtype = pandas_dtype(dtype)
+ if is_categorical_dtype(dtype):
+ msg = "> 1 ndim Categorical are not supported at this time"
+ raise NotImplementedError(msg)
+ elif not is_object_dtype(dtype):
+ raise TypeError(
+ f"Setting {type(self)} dtype to anything other "
+ "than object is not supported"
+ )
+ elif copy is True:
+ return self._shallow_copy()
+ return self
+
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
| https://api.github.com/repos/pandas-dev/pandas/pulls/31553 | 2020-02-01T20:53:42Z | 2020-02-01T22:23:20Z | 2020-02-01T22:23:20Z | 2020-02-01T22:43:36Z | |
BUG: to_json not allowing uploads to S3 (#28375) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e07a8fa0469f4..3304658a37a1a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -167,7 +167,7 @@ MultiIndex
I/O
^^^
- Bug in :meth:`read_json` where integer overflow was occuring when json contains big number strings. (:issue:`30320`)
--
+- Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`)
-
Plotting
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 204807b55c877..04fd17a00041b 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -19,12 +19,7 @@
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.reshape.concat import concat
-from pandas.io.common import (
- get_filepath_or_buffer,
- get_handle,
- infer_compression,
- stringify_path,
-)
+from pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import build_table_schema, parse_table_schema
from pandas.io.parsers import _validate_integer
@@ -56,7 +51,11 @@ def to_json(
"'index=False' is only valid when 'orient' is 'split' or 'table'"
)
- path_or_buf = stringify_path(path_or_buf)
+ if path_or_buf is not None:
+ path_or_buf, _, _, _ = get_filepath_or_buffer(
+ path_or_buf, compression=compression, mode="w"
+ )
+
if lines and orient != "records":
raise ValueError("'lines' keyword only valid when 'orient' is records")
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 94d51589023c4..602022a21c4a6 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1662,3 +1662,12 @@ def test_json_multiindex(self, dataframe, expected):
series = dataframe.stack()
result = series.to_json(orient="index")
assert result == expected
+
+ def test_to_s3(self, s3_resource):
+ # GH 28375
+ mock_bucket_name, target_file = "pandas-test", "test.json"
+ df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
+ df.to_json(f"s3://{mock_bucket_name}/{target_file}")
+ assert target_file in (
+ obj.key for obj in s3_resource.Bucket("pandas-test").objects.all()
+ )
| - [x] closes #28375
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31552 | 2020-02-01T20:48:08Z | 2020-02-02T22:29:04Z | 2020-02-02T22:29:04Z | 2020-06-26T12:55:47Z |
ADMIN: Create separate issue templates for different use cases | diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/bug_report.md
similarity index 58%
rename from .github/ISSUE_TEMPLATE.md
rename to .github/ISSUE_TEMPLATE/bug_report.md
index e33835c462511..fafb251bbaa5c 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,29 +1,34 @@
-#### Code Sample, a copy-pastable example if possible
+---
+name: Bug Report
+about: Create a bug report to help us improve pandas
+title: "BUG:"
+labels: ""
+
+---
+
+#### Code Sample, a copy-pastable example
```python
# Your code here
```
+
+**Note**: Please read [this guide](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your bug.
+
#### Problem description
-[this should explain **why** the current behaviour is a problem and why the expected output is a better solution.]
+[this should explain **why** the current behaviour is a problem and why the expected output is a better solution]
**Note**: We receive a lot of issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates!
**Note**: Many problems can be resolved by simply upgrading `pandas` to the latest version. Before submitting, please check if that solution works for you. If possible, you may want to check if `master` addresses this issue, but that is not necessary.
-For documentation-related issues, you can check the latest versions of the docs on `master` here:
-
-https://pandas-docs.github.io/pandas-docs-travis/
-
-If the issue has not been resolved there, go ahead and file it in the issue tracker.
-
#### Expected Output
#### Output of ``pd.show_versions()``
<details>
-[paste the output of ``pd.show_versions()`` here below this line]
+[paste the output of ``pd.show_versions()`` here leaving a blank line after the details tag]
</details>
diff --git a/.github/ISSUE_TEMPLATE/documentation_enhancement.md b/.github/ISSUE_TEMPLATE/documentation_enhancement.md
new file mode 100644
index 0000000000000..819544e628b97
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation_enhancement.md
@@ -0,0 +1,17 @@
+---
+name: Documentation Enhancement
+about: Report needed documentation
+title: "DOC:"
+labels: ""
+
+---
+
+#### Report needed documentation
+
+[this should provide a description of what documentation you believe is needed and why]
+
+**Note**: You can check the latest versions of the docs on `master` [here](https://dev.pandas.io/docs).
+
+#### Describe the documentation you'd like
+
+[this should provide a clear and concise description of what you want to happen]
diff --git a/.github/ISSUE_TEMPLATE/documentation_error.md b/.github/ISSUE_TEMPLATE/documentation_error.md
new file mode 100644
index 0000000000000..46cf7b370e5a0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation_error.md
@@ -0,0 +1,30 @@
+---
+name: Documentation Error
+about: Report incorrect documentation
+title: "DOC:"
+labels: ""
+
+---
+
+#### Location of incorrect documentation
+
+[this should provide links and line numbers to the incorrect documentation]
+
+**Note**: You can check the latest versions of the docs on `master` [here](https://dev.pandas.io/docs).
+
+#### Describe the problems or issues found in the documentation
+
+[this should explain **why** the current documentation is a problem]
+
+#### Suggested fix for documentation
+
+[this should explain the suggested fix and **why** it's better than the existing documentation]
+
+#### Steps taken to verify documentation is incorrect
+
+[this should detail steps you've taken to verify the documentation is incorrect, with code examples, if applicable]
+
+```python
+# Your code here, if applicable
+
+```
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000..c3a73bfdd6ffb
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,28 @@
+---
+name: Feature Request
+about: Suggest an idea for pandas
+title: "ENH:"
+labels: ""
+
+---
+
+#### Is your feature request related to a problem?
+
+[this should provide a description of what the problem is, e.g. "I wish I could use pandas to do [...]"]
+
+#### Describe the solution you'd like
+
+[this should provide a description of the feature request, e.g. "`DataFrame.foo` should get a new parameter `bar` that [...]"]
+
+#### Describe alternatives you've considered
+
+[this should provide a description of any alternative solutions or features you've considered]
+
+#### Additional context
+
+[add any other context, code examples, or references to existing implementations about the feature request here]
+
+```python
+# Your code here, if applicable
+
+```
diff --git a/.github/ISSUE_TEMPLATE/submit_question.md b/.github/ISSUE_TEMPLATE/submit_question.md
new file mode 100644
index 0000000000000..4bd4aae419c06
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/submit_question.md
@@ -0,0 +1,19 @@
+---
+name: Submit Question
+about: Ask a general question about pandas
+title: "QST:"
+labels: ""
+
+---
+
+#### Question about pandas
+
+**Note**: It's highly recommended that you use [StackOverflow](https://www.stackoverflow.com) for any usage related questions, e.g. "How do I do [...] with pandas?" You are much more likely to receive a quick response to your question on StackOverflow than the GitHub issue tracker. You may also want to search the [pandas tag](https://stackoverflow.com/questions/tagged/pandas) on StackOverflow to see if a similar question has already been asked and answered.
+
+**Note**: If you'd still like to submit a question, please read [this guide](
+https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your question.
+
+```python
+# Your code here, if applicable
+
+```
| I recall this being mentioned on one of the core dev calls a few months back, and took inspiration from [rapidsai/cudf](https://github.com/rapidsai/cudf) for the various templates.
Added templates for the following use cases:
- Bug Report
- This is largely the same as the current issue template
- Documentation Enhancement
- Documentation Error
- Feature Request
- Submit Question
- This attempts to direct users to StackOverflow for usage questions
We could also add a Blank Template for opening an issue without any template provided, but opted not to do that for now, as I don't want to encourage people to bypass these templates. Could certainly add one if there's a consensus that we want this.
I've created a [local repo](https://github.com/jschendel/pandas-templates/issues) where you can see these templates in action since I couldn't figure out another way to actually display these. Click the "New Issue" button to see what these changes would look like.
cc @pandas-dev/pandas-core @pandas-dev/pandas-triage | https://api.github.com/repos/pandas-dev/pandas/pulls/31551 | 2020-02-01T20:47:13Z | 2020-03-11T02:28:01Z | null | 2020-03-31T17:41:49Z |
BUG: parase_dates column is in dataframe (#31251) | diff --git a/README.md b/README.md
index 1130eb30954dc..5342eda4390eb 100644
--- a/README.md
+++ b/README.md
@@ -5,82 +5,16 @@
-----------------
# pandas: powerful Python data analysis toolkit
-
-<table>
-<tr>
- <td>Latest Release</td>
- <td>
- <a href="https://pypi.org/project/pandas/">
- <img src="https://img.shields.io/pypi/v/pandas.svg" alt="latest release" />
- </a>
- </td>
-</tr>
- <td></td>
- <td>
- <a href="https://anaconda.org/anaconda/pandas/">
- <img src="https://anaconda.org/conda-forge/pandas/badges/version.svg" alt="latest release" />
- </a>
-</td>
-</tr>
-<tr>
- <td>Package Status</td>
- <td>
- <a href="https://pypi.org/project/pandas/">
- <img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" />
- </a>
- </td>
-</tr>
-<tr>
- <td>License</td>
- <td>
- <a href="https://github.com/pandas-dev/pandas/blob/master/LICENSE">
- <img src="https://img.shields.io/pypi/l/pandas.svg" alt="license" />
- </a>
-</td>
-</tr>
-<tr>
- <td>Build Status</td>
- <td>
- <a href="https://travis-ci.org/pandas-dev/pandas">
- <img src="https://travis-ci.org/pandas-dev/pandas.svg?branch=master" alt="travis build status" />
- </a>
- </td>
-</tr>
-<tr>
- <td></td>
- <td>
- <a href="https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=master">
- <img src="https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=master" alt="Azure Pipelines build status" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Coverage</td>
- <td>
- <a href="https://codecov.io/gh/pandas-dev/pandas">
- <img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Downloads</td>
- <td>
- <a href="https://pandas.pydata.org">
- <img src="https://anaconda.org/conda-forge/pandas/badges/downloads.svg" alt="conda-forge downloads" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Gitter</td>
- <td>
- <a href="https://gitter.im/pydata/pandas">
- <img src="https://badges.gitter.im/Join%20Chat.svg" />
- </a>
- </td>
-</tr>
-</table>
-
-
+[](https://pypi.org/project/pandas/)
+[](https://anaconda.org/anaconda/pandas/)
+[](https://pypi.org/project/pandas/)
+[](https://github.com/pandas-dev/pandas/blob/master/LICENSE)
+[](https://travis-ci.org/pandas-dev/pandas)
+[](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=master)
+[](https://codecov.io/gh/pandas-dev/pandas)
+[](https://pandas.pydata.org)
+[](https://gitter.im/pydata/pandas)
+[](https://numfocus.org)
## What is it?
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 0cc42be42d61e..b46989894ae12 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -113,7 +113,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort' ; echo $MSG
- ISORT_CMD="isort --recursive --check-only pandas asv_bench"
+ ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
else
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index 869d2ab683f0c..6883301a63a9b 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -27,8 +27,7 @@ dependencies:
- numexpr
- numpy=1.15.*
- odfpy
- - openpyxl<=3.0.1
- # https://github.com/pandas-dev/pandas/pull/30009 openpyxl 3.0.2 broke
+ - openpyxl
- pandas-gbq
- psycopg2
- pyarrow>=0.13.0
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 73e2c20b31438..682b1016ff3a2 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -2,7 +2,6 @@ name: pandas-dev
channels:
- defaults
- conda-forge
- - c3i_test
dependencies:
- python=3.7.*
diff --git a/doc/redirects.csv b/doc/redirects.csv
index 0a71f037d23c3..3a990b09e7f7d 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -46,7 +46,10 @@ developer,development/developer
extending,development/extending
internals,development/internals
-# api
+# api moved function
+reference/api/pandas.io.json.json_normalize,pandas.json_normalize
+
+# api rename
api,reference/index
generated/pandas.api.extensions.ExtensionArray.argsort,../reference/api/pandas.api.extensions.ExtensionArray.argsort
generated/pandas.api.extensions.ExtensionArray.astype,../reference/api/pandas.api.extensions.ExtensionArray.astype
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 28df08a8607b9..c12c148d0f10d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -209,6 +209,7 @@
"external_links": [],
"github_url": "https://github.com/pandas-dev/pandas",
"twitter_url": "https://twitter.com/pandas_dev",
+ "google_analytics_id": "UA-27880019-2",
}
# Add any paths that contain custom themes here, relative to this directory.
diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index 4fef5efbd1551..277080006cb3c 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -1973,7 +1973,7 @@ Pandas has two ways to store strings.
1. ``object`` dtype, which can hold any Python object, including strings.
2. :class:`StringDtype`, which is dedicated to strings.
-Generally, we recommend using :class:`StringDtype`. See :ref:`text.types` fore more.
+Generally, we recommend using :class:`StringDtype`. See :ref:`text.types` for more.
Finally, arbitrary objects may be stored using the ``object`` dtype, but should
be avoided to the extent possible (for performance and interoperability with
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 6680ba854cb6f..756dd06aced7f 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -1641,3 +1641,46 @@ when plotting a large number of points.
:suppress:
plt.close('all')
+
+Plotting backends
+-----------------
+
+Starting in version 0.25, pandas can be extended with third-party plotting backends. The
+main idea is letting users select a plotting backend different than the provided
+one based on Matplotlib.
+
+This can be done by passing 'backend.module' as the argument ``backend`` in ``plot``
+function. For example:
+
+.. code-block:: python
+
+ >>> Series([1, 2, 3]).plot(backend='backend.module')
+
+Alternatively, you can also set this option globally, so you don't need to specify
+the keyword in each ``plot`` call. For example:
+
+.. code-block:: python
+
+ >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.Series([1, 2, 3]).plot()
+
+Or:
+
+.. code-block:: python
+
+ >>> pd.options.plotting.backend = 'backend.module'
+ >>> pd.Series([1, 2, 3]).plot()
+
+This would be more or less equivalent to:
+
+.. code-block:: python
+
+ >>> import backend.module
+ >>> backend.module.plot(pd.Series([1, 2, 3]))
+
+The backend module can then use other visualization tools (Bokeh, Altair, hvplot,...)
+to generate the plots. Some libraries implementing a backend for pandas are listed
+on the ecosystem :ref:`ecosystem.visualization` page.
+
+Developers guide can be found at
+https://dev.pandas.io/docs/development/extending.html#plotting-backends
\ No newline at end of file
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 111caa81f7169..68aabfe76d8de 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -26,6 +26,7 @@ Version 1.0
v1.0.0
v1.0.1
+ v1.0.2
Version 0.25
------------
diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 801d97b777e00..ef3bb8161d13f 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -1,7 +1,7 @@
.. _whatsnew_101:
-What's new in 1.0.1 (??)
-------------------------
+What's new in 1.0.1 (February 5, 2020)
+--------------------------------------
These are the changes in pandas 1.0.1. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -10,126 +10,64 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_101.regressions:
-.. _whatsnew_101.bug_fixes:
-
-Bug fixes
-~~~~~~~~~
-- Bug in :meth:`GroupBy.apply` was raising ``TypeError`` if called with function which returned a non-pandas non-scalar object (e.g. a list) (:issue:`31441`)
-
-Categorical
-^^^^^^^^^^^
-
--
--
-
-Datetimelike
-^^^^^^^^^^^^
-- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
-- Fixed bug in :meth:`to_datetime` raising when ``cache=True`` and out-of-bound values are present (:issue:`31491`)
-
-Timedelta
-^^^^^^^^^
-
--
--
-
-Timezones
-^^^^^^^^^
-
--
--
-
-
-Numeric
-^^^^^^^
-- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
-- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
--
-
-Conversion
-^^^^^^^^^^
-
--
--
-
-Strings
-^^^^^^^
-
--
--
-
+Fixed regressions
+~~~~~~~~~~~~~~~~~
-Interval
-^^^^^^^^
-
--
--
-
-Indexing
-^^^^^^^^
-
-- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
- Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
--
--
-- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
-
-Missing
-^^^^^^^
-
--
--
-
-MultiIndex
-^^^^^^^^^^
-
--
--
+- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containing a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
+- Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
+- Fixed regression in ``.groupby().agg()`` raising an ``AssertionError`` for some reductions like ``min`` on object-dtype columns (:issue:`31522`)
+- Fixed regression in ``.groupby()`` aggregations with categorical dtype using Cythonized reduction functions (e.g. ``first``) (:issue:`31450`)
+- Fixed regression in :meth:`GroupBy.apply` if called with a function which returned a non-pandas non-scalar object (e.g. a list or numpy array) (:issue:`31441`)
+- Fixed regression in :meth:`DataFrame.groupby` whereby taking the minimum or maximum of a column with period dtype would raise a ``TypeError``. (:issue:`31471`)
+- Fixed regression in :meth:`DataFrame.groupby` with an empty DataFrame grouping by a level of a MultiIndex (:issue:`31670`).
+- Fixed regression in :meth:`DataFrame.apply` with object dtype and non-reducing function (:issue:`31505`)
+- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
+- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
+- Fixed regression in :class:`Categorical` construction with ``numpy.str_`` categories (:issue:`31499`)
+- Fixed regression in :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` when selecting a row containing a single ``datetime64`` or ``timedelta64`` column (:issue:`31649`)
+- Fixed regression where setting :attr:`pd.options.display.max_colwidth` was not accepting negative integer. In addition, this behavior has been deprecated in favor of using ``None`` (:issue:`31532`)
+- Fixed regression in objTOJSON.c fix return-type warning (:issue:`31463`)
+- Fixed regression in :meth:`qcut` when passed a nullable integer. (:issue:`31389`)
+- Fixed regression in assigning to a :class:`Series` using a nullable integer dtype (:issue:`31446`)
+- Fixed performance regression when indexing a ``DataFrame`` or ``Series`` with a :class:`MultiIndex` for the index using a list of labels (:issue:`31648`)
+- Fixed regression in :meth:`read_csv` where the ``encoding`` option was not recognized when reading from a file-like object of type ``RawIOBase`` (:issue:`31575`)
-I/O
-^^^
+.. ---------------------------------------------------------------------------
-- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
--
--
+.. _whatsnew_101.deprecations:
-Plotting
-^^^^^^^^
+Deprecations
+~~~~~~~~~~~~
--
--
+- Support for negative integer for :attr:`pd.options.display.max_colwidth` is deprecated in favor of using ``None`` (:issue:`31532`)
-Groupby/resample/rolling
-^^^^^^^^^^^^^^^^^^^^^^^^
+.. ---------------------------------------------------------------------------
--
--
+.. _whatsnew_101.bug_fixes:
+Bug fixes
+~~~~~~~~~
-Reshaping
-^^^^^^^^^
+**Datetimelike**
--
--
+- Fixed bug in :meth:`to_datetime` raising when ``cache=True`` and out-of-bound values are present (:issue:`31491`)
-Sparse
-^^^^^^
+**Numeric**
--
--
+- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
+ and for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
-ExtensionArray
-^^^^^^^^^^^^^^
+**Plotting**
-- Bug in dtype being lost in ``__invert__`` (``~`` operator) for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
--
+- Plotting tz-aware timeseries no longer gives UserWarning (:issue:`31205`)
+**Interval**
-Other
-^^^^^
--
--
+- Bug in :meth:`Series.shift` with ``interval`` dtype raising a ``TypeError`` when shifting an interval array of integers or datetimes (:issue:`34195`)
.. ---------------------------------------------------------------------------
@@ -137,3 +75,5 @@ Other
Contributors
~~~~~~~~~~~~
+
+.. contributors:: v1.0.0..v1.0.1|HEAD
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
new file mode 100644
index 0000000000000..70aaaa6d0a60d
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -0,0 +1,39 @@
+.. _whatsnew_102:
+
+What's new in 1.0.2 (February ??, 2020)
+---------------------------------------
+
+These are the changes in pandas 1.0.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+- Fixed regression in :meth:`DataFrame.to_excel` when ``columns`` kwarg is passed (:issue:`31677`)
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+**I/O**
+
+- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.0.1..v1.0.2|HEAD
\ No newline at end of file
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e07a8fa0469f4..aea5695a96388 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -60,7 +60,11 @@ Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`.
Previously a ``AttributeError`` was raised (:issue:`31126`)
-
+- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`)
+ now raise a ``TypeError`` if a not-accepted keyword argument is passed into it.
+ Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`)
+- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`)
+-
.. ---------------------------------------------------------------------------
@@ -105,11 +109,13 @@ Datetimelike
- Bug in :class:`Timestamp` where constructing :class:`Timestamp` from ambiguous epoch time and calling constructor again changed :meth:`Timestamp.value` property (:issue:`24329`)
- :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`)
- Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`)
+- Bug in :meth:`DataFrame.reindex` and :meth:`Series.reindex` when reindexing with a tz-aware index (:issue:`26683`)
+- Bug in :meth:`Period.to_timestamp`, :meth:`Period.start_time` with microsecond frequency returning a timestamp one nanosecond earlier than the correct time (:issue:`31475`)
Timedelta
^^^^^^^^^
--
+- Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`)
-
Timezones
@@ -150,7 +156,8 @@ Indexing
- Bug in :meth:`PeriodIndex.get_loc` treating higher-resolution strings differently from :meth:`PeriodIndex.get_value` (:issue:`31172`)
- Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`)
- Bug in :meth:`PeriodIndex.is_monotonic` incorrectly returning ``True`` when containing leading ``NaT`` entries (:issue:`31437`)
--
+- Bug in :meth:`DatetimeIndex.get_loc` raising ``KeyError`` with converted-integer key instead of the user-passed key (:issue:`31425`)
+- Bug in :meth:`Series.xs` incorrectly returning ``Timestamp`` instead of ``datetime64`` in some object-dtype cases (:issue:`31630`)
Missing
^^^^^^^
@@ -160,15 +167,24 @@ Missing
MultiIndex
^^^^^^^^^^
+- Bug in :meth:`DataFrame.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`)
--
+.. ipython:: python
+
+ df = pd.DataFrame(np.arange(4),
+ index=[["a", "a", "b", "b"], [1, 2, 1, 2]])
+ # Rows are now ordered as the requested keys
+ df.loc[(['b', 'a'], [2, 1]), :]
-
I/O
^^^
- Bug in :meth:`read_json` where integer overflow was occuring when json contains big number strings. (:issue:`30320`)
--
--
+- `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`)
+- Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`)
+- Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
+ ``coerce_timestamps``; following pyarrow's default allows writing nanosecond
+ timestamps with ``version="2.0"`` (:issue:`31652`).
Plotting
^^^^^^^^
@@ -210,7 +226,7 @@ Other
^^^^^
- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True``
instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
--
+- Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`)
.. ---------------------------------------------------------------------------
diff --git a/doc/sphinxext/announce.py b/doc/sphinxext/announce.py
index f394aac5c545b..e4859157f73de 100755
--- a/doc/sphinxext/announce.py
+++ b/doc/sphinxext/announce.py
@@ -57,6 +57,16 @@ def get_authors(revision_range):
pat = "^.*\\t(.*)$"
lst_release, cur_release = [r.strip() for r in revision_range.split("..")]
+ if "|" in cur_release:
+ # e.g. v1.0.1|HEAD
+ maybe_tag, head = cur_release.split("|")
+ assert head == "HEAD"
+ if maybe_tag in this_repo.tags:
+ cur_release = maybe_tag
+ else:
+ cur_release = head
+ revision_range = f"{lst_release}..{cur_release}"
+
# authors, in current release and previous to current release.
cur = set(re.findall(pat, this_repo.git.shortlog("-s", revision_range), re.M))
pre = set(re.findall(pat, this_repo.git.shortlog("-s", lst_release), re.M))
diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py
index d9ba2bb2cfb07..c2b21e40cadad 100644
--- a/doc/sphinxext/contributors.py
+++ b/doc/sphinxext/contributors.py
@@ -6,7 +6,13 @@
This will be replaced with a message indicating the number of
code contributors and commits, and then list each contributor
-individually.
+individually. For development versions (before a tag is available)
+use::
+
+ .. contributors:: v0.23.0..v0.23.1|HEAD
+
+While the v0.23.1 tag does not exist, that will use the HEAD of the
+branch as the end of the revision range.
"""
from announce import build_components
from docutils import nodes
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index cacd6f5454de7..8b6116d3abd60 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -155,9 +155,7 @@ def _describe_option(pat: str = "", _print_desc: bool = True):
if len(keys) == 0:
raise OptionError("No such keys(s)")
- s = ""
- for k in keys: # filter by pat
- s += _build_option_description(k)
+ s = "\n".join([_build_option_description(k) for k in keys])
if _print_desc:
print(s)
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 7d57c67e70b58..6671375f628e7 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -670,7 +670,9 @@ cdef class StringHashTable(HashTable):
val = values[i]
if isinstance(val, str):
- v = get_c_string(val)
+ # GH#31499 if we have a np.str_ get_c_string wont recognize
+ # it as a str, even though isinstance does.
+ v = get_c_string(<str>val)
else:
v = get_c_string(self.na_string_sentinel)
vecs[i] = v
@@ -703,7 +705,9 @@ cdef class StringHashTable(HashTable):
val = values[i]
if isinstance(val, str):
- v = get_c_string(val)
+ # GH#31499 if we have a np.str_ get_c_string wont recognize
+ # it as a str, even though isinstance does.
+ v = get_c_string(<str>val)
else:
v = get_c_string(self.na_string_sentinel)
vecs[i] = v
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 1915eaf6e07dd..4185cc2084469 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,17 +1,12 @@
-from datetime import datetime, timedelta, date
import warnings
-import cython
-
import numpy as np
cimport numpy as cnp
from numpy cimport (ndarray, intp_t,
float64_t, float32_t,
int64_t, int32_t, int16_t, int8_t,
- uint64_t, uint32_t, uint16_t, uint8_t,
- # Note: NPY_DATETIME, NPY_TIMEDELTA are only available
- # for cimport in cython>=0.27.3
- NPY_DATETIME, NPY_TIMEDELTA)
+ uint64_t, uint32_t, uint16_t, uint8_t
+)
cnp.import_array()
@@ -23,7 +18,7 @@ from pandas._libs.tslibs.c_timestamp cimport _Timestamp
from pandas._libs.hashtable cimport HashTable
from pandas._libs import algos, hashtable as _hash
-from pandas._libs.tslibs import Timestamp, Timedelta, period as periodlib
+from pandas._libs.tslibs import Timedelta, period as periodlib
from pandas._libs.missing import checknull
@@ -35,16 +30,6 @@ cdef inline bint is_definitely_invalid_key(object val):
return False
-cpdef get_value_at(ndarray arr, object loc, object tz=None):
- obj = util.get_value_at(arr, loc)
-
- if arr.descr.type_num == NPY_DATETIME:
- return Timestamp(obj, tz=tz)
- elif arr.descr.type_num == NPY_TIMEDELTA:
- return Timedelta(obj)
- return obj
-
-
# Don't populate hash tables in monotonic indexes larger than this
_SIZE_CUTOFF = 1_000_000
@@ -72,35 +57,6 @@ cdef class IndexEngine:
self._ensure_mapping_populated()
return val in self.mapping
- cpdef get_value(self, ndarray arr, object key, object tz=None):
- """
- Parameters
- ----------
- arr : 1-dimensional ndarray
- """
- cdef:
- object loc
-
- loc = self.get_loc(key)
- if isinstance(loc, slice) or util.is_array(loc):
- return arr[loc]
- else:
- return get_value_at(arr, loc, tz=tz)
-
- cpdef set_value(self, ndarray arr, object key, object value):
- """
- Parameters
- ----------
- arr : 1-dimensional ndarray
- """
- cdef:
- object loc
-
- loc = self.get_loc(key)
- value = convert_scalar(arr, value)
-
- arr[loc] = value
-
cpdef get_loc(self, object val):
cdef:
Py_ssize_t loc
@@ -549,54 +505,6 @@ cdef class PeriodEngine(Int64Engine):
return super(PeriodEngine, self).get_indexer_non_unique(ordinal_array)
-cpdef convert_scalar(ndarray arr, object value):
- # we don't turn integers
- # into datetimes/timedeltas
-
- # we don't turn bools into int/float/complex
-
- if arr.descr.type_num == NPY_DATETIME:
- if util.is_array(value):
- pass
- elif isinstance(value, (datetime, np.datetime64, date)):
- return Timestamp(value).to_datetime64()
- elif util.is_timedelta64_object(value):
- # exclude np.timedelta64("NaT") from value != value below
- pass
- elif value is None or value != value:
- return np.datetime64("NaT", "ns")
- raise ValueError("cannot set a Timestamp with a non-timestamp "
- f"{type(value).__name__}")
-
- elif arr.descr.type_num == NPY_TIMEDELTA:
- if util.is_array(value):
- pass
- elif isinstance(value, timedelta) or util.is_timedelta64_object(value):
- value = Timedelta(value)
- if value is NaT:
- return np.timedelta64("NaT", "ns")
- return value.to_timedelta64()
- elif util.is_datetime64_object(value):
- # exclude np.datetime64("NaT") which would otherwise be picked up
- # by the `value != value check below
- pass
- elif value is None or value != value:
- return np.timedelta64("NaT", "ns")
- raise ValueError("cannot set a Timedelta with a non-timedelta "
- f"{type(value).__name__}")
-
- if (issubclass(arr.dtype.type, (np.integer, np.floating, np.complex)) and
- not issubclass(arr.dtype.type, np.bool_)):
- if util.is_bool_object(value):
- raise ValueError("Cannot assign bool to float/integer series")
-
- if issubclass(arr.dtype.type, (np.integer, np.bool_)):
- if util.is_float_object(value) and value != value:
- raise ValueError("Cannot assign nan to integer series")
-
- return value
-
-
cdef class BaseMultiIndexCodesEngine:
"""
Base class for MultiIndexUIntEngine and MultiIndexPyIntEngine, which
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 377d49f2bbd29..3077f73a8d1a4 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -638,7 +638,7 @@ cdef class TextReader:
raise ValueError(f'Unrecognized compression type: '
f'{self.compression}')
- if self.encoding and isinstance(source, io.BufferedIOBase):
+ if self.encoding and isinstance(source, (io.BufferedIOBase, io.RawIOBase)):
source = io.TextIOWrapper(
source, self.encoding.decode('utf-8'), newline='')
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 89164c527002a..43d253f632f0f 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -114,7 +114,8 @@ cdef class Reducer:
if self.typ is not None:
# In this case, we also have self.index
name = labels[i]
- cached_typ = self.typ(chunk, index=self.index, name=name)
+ cached_typ = self.typ(
+ chunk, index=self.index, name=name, dtype=arr.dtype)
# use the cached_typ if possible
if cached_typ is not None:
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 62c2870c198c4..8cfc20ffd2c1c 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -53,6 +53,7 @@ static PyTypeObject *cls_dataframe;
static PyTypeObject *cls_series;
static PyTypeObject *cls_index;
static PyTypeObject *cls_nat;
+static PyTypeObject *cls_na;
PyObject *cls_timedelta;
npy_int64 get_nat(void) { return NPY_MIN_INT64; }
@@ -127,7 +128,6 @@ typedef struct __PyObjectEncoder {
// pass-through to encode numpy data directly
int npyType;
void *npyValue;
- TypeContext basicTypeContext;
int datetimeIso;
NPY_DATETIMEUNIT datetimeUnit;
@@ -150,6 +150,7 @@ int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
void *initObjToJSON(void) {
PyObject *mod_pandas;
PyObject *mod_nattype;
+ PyObject *mod_natype;
PyObject *mod_decimal = PyImport_ImportModule("decimal");
type_decimal =
(PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal");
@@ -175,8 +176,16 @@ void *initObjToJSON(void) {
Py_DECREF(mod_nattype);
}
+ mod_natype = PyImport_ImportModule("pandas._libs.missing");
+ if (mod_natype) {
+ cls_na = (PyTypeObject *)PyObject_GetAttrString(mod_natype, "NAType");
+ Py_DECREF(mod_natype);
+ }
+
/* Initialise numpy API */
import_array();
+ // GH 31463
+ return NULL;
}
static TypeContext *createTypeContext(void) {
@@ -925,15 +934,15 @@ char *Tuple_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
}
//=============================================================================
-// Iterator iteration functions
+// Set iteration functions
// itemValue is borrowed reference, no ref counting
//=============================================================================
-void Iter_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
+void Set_iterBegin(JSOBJ obj, JSONTypeContext *tc) {
GET_TC(tc)->itemValue = NULL;
GET_TC(tc)->iterator = PyObject_GetIter(obj);
}
-int Iter_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+int Set_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObject *item;
if (GET_TC(tc)->itemValue) {
@@ -951,7 +960,7 @@ int Iter_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return 1;
}
-void Iter_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+void Set_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
if (GET_TC(tc)->itemValue) {
Py_DECREF(GET_TC(tc)->itemValue);
GET_TC(tc)->itemValue = NULL;
@@ -963,11 +972,11 @@ void Iter_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
}
}
-JSOBJ Iter_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
+JSOBJ Set_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
return GET_TC(tc)->itemValue;
}
-char *Iter_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
+char *Set_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc),
size_t *Py_UNUSED(outLen)) {
return NULL;
}
@@ -1788,6 +1797,10 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
"%R (0d array) is not JSON serializable at the moment",
obj);
goto INVALID;
+ } else if (PyObject_TypeCheck(obj, cls_na)) {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
}
ISITERABLE:
@@ -2040,11 +2053,11 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
} else if (PyAnySet_Check(obj)) {
PRINTMARK();
tc->type = JT_ARRAY;
- pc->iterBegin = Iter_iterBegin;
- pc->iterEnd = Iter_iterEnd;
- pc->iterNext = Iter_iterNext;
- pc->iterGetValue = Iter_iterGetValue;
- pc->iterGetName = Iter_iterGetName;
+ pc->iterBegin = Set_iterBegin;
+ pc->iterEnd = Set_iterEnd;
+ pc->iterNext = Set_iterNext;
+ pc->iterGetValue = Set_iterGetValue;
+ pc->iterGetName = Set_iterGetName;
return;
}
@@ -2115,10 +2128,7 @@ void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) {
PyObject_Free(GET_TC(tc)->cStr);
GET_TC(tc)->cStr = NULL;
- if (tc->prv !=
- &(((PyObjectEncoder *)tc->encoder)->basicTypeContext)) { // NOLINT
- PyObject_Free(tc->prv);
- }
+ PyObject_Free(tc->prv);
tc->prv = NULL;
}
}
@@ -2216,16 +2226,6 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
pyEncoder.datetimeUnit = NPY_FR_ms;
pyEncoder.outputFormat = COLUMNS;
pyEncoder.defaultHandler = 0;
- pyEncoder.basicTypeContext.newObj = NULL;
- pyEncoder.basicTypeContext.dictObj = NULL;
- pyEncoder.basicTypeContext.itemValue = NULL;
- pyEncoder.basicTypeContext.itemName = NULL;
- pyEncoder.basicTypeContext.attrList = NULL;
- pyEncoder.basicTypeContext.iterator = NULL;
- pyEncoder.basicTypeContext.cStr = NULL;
- pyEncoder.basicTypeContext.npyarr = NULL;
- pyEncoder.basicTypeContext.rowLabels = NULL;
- pyEncoder.basicTypeContext.columnLabels = NULL;
PRINTMARK();
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index e0862b9250045..bf38fcfb6103c 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -29,7 +29,7 @@ from pandas._libs.tslibs.util cimport (
from pandas._libs.tslibs.timedeltas cimport cast_from_unit
from pandas._libs.tslibs.timezones cimport (
is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info,
- get_timezone, maybe_get_tz, tz_compare, treat_tz_as_dateutil)
+ get_timezone, maybe_get_tz, tz_compare)
from pandas._libs.tslibs.timezones import UTC
from pandas._libs.tslibs.parsing import parse_datetime_string
@@ -341,14 +341,6 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz,
obj.tzinfo = tz
else:
obj.value = pydatetime_to_dt64(ts, &obj.dts)
- # GH 24329 When datetime is ambiguous,
- # pydatetime_to_dt64 doesn't take DST into account
- # but with dateutil timezone, get_utcoffset does
- # so we need to correct for it
- if treat_tz_as_dateutil(ts.tzinfo):
- if ts.tzinfo.is_ambiguous(ts):
- dst_offset = ts.tzinfo.dst(ts)
- obj.value += int(dst_offset.total_seconds() * 1e9)
obj.tzinfo = ts.tzinfo
if obj.tzinfo is not None and not is_utc(obj.tzinfo):
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 357f183b3a845..9f6f401a1a5f5 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -2,7 +2,7 @@ from cpython.object cimport (
PyObject_RichCompare,
Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE)
-from cpython.datetime cimport (datetime,
+from cpython.datetime cimport (datetime, timedelta,
PyDateTime_Check, PyDelta_Check,
PyDateTime_IMPORT)
@@ -276,13 +276,6 @@ cdef class _NaT(datetime):
def __long__(self):
return NPY_NAT
- def total_seconds(self):
- """
- Total duration of timedelta in seconds (to microsecond precision).
- """
- # GH#10939
- return np.nan
-
@property
def is_leap_year(self):
return False
@@ -386,6 +379,7 @@ class NaTType(_NaT):
# nan methods
weekday = _make_nan_func('weekday', datetime.weekday.__doc__)
isoweekday = _make_nan_func('isoweekday', datetime.isoweekday.__doc__)
+ total_seconds = _make_nan_func('total_seconds', timedelta.total_seconds.__doc__)
month_name = _make_nan_func('month_name', # noqa:E128
"""
Return the month name of the Timestamp with specified locale.
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 3dd560ece188d..9419f0eba39aa 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -22,7 +22,7 @@ PyDateTime_IMPORT
from pandas._libs.tslibs.np_datetime cimport (
npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct,
pandas_datetime_to_datetimestruct, check_dts_bounds,
- NPY_DATETIMEUNIT, NPY_FR_D)
+ NPY_DATETIMEUNIT, NPY_FR_D, NPY_FR_us)
cdef extern from "src/datetime/np_datetime.h":
int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
@@ -272,6 +272,8 @@ cdef int64_t DtoB_weekday(int64_t unix_date) nogil:
cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back,
int64_t unix_date) nogil:
+ # calculate the current week (counting from 1970-01-01) treating
+ # sunday as last day of a week
cdef:
int day_of_week = dayofweek(dts.year, dts.month, dts.day)
@@ -473,9 +475,6 @@ cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year) nogil:
int quarter
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
- # TODO: Another version of this function used
- # date_info_from_days_and_time(&dts, unix_date, 0)
- # instead of pandas_datetime_to_datetimestruct; is one more performant?
if af_info.to_end != 12:
dts.month -= af_info.to_end
if dts.month <= 0:
@@ -509,14 +508,18 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil:
cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
- return (ordinal + 3 - af_info.to_end) // 7 + 1
+ return unix_date_to_week(ordinal, af_info.to_end)
+
+
+cdef int64_t unix_date_to_week(int64_t unix_date, int to_end) nogil:
+ return (unix_date + 3 - to_end) // 7 + 1
# --------------------------------------------------------------------
# Conversion _from_ BusinessDay Freq
cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
- ordinal = ((ordinal + 3) // 5) * 7 + (ordinal + 3) % 5 -3
+ ordinal = ((ordinal + 3) // 5) * 7 + (ordinal + 3) % 5 - 3
return upsample_daytime(ordinal, af_info)
@@ -753,14 +756,7 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil:
if fmonth == 0:
fmonth = 12
- mdiff = dts.month - fmonth
- # TODO: Aren't the next two conditions equivalent to
- # unconditional incrementing?
- if mdiff < 0:
- mdiff += 12
- if dts.month >= fmonth:
- mdiff += 12
-
+ mdiff = dts.month - fmonth + 12
return (dts.year - 1970) * 4 + (mdiff - 1) // 3
elif freq == FR_MTH:
@@ -797,23 +793,10 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil:
return unix_date
elif freq == FR_BUS:
- # calculate the current week (counting from 1970-01-01) treating
- # sunday as last day of a week
- weeks = (unix_date + 3) // 7
- # calculate the current weekday (in range 1 .. 7)
- delta = (unix_date + 3) % 7 + 1
- # return the number of business days in full weeks plus the business
- # days in the last - possible partial - week
- if delta <= 5:
- return (5 * weeks) + delta - 4
- else:
- return (5 * weeks) + (5 + 1) - 4
+ return DtoB(dts, 0, unix_date)
elif freq_group == FR_WK:
- day_adj = freq - FR_WK
- return (unix_date + 3 - day_adj) // 7 + 1
-
- # raise ValueError
+ return unix_date_to_week(unix_date, freq - FR_WK)
cdef void get_date_info(int64_t ordinal, int freq,
@@ -983,7 +966,7 @@ cdef inline int month_to_quarter(int month) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
-def dt64arr_to_periodarr(int64_t[:] dtarr, int freq, tz=None):
+def dt64arr_to_periodarr(const int64_t[:] dtarr, int freq, tz=None):
"""
Convert array of datetime64 values (passed in as 'i8' dtype) to a set of
periods corresponding to desired frequency, per period convention.
@@ -1186,7 +1169,12 @@ cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1:
if ordinal == NPY_NAT:
return NPY_NAT
- get_date_info(ordinal, freq, &dts)
+ if freq == 11000:
+ # Microsecond, avoid get_date_info to prevent floating point errors
+ pandas_datetime_to_datetimestruct(ordinal, NPY_FR_us, &dts)
+ else:
+ get_date_info(ordinal, freq, &dts)
+
check_dts_bounds(&dts)
return dtstruct_to_dt64(&dts)
@@ -1383,7 +1371,7 @@ cdef int pdays_in_month(int64_t ordinal, int freq):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_period_field_arr(int code, int64_t[:] arr, int freq):
+def get_period_field_arr(int code, const int64_t[:] arr, int freq):
cdef:
Py_ssize_t i, sz
int64_t[:] out
@@ -1496,7 +1484,7 @@ def extract_freq(ndarray[object] values):
@cython.wraparound(False)
@cython.boundscheck(False)
-cdef int64_t[:] localize_dt64arr_to_period(int64_t[:] stamps,
+cdef int64_t[:] localize_dt64arr_to_period(const int64_t[:] stamps,
int freq, object tz):
cdef:
Py_ssize_t n = len(stamps)
@@ -1584,7 +1572,7 @@ cdef class _Period:
return freq
@classmethod
- def _from_ordinal(cls, ordinal, freq):
+ def _from_ordinal(cls, ordinal: int, freq) -> "Period":
"""
Fast creation from an ordinal and freq that are already validated!
"""
@@ -1704,7 +1692,7 @@ cdef class _Period:
else:
return NotImplemented
- def asfreq(self, freq, how='E'):
+ def asfreq(self, freq, how='E') -> "Period":
"""
Convert Period to desired frequency, at the start or end of the interval.
@@ -1735,7 +1723,7 @@ cdef class _Period:
return Period(ordinal=ordinal, freq=freq)
@property
- def start_time(self):
+ def start_time(self) -> Timestamp:
"""
Get the Timestamp for the start of the period.
@@ -1765,13 +1753,13 @@ cdef class _Period:
return self.to_timestamp(how='S')
@property
- def end_time(self):
+ def end_time(self) -> Timestamp:
# freq.n can't be negative or 0
# ordinal = (self + self.freq.n).start_time.value - 1
ordinal = (self + self.freq).start_time.value - 1
return Timestamp(ordinal)
- def to_timestamp(self, freq=None, how='start', tz=None):
+ def to_timestamp(self, freq=None, how='start', tz=None) -> Timestamp:
"""
Return the Timestamp representation of the Period.
@@ -1811,17 +1799,17 @@ cdef class _Period:
return Timestamp(dt64, tz=tz)
@property
- def year(self):
+ def year(self) -> int:
base, mult = get_freq_code(self.freq)
return pyear(self.ordinal, base)
@property
- def month(self):
+ def month(self) -> int:
base, mult = get_freq_code(self.freq)
return pmonth(self.ordinal, base)
@property
- def day(self):
+ def day(self) -> int:
"""
Get day of the month that a Period falls on.
@@ -1844,7 +1832,7 @@ cdef class _Period:
return pday(self.ordinal, base)
@property
- def hour(self):
+ def hour(self) -> int:
"""
Get the hour of the day component of the Period.
@@ -1874,7 +1862,7 @@ cdef class _Period:
return phour(self.ordinal, base)
@property
- def minute(self):
+ def minute(self) -> int:
"""
Get minute of the hour component of the Period.
@@ -1898,7 +1886,7 @@ cdef class _Period:
return pminute(self.ordinal, base)
@property
- def second(self):
+ def second(self) -> int:
"""
Get the second component of the Period.
@@ -1922,12 +1910,12 @@ cdef class _Period:
return psecond(self.ordinal, base)
@property
- def weekofyear(self):
+ def weekofyear(self) -> int:
base, mult = get_freq_code(self.freq)
return pweek(self.ordinal, base)
@property
- def week(self):
+ def week(self) -> int:
"""
Get the week of the year on the given Period.
@@ -1957,7 +1945,7 @@ cdef class _Period:
return self.weekofyear
@property
- def dayofweek(self):
+ def dayofweek(self) -> int:
"""
Day of the week the period lies in, with Monday=0 and Sunday=6.
@@ -2008,7 +1996,7 @@ cdef class _Period:
return pweekday(self.ordinal, base)
@property
- def weekday(self):
+ def weekday(self) -> int:
"""
Day of the week the period lies in, with Monday=0 and Sunday=6.
@@ -2061,7 +2049,7 @@ cdef class _Period:
return self.dayofweek
@property
- def dayofyear(self):
+ def dayofyear(self) -> int:
"""
Return the day of the year.
@@ -2096,12 +2084,12 @@ cdef class _Period:
return pday_of_year(self.ordinal, base)
@property
- def quarter(self):
+ def quarter(self) -> int:
base, mult = get_freq_code(self.freq)
return pquarter(self.ordinal, base)
@property
- def qyear(self):
+ def qyear(self) -> int:
"""
Fiscal year the Period lies in according to its starting-quarter.
@@ -2145,7 +2133,7 @@ cdef class _Period:
return pqyear(self.ordinal, base)
@property
- def days_in_month(self):
+ def days_in_month(self) -> int:
"""
Get the total number of days in the month that this period falls on.
@@ -2179,7 +2167,7 @@ cdef class _Period:
return pdays_in_month(self.ordinal, base)
@property
- def daysinmonth(self):
+ def daysinmonth(self) -> int:
"""
Get the total number of days of the month that the Period falls in.
@@ -2209,7 +2197,7 @@ cdef class _Period:
return Period(datetime.now(), freq=freq)
@property
- def freqstr(self):
+ def freqstr(self) -> str:
return self.freq.freqstr
def __repr__(self) -> str:
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index c0b20c14e9920..1e0eb7f97ec54 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -27,7 +27,7 @@ cdef:
# ----------------------------------------------------------------------
-cpdef resolution(int64_t[:] stamps, tz=None):
+cpdef resolution(const int64_t[:] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
npy_datetimestruct dts
@@ -38,7 +38,7 @@ cpdef resolution(int64_t[:] stamps, tz=None):
return _reso_local(stamps, tz)
-cdef _reso_local(int64_t[:] stamps, object tz):
+cdef _reso_local(const int64_t[:] stamps, object tz):
cdef:
Py_ssize_t i, n = len(stamps)
int reso = RESO_DAY, curr_reso
@@ -106,7 +106,7 @@ cdef inline int _reso_stamp(npy_datetimestruct *dts):
return RESO_DAY
-def get_freq_group(freq):
+def get_freq_group(freq) -> int:
"""
Return frequency code group of given frequency str or offset.
@@ -189,7 +189,7 @@ class Resolution:
_freq_reso_map = {v: k for k, v in _reso_freq_map.items()}
@classmethod
- def get_str(cls, reso):
+ def get_str(cls, reso: int) -> str:
"""
Return resolution str against resolution code.
@@ -201,7 +201,7 @@ class Resolution:
return cls._reso_str_map.get(reso, 'day')
@classmethod
- def get_reso(cls, resostr):
+ def get_reso(cls, resostr: str) -> int:
"""
Return resolution str against resolution code.
@@ -216,7 +216,7 @@ class Resolution:
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
- def get_freq_group(cls, resostr):
+ def get_freq_group(cls, resostr: str) -> int:
"""
Return frequency str against resolution str.
@@ -228,7 +228,7 @@ class Resolution:
return get_freq_group(cls.get_freq(resostr))
@classmethod
- def get_freq(cls, resostr):
+ def get_freq(cls, resostr: str) -> str:
"""
Return frequency str against resolution str.
@@ -240,7 +240,7 @@ class Resolution:
return cls._reso_freq_map[resostr]
@classmethod
- def get_str_from_freq(cls, freq):
+ def get_str_from_freq(cls, freq: str) -> str:
"""
Return resolution str against frequency str.
@@ -252,7 +252,7 @@ class Resolution:
return cls._freq_reso_map.get(freq, 'day')
@classmethod
- def get_reso_from_freq(cls, freq):
+ def get_reso_from_freq(cls, freq: str) -> int:
"""
Return resolution code against frequency str.
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 9c031baf70a77..3742506a7f8af 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1,5 +1,4 @@
import collections
-import textwrap
import cython
@@ -859,14 +858,6 @@ cdef class _Timedelta(timedelta):
"""
return self.to_timedelta64()
- def total_seconds(self):
- """
- Total duration of timedelta in seconds (to microsecond precision).
- """
- # GH 31043
- # Microseconds precision to avoid confusing tzinfo.utcoffset
- return (self.value - self.value % 1000) / 1e9
-
def view(self, dtype):
"""
Array view compatibility.
@@ -1250,7 +1241,7 @@ class Timedelta(_Timedelta):
return NaT
# make timedelta happy
- td_base = _Timedelta.__new__(cls, microseconds=int(value) / 1000)
+ td_base = _Timedelta.__new__(cls, microseconds=int(value) // 1000)
td_base.value = value
td_base.is_populated = 0
return td_base
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 4915671aa6512..b8c462abe35f1 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1,4 +1,3 @@
-import sys
import warnings
import numpy as np
diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd
index 15fedbb20beec..828bccf7d5641 100644
--- a/pandas/_libs/util.pxd
+++ b/pandas/_libs/util.pxd
@@ -1,7 +1,5 @@
from pandas._libs.tslibs.util cimport *
-from cython cimport Py_ssize_t
-
cimport numpy as cnp
from numpy cimport ndarray
@@ -51,49 +49,3 @@ cdef inline void set_array_not_contiguous(ndarray ao) nogil:
PyArray_CLEARFLAGS(ao,
(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS))
-
-cdef inline Py_ssize_t validate_indexer(ndarray arr, object loc) except -1:
- """
- Cast the given indexer `loc` to an integer. If it is negative, i.e. a
- python-style indexing-from-the-end indexer, translate it to a
- from-the-front indexer. Raise if this is not possible.
-
- Parameters
- ----------
- arr : ndarray
- loc : object
-
- Returns
- -------
- idx : Py_ssize_t
-
- Raises
- ------
- IndexError
- """
- cdef:
- Py_ssize_t idx, size
- int casted
-
- if is_float_object(loc):
- casted = int(loc)
- if casted == loc:
- loc = casted
-
- idx = <Py_ssize_t>loc
- size = cnp.PyArray_SIZE(arr)
-
- if idx < 0 and size > 0:
- idx += size
- if idx >= size or size == 0 or idx < 0:
- raise IndexError('index out of bounds')
-
- return idx
-
-
-cdef inline object get_value_at(ndarray arr, object loc):
- cdef:
- Py_ssize_t i
-
- i = validate_indexer(arr, loc)
- return arr[i]
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 631d550c60534..13af8703cef93 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -8,7 +8,7 @@
from shutil import rmtree
import string
import tempfile
-from typing import Any, List, Optional, Union, cast
+from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
@@ -2757,3 +2757,24 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
+
+
+def external_error_raised(
+ expected_exception: Type[Exception],
+) -> Callable[[Type[Exception], None], None]:
+ """
+ Helper function to mark pytest.raises that have an external error message.
+
+ Parameters
+ ----------
+ expected_exception : Exception
+ Expected error to raise.
+
+ Returns
+ -------
+ Callable
+ Regular `pytest.raises` function with `match` equal to `None`.
+ """
+ import pytest
+
+ return pytest.raises(expected_exception, match=None)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 3a6662d3e3ae2..d26ff7490e714 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2504,10 +2504,6 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
>>> s.cat.as_unordered()
"""
- _deprecations = PandasObject._deprecations | frozenset(
- ["categorical", "index", "name"]
- )
-
def __init__(self, data):
self._validate(data)
self._parent = data.values
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 48ad659b771f6..4bfd5f5770b69 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -25,6 +25,7 @@
from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
+import pandas.core.common as com
from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
@@ -586,9 +587,8 @@ def _reduce(self, name, skipna=True, **kwargs):
# if we have a preservable numeric op,
# provide coercion back to an integer type if possible
elif name in ["sum", "min", "max", "prod"]:
- int_result = int(result)
- if int_result == result:
- result = int_result
+ # GH#31409 more performant than casting-then-checking
+ result = com.cast_scalar_indexer(result)
return result
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 398ed75c060ca..0b35a031bc53f 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -27,6 +27,7 @@
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
+ ABCExtensionArray,
ABCIndexClass,
ABCInterval,
ABCIntervalIndex,
@@ -789,6 +790,33 @@ def size(self) -> int:
# Avoid materializing self.values
return self.left.size
+ def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArray:
+ if not len(self) or periods == 0:
+ return self.copy()
+
+ if isna(fill_value):
+ fill_value = self.dtype.na_value
+
+ # ExtensionArray.shift doesn't work for two reasons
+ # 1. IntervalArray.dtype.na_value may not be correct for the dtype.
+ # 2. IntervalArray._from_sequence only accepts NaN for missing values,
+ # not other values like NaT
+
+ empty_len = min(abs(periods), len(self))
+ if isna(fill_value):
+ fill_value = self.left._na_value
+ empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
+ else:
+ empty = self._from_sequence([fill_value] * empty_len)
+
+ if periods > 0:
+ a = empty
+ b = self[:-periods]
+ else:
+ a = self[abs(periods) :]
+ b = empty
+ return self._concat_same_type([a, b])
+
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index b476a019c66cc..8008805ddcf87 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -9,7 +9,7 @@
import numpy as np
-from pandas._libs import index as libindex, lib
+from pandas._libs import lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
@@ -794,7 +794,9 @@ def _get_val_at(self, loc):
if sp_loc == -1:
return self.fill_value
else:
- return libindex.get_value_at(self.sp_values, sp_loc)
+ val = self.sp_values[sp_loc]
+ val = com.maybe_box_datetimelike(val, self.sp_values.dtype)
+ return val
def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 9fe1af776dd2b..f3c8b50e774af 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1027,12 +1027,10 @@ def tolist(self):
--------
numpy.ndarray.tolist
"""
- if self.dtype.kind in ["m", "M"]:
- return [com.maybe_box_datetimelike(x) for x in self._values]
- elif is_extension_array_dtype(self._values):
+ if not isinstance(self._values, np.ndarray):
+ # check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
- else:
- return self._values.tolist()
+ return self._values.tolist()
to_list = tolist
@@ -1049,9 +1047,8 @@ def __iter__(self):
iterator
"""
# We are explicitly making element iterators.
- if self.dtype.kind in ["m", "M"]:
- return map(com.maybe_box_datetimelike, self._values)
- elif is_extension_array_dtype(self._values):
+ if not isinstance(self._values, np.ndarray):
+ # Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a76119da2707a..00c7a41477017 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -72,8 +72,12 @@ def consensus_name_attr(objs):
return name
-def maybe_box_datetimelike(value):
+def maybe_box_datetimelike(value, dtype=None):
# turn a datetime like into a Timestamp/timedelta as needed
+ if dtype == object:
+ # If we dont have datetime64/timedelta64 dtype, we dont want to
+ # box datetimelike scalars
+ return value
if isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
@@ -156,7 +160,7 @@ def cast_scalar_indexer(val):
outval : scalar
"""
# assumes lib.is_scalar(val)
- if lib.is_float(val) and val == int(val):
+ if lib.is_float(val) and val.is_integer():
return int(val)
return val
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 3776c6f816d96..b0410e31c6de7 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,6 +9,8 @@
module is imported, register them here rather than in the module.
"""
+import warnings
+
import pandas._config.config as cf
from pandas._config.config import (
is_bool,
@@ -341,8 +343,26 @@ def is_terminal() -> bool:
validator=is_instance_factory([type(None), int]),
)
cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)
+
+ def _deprecate_negative_int_max_colwidth(key):
+ value = cf.get_option(key)
+ if value is not None and value < 0:
+ warnings.warn(
+ "Passing a negative integer is deprecated in version 1.0 and "
+ "will not be supported in future version. Instead, use None "
+ "to not limit the column width.",
+ FutureWarning,
+ stacklevel=4,
+ )
+
cf.register_option(
- "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int
+ # FIXME: change `validator=is_nonnegative_int`
+ # in version 1.2
+ "max_colwidth",
+ 50,
+ max_colwidth_doc,
+ validator=is_instance_factory([type(None), int]),
+ cb=_deprecate_negative_int_max_colwidth,
)
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 52c569793e499..0719b8ce6010b 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1,11 +1,18 @@
""" routings for casting """
-from datetime import datetime, timedelta
+from datetime import date, datetime, timedelta
import numpy as np
from pandas._libs import lib, tslib, tslibs
-from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT
+from pandas._libs.tslibs import (
+ NaT,
+ OutOfBoundsDatetime,
+ Period,
+ Timedelta,
+ Timestamp,
+ iNaT,
+)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import Dtype
from pandas.util._validators import validate_bool_kwarg
@@ -1599,3 +1606,59 @@ def maybe_cast_to_integer_array(arr, dtype, copy: bool = False):
if is_integer_dtype(dtype) and (is_float_dtype(arr) or is_object_dtype(arr)):
raise ValueError("Trying to coerce float values to integers")
+
+
+def convert_scalar_for_putitemlike(scalar, dtype: np.dtype):
+ """
+ Convert datetimelike scalar if we are setting into a datetime64
+ or timedelta64 ndarray.
+
+ Parameters
+ ----------
+ scalar : scalar
+ dtype : np.dtpye
+
+ Returns
+ -------
+ scalar
+ """
+ if dtype.kind == "m":
+ if isinstance(scalar, (timedelta, np.timedelta64)):
+ # We have to cast after asm8 in case we have NaT
+ return Timedelta(scalar).asm8.view("timedelta64[ns]")
+ elif scalar is None or scalar is NaT or (is_float(scalar) and np.isnan(scalar)):
+ return np.timedelta64("NaT", "ns")
+ if dtype.kind == "M":
+ if isinstance(scalar, (date, np.datetime64)):
+ # Note: we include date, not just datetime
+ return Timestamp(scalar).to_datetime64()
+ elif scalar is None or scalar is NaT or (is_float(scalar) and np.isnan(scalar)):
+ return np.datetime64("NaT", "ns")
+ else:
+ validate_numeric_casting(dtype, scalar)
+ return scalar
+
+
+def validate_numeric_casting(dtype: np.dtype, value):
+ """
+ Check that we can losslessly insert the given value into an array
+ with the given dtype.
+
+ Parameters
+ ----------
+ dtype : np.dtype
+ value : scalar
+
+ Raises
+ ------
+ ValueError
+ """
+ if issubclass(dtype.type, (np.integer, np.bool_)):
+ if is_float(value) and np.isnan(value):
+ raise ValueError("Cannot assign nan to integer series")
+
+ if issubclass(dtype.type, (np.integer, np.floating, np.complex)) and not issubclass(
+ dtype.type, np.bool_
+ ):
+ if is_bool(value):
+ raise ValueError("Cannot assign bool to float/integer series")
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0dea8235e9d3f..e0efa93379bca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -69,6 +69,7 @@
maybe_infer_to_datetimelike,
maybe_upcast,
maybe_upcast_putmask,
+ validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_float64,
@@ -2900,12 +2901,8 @@ def _get_value(self, index, col, takeable: bool = False):
engine = self.index._engine
try:
- if isinstance(series._values, np.ndarray):
- # i.e. not EA, we can use engine
- return engine.get_value(series._values, index)
- else:
- loc = series.index.get_loc(index)
- return series._values[loc]
+ loc = engine.get_loc(index)
+ return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
@@ -3028,10 +3025,14 @@ def _set_value(self, index, col, value, takeable: bool = False):
series = self._get_item_cache(col)
engine = self.index._engine
- engine.set_value(series._values, index, value)
+ loc = engine.get_loc(index)
+ validate_numeric_casting(series.dtype, value)
+
+ series._values[loc] = value
+ # Note: trying to use series._set_value breaks tests in
+ # tests.frame.indexing.test_indexing and tests.indexing.test_partial
return self
except (KeyError, TypeError):
-
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
@@ -6556,7 +6557,9 @@ def unstack(self, level=-1, fill_value=None):
@Appender(
_shared_docs["melt"]
% dict(
- caller="df.melt(", versionadded=".. versionadded:: 0.20.0\n", other="melt"
+ caller="df.melt(",
+ versionadded="\n .. versionadded:: 0.20.0\n",
+ other="melt",
)
)
def melt(
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3b1d7e4c50be5..313d40b575629 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1922,10 +1922,8 @@ def _repr_data_resource_(self):
Parameters
----------
- buf : writable buffer, defaults to sys.stdout
- Where to send the output. By default, the output is printed to
- sys.stdout. Pass a writable buffer if you need to further process
- the output.
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
@@ -3444,15 +3442,14 @@ class animal locomotion
new_index = self.index[loc]
if is_scalar(loc):
- new_values = self._data.fast_xs(loc)
+ # In this case loc should be an integer
+ if self.ndim == 1:
+ # if we encounter an array-like and we only have 1 dim
+ # that means that their are list/ndarrays inside the Series!
+ # so just return them (GH 6394)
+ return self._values[loc]
- # may need to box a datelike-scalar
- #
- # if we encounter an array-like and we only have 1 dim
- # that means that their are list/ndarrays inside the Series!
- # so just return them (GH 6394)
- if not is_list_like(new_values) or self.ndim == 1:
- return com.maybe_box_datetimelike(new_values)
+ new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
@@ -3501,7 +3498,9 @@ def _iget_item_cache(self, item):
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
- def _slice(self: FrameOrSeries, slobj: slice, axis=0, kind=None) -> FrameOrSeries:
+ def _slice(
+ self: FrameOrSeries, slobj: slice, axis=0, kind: str = "getitem"
+ ) -> FrameOrSeries:
"""
Construct a slice of this container.
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 27dd6e953c219..f194c774cf329 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1022,6 +1022,10 @@ def _cython_agg_blocks(
agg_blocks: List[Block] = []
new_items: List[np.ndarray] = []
deleted_items: List[np.ndarray] = []
+ # Some object-dtype blocks might be split into List[Block[T], Block[U]]
+ split_items: List[np.ndarray] = []
+ split_frames: List[DataFrame] = []
+
no_result = object()
for block in data.blocks:
# Avoid inheriting result from earlier in the loop
@@ -1061,40 +1065,56 @@ def _cython_agg_blocks(
else:
result = cast(DataFrame, result)
# unwrap DataFrame to get array
+ if len(result._data.blocks) != 1:
+ # We've split an object block! Everything we've assumed
+ # about a single block input returning a single block output
+ # is a lie. To keep the code-path for the typical non-split case
+ # clean, we choose to clean up this mess later on.
+ split_items.append(locs)
+ split_frames.append(result)
+ continue
+
assert len(result._data.blocks) == 1
result = result._data.blocks[0].values
if isinstance(result, np.ndarray) and result.ndim == 1:
result = result.reshape(1, -1)
- finally:
- assert not isinstance(result, DataFrame)
-
- if result is not no_result:
- # see if we can cast the block back to the original dtype
- result = maybe_downcast_numeric(result, block.dtype)
-
- if block.is_extension and isinstance(result, np.ndarray):
- # e.g. block.values was an IntegerArray
- # (1, N) case can occur if block.values was Categorical
- # and result is ndarray[object]
- assert result.ndim == 1 or result.shape[0] == 1
- try:
- # Cast back if feasible
- result = type(block.values)._from_sequence(
- result.ravel(), dtype=block.values.dtype
- )
- except ValueError:
- # reshape to be valid for non-Extension Block
- result = result.reshape(1, -1)
+ assert not isinstance(result, DataFrame)
+
+ if result is not no_result:
+ # see if we can cast the block back to the original dtype
+ result = maybe_downcast_numeric(result, block.dtype)
+
+ if block.is_extension and isinstance(result, np.ndarray):
+ # e.g. block.values was an IntegerArray
+ # (1, N) case can occur if block.values was Categorical
+ # and result is ndarray[object]
+ assert result.ndim == 1 or result.shape[0] == 1
+ try:
+ # Cast back if feasible
+ result = type(block.values)._from_sequence(
+ result.ravel(), dtype=block.values.dtype
+ )
+ except ValueError:
+ # reshape to be valid for non-Extension Block
+ result = result.reshape(1, -1)
- agg_block: Block = block.make_block(result)
+ agg_block: Block = block.make_block(result)
new_items.append(locs)
agg_blocks.append(agg_block)
- if not agg_blocks:
+ if not (agg_blocks or split_frames):
raise DataError("No numeric types to aggregate")
+ if split_items:
+ # Clean up the mess left over from split blocks.
+ for locs, result in zip(split_items, split_frames):
+ assert len(locs) == result.shape[1]
+ for i, loc in enumerate(locs):
+ new_items.append(np.array([loc], dtype=locs.dtype))
+ agg_blocks.append(result.iloc[:, [i]]._data.blocks[0])
+
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 54275dc52bb56..0245b9f74d944 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1180,10 +1180,16 @@ def count(self):
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
- def mean(self, *args, **kwargs):
+ def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
+ Parameters
+ ----------
+ numeric_only : bool, default True
+ Include only float, int, boolean columns. If None, will attempt to use
+ everything, then use only numeric data.
+
Returns
-------
pandas.Series or pandas.DataFrame
@@ -1222,19 +1228,26 @@ def mean(self, *args, **kwargs):
2 4.0
Name: B, dtype: float64
"""
- nv.validate_groupby_func("mean", args, kwargs, ["numeric_only"])
return self._cython_agg_general(
- "mean", alt=lambda x, axis: Series(x).mean(**kwargs), **kwargs
+ "mean",
+ alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
+ numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
- def median(self, **kwargs):
+ def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
+ Parameters
+ ----------
+ numeric_only : bool, default True
+ Include only float, int, boolean columns. If None, will attempt to use
+ everything, then use only numeric data.
+
Returns
-------
Series or DataFrame
@@ -1242,13 +1255,13 @@ def median(self, **kwargs):
"""
return self._cython_agg_general(
"median",
- alt=lambda x, axis: Series(x).median(axis=axis, **kwargs),
- **kwargs,
+ alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
+ numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
- def std(self, ddof: int = 1, *args, **kwargs):
+ def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
@@ -1266,12 +1279,11 @@ def std(self, ddof: int = 1, *args, **kwargs):
"""
# TODO: implement at Cython level?
- nv.validate_groupby_func("std", args, kwargs)
- return np.sqrt(self.var(ddof=ddof, **kwargs))
+ return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
- def var(self, ddof: int = 1, *args, **kwargs):
+ def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
@@ -1287,15 +1299,14 @@ def var(self, ddof: int = 1, *args, **kwargs):
Series or DataFrame
Variance of values within each group.
"""
- nv.validate_groupby_func("var", args, kwargs)
if ddof == 1:
return self._cython_agg_general(
- "var", alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs), **kwargs
+ "var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
- f = lambda x: x.var(ddof=ddof, **kwargs)
+ func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
- return self._python_agg_general(f)
+ return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
@@ -1383,7 +1394,9 @@ def func(self, numeric_only=numeric_only, min_count=min_count):
except DataError:
pass
except NotImplementedError as err:
- if "function is not implemented for this dtype" in str(err):
+ if "function is not implemented for this dtype" in str(
+ err
+ ) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 77c54ec736aaa..761353ca5a6ca 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -31,6 +31,7 @@
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
+ is_period_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
@@ -567,7 +568,12 @@ def _cython_operation(
if swapped:
result = result.swapaxes(0, axis)
- if is_datetime64tz_dtype(orig_values.dtype):
+ if is_datetime64tz_dtype(orig_values.dtype) or is_period_dtype(
+ orig_values.dtype
+ ):
+ # We need to use the constructors directly for these dtypes
+ # since numpy won't recognize them
+ # https://github.com/pandas-dev/pandas/issues/31471
result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype)
elif is_datetimelike and kind == "aggregate":
result = result.astype(orig_values.dtype)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 95cfab4c96af3..e8ad2bef099a1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1,7 +1,7 @@
from datetime import datetime
import operator
from textwrap import dedent
-from typing import Any, FrozenSet, Hashable, Optional, Union
+from typing import TYPE_CHECKING, Any, FrozenSet, Hashable, Optional, Union
import warnings
import numpy as np
@@ -18,7 +18,10 @@
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes import concat as _concat
-from pandas.core.dtypes.cast import maybe_cast_to_integer_array
+from pandas.core.dtypes.cast import (
+ maybe_cast_to_integer_array,
+ validate_numeric_casting,
+)
from pandas.core.dtypes.common import (
ensure_categorical,
ensure_int64,
@@ -68,7 +71,7 @@
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
-from pandas.core.indexers import deprecate_ndim_indexing, maybe_convert_indices
+from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
@@ -83,6 +86,10 @@
pprint_thing,
)
+if TYPE_CHECKING:
+ from pandas import Series
+
+
__all__ = ["Index"]
_unsortable_types = frozenset(("mixed", "mixed-integer"))
@@ -522,6 +529,7 @@ def _shallow_copy(self, values=None, **kwargs):
values = self.values
attributes = self._get_attributes_dict()
+
attributes.update(kwargs)
return self._simple_new(values, **attributes)
@@ -2566,6 +2574,7 @@ def _union(self, other, sort):
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
+ result = Index(result)._values # do type inference here
else:
# find indexes of things in "other" that are not in "self"
if self.is_unique:
@@ -2595,7 +2604,8 @@ def _union(self, other, sort):
return self._wrap_setop_result(other, result)
def _wrap_setop_result(self, other, result):
- return self._constructor(result, name=get_op_result_name(self, other))
+ name = get_op_result_name(self, other)
+ return self._shallow_copy(result, name=name)
# TODO: standardize return type of non-union setops type(self vs other)
def intersection(self, other, sort=False):
@@ -2652,9 +2662,10 @@ def intersection(self, other, sort=False):
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
- return self._wrap_setop_result(other, result)
except TypeError:
pass
+ else:
+ return self._wrap_setop_result(other, result)
try:
indexer = Index(rvals).get_indexer(lvals)
@@ -2880,10 +2891,15 @@ def get_loc(self, key, method=None, tolerance=None):
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
+ casted_key = self._maybe_cast_indexer(key)
try:
- return self._engine.get_loc(key)
+ return self._engine.get_loc(casted_key)
except KeyError:
- return self._engine.get_loc(self._maybe_cast_indexer(key))
+ raise KeyError(key)
+
+ if tolerance is not None:
+ tolerance = self._convert_tolerance(tolerance, np.asarray(key))
+
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("get_loc requires scalar valued input")
@@ -3061,9 +3077,8 @@ def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray:
left_indexer = self.get_indexer(target, "pad", limit=limit)
right_indexer = self.get_indexer(target, "backfill", limit=limit)
- target = np.asarray(target)
- left_distances = abs(self.values[left_indexer] - target)
- right_distances = abs(self.values[right_indexer] - target)
+ left_distances = np.abs(self[left_indexer] - target)
+ right_distances = np.abs(self[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(
@@ -3085,20 +3100,16 @@ def _filter_indexer_tolerance(
# --------------------------------------------------------------------
# Indexer Conversion Methods
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str_t):
"""
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem'}
"""
- assert kind in ["loc", "getitem", "iloc", None]
-
- if kind == "iloc":
- self._validate_indexer("positional", key, "iloc")
- return key
+ assert kind in ["loc", "getitem"]
if len(self) and not isinstance(self, ABCMultiIndex):
@@ -3147,9 +3158,9 @@ def _convert_slice_indexer(self, key: slice, kind=None):
# validate iloc
if kind == "iloc":
- self._validate_indexer("slice", key.start, "iloc")
- self._validate_indexer("slice", key.stop, "iloc")
- self._validate_indexer("slice", key.step, "iloc")
+ self._validate_indexer("positional", key.start, "iloc")
+ self._validate_indexer("positional", key.stop, "iloc")
+ self._validate_indexer("positional", key.step, "iloc")
return key
# potentially cast the bounds to integers
@@ -3200,7 +3211,7 @@ def is_int(v):
return indexer
- def _convert_listlike_indexer(self, keyarr, kind=None):
+ def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
@@ -3219,7 +3230,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None):
else:
keyarr = self._convert_arr_indexer(keyarr)
- indexer = self._convert_list_indexer(keyarr, kind=kind)
+ indexer = self._convert_list_indexer(keyarr)
return indexer, keyarr
def _convert_arr_indexer(self, keyarr):
@@ -3253,7 +3264,7 @@ def _convert_index_indexer(self, keyarr):
"""
return keyarr
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
"""
Convert a list-like indexer to the appropriate dtype.
@@ -3267,29 +3278,6 @@ def _convert_list_indexer(self, keyarr, kind=None):
-------
positional indexer or None
"""
- if (
- kind in [None, "iloc"]
- and is_integer_dtype(keyarr)
- and not self.is_floating()
- ):
-
- if self.inferred_type == "mixed-integer":
- indexer = self.get_indexer(keyarr)
- if (indexer >= 0).all():
- return indexer
- # missing values are flagged as -1 by get_indexer and negative
- # indices are already converted to positive indices in the
- # above if-statement, so the negative flags are changed to
- # values outside the range of indices so as to trigger an
- # IndexError in maybe_convert_indices
- indexer[indexer < 0] = len(self)
-
- return maybe_convert_indices(indexer, len(self))
-
- elif not self.inferred_type == "integer":
- keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
- return keyarr
-
return None
def _invalid_indexer(self, form: str_t, key):
@@ -3297,8 +3285,8 @@ def _invalid_indexer(self, form: str_t, key):
Consistent invalid indexer message.
"""
raise TypeError(
- f"cannot do {form} indexing on {type(self)} with these "
- f"indexers [{key}] of {type(key)}"
+ f"cannot do {form} indexing on {type(self).__name__} with these "
+ f"indexers [{key}] of type {type(key).__name__}"
)
# --------------------------------------------------------------------
@@ -4096,6 +4084,11 @@ def __contains__(self, key: Any) -> bool:
bool
Whether the key search is in the index.
+ Raises
+ ------
+ TypeError
+ If the key is not hashable.
+
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
@@ -4573,21 +4566,15 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
result = np.array(self)
return result.argsort(*args, **kwargs)
- _index_shared_docs[
- "get_value"
- ] = """
+ def get_value(self, series: "Series", key):
+ """
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing.
Returns
-------
- scalar
- A value in the Series with the index of the key value in self.
+ scalar or Series
"""
-
- @Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
- def get_value(self, series, key):
-
if not is_scalar(key):
# if key is not a scalar, directly raise an error (the code below
# would convert to numpy arrays and raise later any way) - GH29926
@@ -4599,9 +4586,9 @@ def get_value(self, series, key):
# If that fails, raise a KeyError if an integer
# index, otherwise, see if key is an integer, and
# try that
- loc = self._engine.get_loc(key)
+ loc = self.get_loc(key)
except KeyError:
- if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
+ if not self._should_fallback_to_positional():
raise
elif is_integer(key):
# If the Index cannot hold integer, then this is unambiguously
@@ -4612,7 +4599,15 @@ def get_value(self, series, key):
return self._get_values_for_loc(series, loc)
- def _get_values_for_loc(self, series, loc):
+ def _should_fallback_to_positional(self) -> bool:
+ """
+ If an integer key is not found, should we fall back to positional indexing?
+ """
+ if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
+ return False
+ return True
+
+ def _get_values_for_loc(self, series: "Series", loc):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
@@ -4620,10 +4615,6 @@ def _get_values_for_loc(self, series, loc):
Assumes that `series.index is self`
"""
if is_integer(loc):
- if isinstance(series._values, np.ndarray):
- # Since we have an ndarray and not DatetimeArray, we dont
- # have to worry about a tz.
- return libindex.get_value_at(series._values, loc, tz=None)
return series._values[loc]
return series.iloc[loc]
@@ -4646,9 +4637,9 @@ def set_value(self, arr, key, value):
FutureWarning,
stacklevel=2,
)
- self._engine.set_value(
- com.values_from_object(arr), com.values_from_object(key), value
- )
+ loc = self._engine.get_loc(key)
+ validate_numeric_casting(arr.dtype, value)
+ arr[loc] = value
_index_shared_docs[
"get_indexer_non_unique"
@@ -4929,13 +4920,8 @@ def _maybe_cast_indexer(self, key):
to an int if equivalent.
"""
- if is_float(key) and not self.is_floating():
- try:
- ckey = int(key)
- if ckey == key:
- key = ckey
- except (OverflowError, ValueError, TypeError):
- pass
+ if not self.is_floating():
+ return com.cast_scalar_indexer(key)
return key
def _validate_indexer(self, form: str_t, key, kind: str_t):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index d556c014467cf..85229c728848f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -1,4 +1,4 @@
-from typing import Any, List
+from typing import TYPE_CHECKING, Any, List
import warnings
import numpy as np
@@ -7,7 +7,6 @@
from pandas._libs import index as libindex
from pandas._libs.hashtable import duplicated_int64
-from pandas._typing import AnyArrayLike
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
@@ -29,7 +28,9 @@
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
import pandas.core.missing as missing
-from pandas.core.ops import get_op_result_name
+
+if TYPE_CHECKING:
+ from pandas import Series
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
@@ -159,17 +160,6 @@ class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
_typ = "categoricalindex"
- _raw_inherit = {
- "argsort",
- "_internal_get_values",
- "tolist",
- "codes",
- "categories",
- "ordered",
- "_reverse_indexer",
- "searchsorted",
- }
-
codes: np.ndarray
categories: Index
_data: Categorical
@@ -386,12 +376,6 @@ def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
- def _wrap_setop_result(self, other, result):
- name = get_op_result_name(self, other)
- # We use _shallow_copy rather than the Index implementation
- # (which uses _constructor) in order to preserve dtype.
- return self._shallow_copy(result, name=name)
-
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
@@ -455,53 +439,19 @@ def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype("object")
- def get_loc(self, key, method=None):
- """
- Get integer location, slice or boolean mask for requested label.
-
- Parameters
- ----------
- key : label
- method : {None}
- * default: exact matches only.
-
- Returns
- -------
- loc : int if unique index, slice if monotonic index, else mask
-
- Raises
- ------
- KeyError : if the key is not in the index
-
- Examples
- --------
- >>> unique_index = pd.CategoricalIndex(list('abc'))
- >>> unique_index.get_loc('b')
- 1
-
- >>> monotonic_index = pd.CategoricalIndex(list('abbc'))
- >>> monotonic_index.get_loc('b')
- slice(1, 3, None)
-
- >>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
- >>> non_monotonic_index.get_loc('b')
- array([False, True, False, True], dtype=bool)
- """
+ def _maybe_cast_indexer(self, key):
code = self.categories.get_loc(key)
code = self.codes.dtype.type(code)
- try:
- return self._engine.get_loc(code)
- except KeyError:
- raise KeyError(key)
+ return code
- def get_value(self, series: AnyArrayLike, key: Any):
+ def get_value(self, series: "Series", key: Any):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
Parameters
----------
- series : Series, ExtensionArray, Index, or ndarray
+ series : Series
1-dimensional array to take values from
key: : scalar
The value of this index at the position of the desired value,
@@ -521,7 +471,7 @@ def get_value(self, series: AnyArrayLike, key: Any):
pass
# we might be a positional inexer
- return super().get_value(series, key)
+ return Index.get_value(self, series, key)
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
@@ -674,21 +624,22 @@ def get_indexer_non_unique(self, target):
return ensure_platform_int(indexer), missing
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
if kind == "loc":
try:
- return self.categories._convert_scalar_indexer(key, kind=kind)
+ return self.categories._convert_scalar_indexer(key, kind="loc")
except TypeError:
self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
@Appender(Index._convert_list_indexer.__doc__)
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
- indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
+ indexer = self.categories._convert_list_indexer(keyarr)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
@@ -852,18 +803,13 @@ def _concat_same_dtype(self, to_concat, name):
result.name = name
return result
- def _delegate_property_get(self, name: str, *args, **kwargs):
- """ method delegation to the ._values """
- prop = getattr(self._values, name)
- return prop # no wrapping for now
-
def _delegate_method(self, name: str, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if "inplace" in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
- if is_scalar(res) or name in self._raw_inherit:
+ if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index e3eeca2c45e76..d06d0d499ef47 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -80,7 +80,16 @@ def wrapper(left, right):
cache=True,
)
@inherit_names(
- ["__iter__", "mean", "freq", "freqstr", "_ndarray_values", "asi8", "_box_values"],
+ [
+ "__iter__",
+ "mean",
+ "freq",
+ "freqstr",
+ "_ndarray_values",
+ "asi8",
+ "_box_values",
+ "_box_func",
+ ],
DatetimeLikeArrayMixin,
)
class DatetimeIndexOpsMixin(ExtensionIndex):
@@ -191,7 +200,7 @@ def sort_values(self, return_indexer=False, ascending=True):
arr = type(self._data)._simple_new(
sorted_values, dtype=self.dtype, freq=freq
)
- return self._simple_new(arr, name=self.name)
+ return type(self)._simple_new(arr, name=self.name)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
@@ -374,8 +383,9 @@ def _format_attrs(self):
return attrs
# --------------------------------------------------------------------
+ # Indexing Methods
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
@@ -383,23 +393,27 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem'}
"""
- assert kind in ["loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem"]
+
+ if not is_scalar(key):
+ raise TypeError(key)
# we don't allow integer/float indexing for loc
- # we don't allow float indexing for ix/getitem
- if is_scalar(key):
- is_int = is_integer(key)
- is_flt = is_float(key)
- if kind in ["loc"] and (is_int or is_flt):
- self._invalid_indexer("index", key)
- elif kind in ["getitem"] and is_flt:
- self._invalid_indexer("index", key)
+ # we don't allow float indexing for getitem
+ is_int = is_integer(key)
+ is_flt = is_float(key)
+ if kind == "loc" and (is_int or is_flt):
+ self._invalid_indexer("label", key)
+ elif kind == "getitem" and is_flt:
+ self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
+ # --------------------------------------------------------------------
+
__add__ = make_wrapped_arith_op("__add__")
__radd__ = make_wrapped_arith_op("__radd__")
__sub__ = make_wrapped_arith_op("__sub__")
@@ -514,7 +528,7 @@ def _concat_same_dtype(self, to_concat, name):
if is_diff_evenly_spaced:
new_data._freq = self.freq
- return self._simple_new(new_data, name=name)
+ return type(self)._simple_new(new_data, name=name)
def shift(self, periods=1, freq=None):
"""
@@ -617,7 +631,7 @@ def _shallow_copy(self, values=None, **kwargs):
del attributes["freq"]
attributes.update(kwargs)
- return self._simple_new(values, **attributes)
+ return type(self)._simple_new(values, **attributes)
# --------------------------------------------------------------------
# Set Operation Methods
@@ -789,11 +803,10 @@ def _union(self, other, sort):
if this._can_fast_union(other):
return this._fast_union(other, sort=sort)
else:
- result = Index._union(this, other, sort=sort)
- if isinstance(result, type(self)):
- assert result._data.dtype == this.dtype
- if result.freq is None:
- result._set_freq("infer")
+ i8self = Int64Index._simple_new(self.asi8, name=self.name)
+ i8other = Int64Index._simple_new(other.asi8, name=other.name)
+ i8result = i8self._union(i8other, sort=sort)
+ result = type(self)(i8result, dtype=self.dtype, freq="infer")
return result
# --------------------------------------------------------------------
@@ -875,7 +888,7 @@ def _wrap_joined_index(self, joined, other):
kwargs = {}
if hasattr(self, "tz"):
kwargs["tz"] = getattr(other, "tz", None)
- return self._simple_new(joined, name, **kwargs)
+ return type(self)._simple_new(joined, name, **kwargs)
# --------------------------------------------------------------------
# List-Like Methods
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 416c3d0701a85..3d57f0944b318 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -5,15 +5,8 @@
import numpy as np
-from pandas._libs import (
- NaT,
- Timedelta,
- Timestamp,
- index as libindex,
- lib,
- tslib as libts,
-)
-from pandas._libs.tslibs import ccalendar, fields, parsing, timezones
+from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib as libts
+from pandas._libs.tslibs import fields, parsing, timezones
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import _NS_DTYPE, is_float, is_integer, is_scalar
@@ -29,7 +22,6 @@
from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
-from pandas.core.ops import get_op_result_name
import pandas.core.tools.datetimes as tools
from pandas.tseries.frequencies import Resolution, to_offset
@@ -70,7 +62,6 @@ def _new_DatetimeIndex(cls, d):
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
- "_box_func",
"tz",
"tzinfo",
"dtype",
@@ -348,18 +339,9 @@ def union_many(self, others):
if this._can_fast_union(other):
this = this._fast_union(other)
else:
- dtype = this.dtype
this = Index.union(this, other)
- if isinstance(this, DatetimeIndex):
- # TODO: we shouldn't be setting attributes like this;
- # in all the tests this equality already holds
- this._data._dtype = dtype
return this
- def _wrap_setop_result(self, other, result):
- name = get_op_result_name(self, other)
- return self._shallow_copy(result, name=name, freq=None)
-
# --------------------------------------------------------------------
def _get_time_micros(self):
@@ -476,7 +458,7 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
Parameters
----------
- reso : Resolution
+ reso : str
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
@@ -484,7 +466,6 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
Returns
-------
lower, upper: pd.Timestamp
-
"""
valid_resos = {
"year",
@@ -500,50 +481,11 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
}
if reso not in valid_resos:
raise KeyError
- if reso == "year":
- start = Timestamp(parsed.year, 1, 1)
- end = Timestamp(parsed.year + 1, 1, 1) - Timedelta(nanoseconds=1)
- elif reso == "month":
- d = ccalendar.get_days_in_month(parsed.year, parsed.month)
- start = Timestamp(parsed.year, parsed.month, 1)
- end = start + Timedelta(days=d, nanoseconds=-1)
- elif reso == "quarter":
- qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
- d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month
- start = Timestamp(parsed.year, parsed.month, 1)
- end = Timestamp(parsed.year, qe, 1) + Timedelta(days=d, nanoseconds=-1)
- elif reso == "day":
- start = Timestamp(parsed.year, parsed.month, parsed.day)
- end = start + Timedelta(days=1, nanoseconds=-1)
- elif reso == "hour":
- start = Timestamp(parsed.year, parsed.month, parsed.day, parsed.hour)
- end = start + Timedelta(hours=1, nanoseconds=-1)
- elif reso == "minute":
- start = Timestamp(
- parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute
- )
- end = start + Timedelta(minutes=1, nanoseconds=-1)
- elif reso == "second":
- start = Timestamp(
- parsed.year,
- parsed.month,
- parsed.day,
- parsed.hour,
- parsed.minute,
- parsed.second,
- )
- end = start + Timedelta(seconds=1, nanoseconds=-1)
- elif reso == "microsecond":
- start = Timestamp(
- parsed.year,
- parsed.month,
- parsed.day,
- parsed.hour,
- parsed.minute,
- parsed.second,
- parsed.microsecond,
- )
- end = start + Timedelta(microseconds=1, nanoseconds=-1)
+
+ grp = Resolution.get_freq_group(reso)
+ per = Period(parsed, freq=(grp, 1))
+ start, end = per.start_time, per.end_time
+
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
@@ -601,6 +543,7 @@ def _partial_date_slice(
raise KeyError
# a monotonic (sorted) series can be sliced
+ # Use asi8.searchsorted to avoid re-validating
left = stamps.searchsorted(t1.value, side="left") if use_lhs else None
right = stamps.searchsorted(t2.value, side="right") if use_rhs else None
@@ -617,17 +560,6 @@ def _maybe_promote(self, other):
other = DatetimeIndex(other)
return self, other
- def get_value(self, series, key):
- """
- Fast lookup of value from 1-dimensional ndarray. Only use this if you
- know what you're doing
- """
- if is_integer(key):
- loc = key
- else:
- loc = self.get_loc(key)
- return self._get_values_for_loc(series, loc)
-
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -639,18 +571,13 @@ def get_loc(self, key, method=None, tolerance=None):
if not is_scalar(key):
raise InvalidIndexError(key)
+ orig_key = key
if is_valid_nat_for_dtype(key, self.dtype):
key = NaT
- if tolerance is not None:
- # try converting tolerance now, so errors don't get swallowed by
- # the try/except clauses below
- tolerance = self._convert_tolerance(tolerance, np.asarray(key))
-
- if isinstance(key, (datetime, np.datetime64)):
+ if isinstance(key, self._data._recognized_scalars):
# needed to localize naive datetimes
key = self._maybe_cast_for_get_loc(key)
- return Index.get_loc(self, key, method, tolerance)
elif isinstance(key, str):
try:
@@ -659,9 +586,8 @@ def get_loc(self, key, method=None, tolerance=None):
pass
try:
- stamp = self._maybe_cast_for_get_loc(key)
- return Index.get_loc(self, stamp, method, tolerance)
- except (KeyError, ValueError):
+ key = self._maybe_cast_for_get_loc(key)
+ except ValueError:
raise KeyError(key)
elif isinstance(key, timedelta):
@@ -670,14 +596,21 @@ def get_loc(self, key, method=None, tolerance=None):
f"Cannot index {type(self).__name__} with {type(key).__name__}"
)
- if isinstance(key, time):
+ elif isinstance(key, time):
if method is not None:
raise NotImplementedError(
"cannot yet lookup inexact labels when key is a time object"
)
return self.indexer_at_time(key)
- return Index.get_loc(self, key, method, tolerance)
+ else:
+ # unrecognized type
+ raise KeyError(key)
+
+ try:
+ return Index.get_loc(self, key, method, tolerance)
+ except KeyError:
+ raise KeyError(orig_key)
def _maybe_cast_for_get_loc(self, key) -> Timestamp:
# needed to localize naive datetimes
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 6a3e808ab9821..03fb8db2e1e1e 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,7 +1,7 @@
""" define the IntervalIndex """
from operator import le, lt
import textwrap
-from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
+from typing import Any, Optional, Tuple, Union
import numpy as np
@@ -57,10 +57,6 @@
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
-if TYPE_CHECKING:
- from pandas import Series
-
-
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -527,17 +523,22 @@ def is_overlapping(self) -> bool:
# GH 23309
return self._engine.is_overlapping
+ def _should_fallback_to_positional(self):
+ # integer lookups in Series.__getitem__ are unambiguously
+ # positional in this case
+ return self.dtype.subtype.kind in ["m", "M"]
+
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- if kind == "iloc":
- return super()._convert_scalar_indexer(key, kind=kind)
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["getitem", "loc"]
+ # never iloc, so no-op
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(Index._convert_list_indexer.__doc__)
- def _convert_list_indexer(self, keyarr, kind=None):
+ def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
@@ -884,11 +885,6 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
- @Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
- def get_value(self, series: "Series", key):
- loc = self.get_loc(key)
- return series.iloc[loc]
-
def _convert_slice_indexer(self, key: slice, kind=None):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 889622f44bbb7..708bea7d132a2 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,14 +1,14 @@
-import datetime
from sys import getsizeof
-from typing import Any, Hashable, List, Optional, Sequence, Union
+from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
import warnings
import numpy as np
from pandas._config import get_option
-from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs
+from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
+from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
@@ -234,6 +234,8 @@ class MultiIndex(Index):
_comparables = ["names"]
rename = Index.set_names
+ _tuples = None
+
# --------------------------------------------------------------------
# Constructors
@@ -620,29 +622,29 @@ def from_frame(cls, df, sortorder=None, names=None):
# --------------------------------------------------------------------
- @property
- def levels(self):
- result = [
- x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
- ]
- for level in result:
- # disallow midx.levels[0].name = "foo"
- level._no_setting_name = True
- return FrozenList(result)
-
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
- def shape(self):
- """
- Return a tuple of the shape of the underlying data.
- """
- # overriding the base Index.shape definition to avoid materializing
- # the values (GH-27384, GH-27775)
- return (len(self),)
+ def values(self):
+ if self._tuples is not None:
+ return self._tuples
+
+ values = []
+
+ for i in range(self.nlevels):
+ vals = self._get_level_values(i)
+ if is_categorical_dtype(vals):
+ vals = vals._internal_get_values()
+ if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
+ vals = vals.astype(object)
+ vals = np.array(vals, copy=False)
+ values.append(vals)
+
+ self._tuples = lib.fast_zip(values)
+ return self._tuples
@property
def array(self):
@@ -659,6 +661,34 @@ def array(self):
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
+ @property
+ def shape(self):
+ """
+ Return a tuple of the shape of the underlying data.
+ """
+ # overriding the base Index.shape definition to avoid materializing
+ # the values (GH-27384, GH-27775)
+ return (len(self),)
+
+ def __len__(self) -> int:
+ return len(self.codes[0])
+
+ # --------------------------------------------------------------------
+ # Levels Methods
+
+ @cache_readonly
+ def levels(self):
+ # Use cache_readonly to ensure that self.get_locs doesn't repeatedly
+ # create new IndexEngine
+ # https://github.com/pandas-dev/pandas/issues/31648
+ result = [
+ x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
+ ]
+ for level in result:
+ # disallow midx.levels[0].name = "foo"
+ level._no_setting_name = True
+ return FrozenList(result)
+
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
@@ -785,6 +815,23 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
if not inplace:
return idx
+ @property
+ def nlevels(self) -> int:
+ """
+ Integer number of levels in this MultiIndex.
+ """
+ return len(self._levels)
+
+ @property
+ def levshape(self):
+ """
+ A tuple with the length of each level.
+ """
+ return tuple(len(x) for x in self.levels)
+
+ # --------------------------------------------------------------------
+ # Codes Methods
+
@property
def codes(self):
return self._codes
@@ -895,6 +942,57 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
if not inplace:
return idx
+ # --------------------------------------------------------------------
+ # Index Internals
+
+ @cache_readonly
+ def _engine(self):
+ # Calculate the number of bits needed to represent labels in each
+ # level, as log2 of their sizes (including -1 for NaN):
+ sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
+
+ # Sum bit counts, starting from the _right_....
+ lev_bits = np.cumsum(sizes[::-1])[::-1]
+
+ # ... in order to obtain offsets such that sorting the combination of
+ # shifted codes (one for each level, resulting in a unique integer) is
+ # equivalent to sorting lexicographically the codes themselves. Notice
+ # that each level needs to be shifted by the number of bits needed to
+ # represent the _previous_ ones:
+ offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
+
+ # Check the total number of bits needed for our representation:
+ if lev_bits[0] > 64:
+ # The levels would overflow a 64 bit uint - use Python integers:
+ return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
+ return MultiIndexUIntEngine(self.levels, self.codes, offsets)
+
+ @property
+ def _constructor(self):
+ return MultiIndex.from_tuples
+
+ @Appender(Index._shallow_copy.__doc__)
+ def _shallow_copy(self, values=None, **kwargs):
+ if values is not None:
+ names = kwargs.pop("names", kwargs.pop("name", self.names))
+ # discards freq
+ kwargs.pop("freq", None)
+ return MultiIndex.from_tuples(values, names=names, **kwargs)
+ return self.copy(**kwargs)
+
+ def _shallow_copy_with_infer(self, values, **kwargs):
+ # On equal MultiIndexes the difference is empty.
+ # Therefore, an empty MultiIndex is returned GH13490
+ if len(values) == 0:
+ return MultiIndex(
+ levels=[[] for _ in range(self.nlevels)],
+ codes=[[] for _ in range(self.nlevels)],
+ **kwargs,
+ )
+ return self._shallow_copy(values, **kwargs)
+
+ # --------------------------------------------------------------------
+
def copy(
self,
names=None,
@@ -961,17 +1059,6 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy_with_infer(self, values, **kwargs):
- # On equal MultiIndexes the difference is empty.
- # Therefore, an empty MultiIndex is returned GH13490
- if len(values) == 0:
- return MultiIndex(
- levels=[[] for _ in range(self.nlevels)],
- codes=[[] for _ in range(self.nlevels)],
- **kwargs,
- )
- return self._shallow_copy(values, **kwargs)
-
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
@@ -981,15 +1068,6 @@ def __contains__(self, key: Any) -> bool:
except (LookupError, TypeError, ValueError):
return False
- @Appender(Index._shallow_copy.__doc__)
- def _shallow_copy(self, values=None, **kwargs):
- if values is not None:
- names = kwargs.pop("names", kwargs.pop("name", self.names))
- # discards freq
- kwargs.pop("freq", None)
- return MultiIndex.from_tuples(values, names=names, **kwargs)
- return self.copy(**kwargs)
-
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
@@ -1039,6 +1117,7 @@ def _nbytes(self, deep: bool = False) -> int:
# --------------------------------------------------------------------
# Rendering Methods
+
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
@@ -1165,9 +1244,7 @@ def format(
return result_levels
# --------------------------------------------------------------------
-
- def __len__(self) -> int:
- return len(self.codes[0])
+ # Names Methods
def _get_names(self):
return FrozenList(self._names)
@@ -1227,10 +1304,15 @@ def _set_names(self, names, level=None, validate=True):
)
self._names[lev] = name
+ # If .levels has been accessed, the names in our cache will be stale.
+ self._reset_cache()
+
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
+ # --------------------------------------------------------------------
+
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
@@ -1268,10 +1350,6 @@ def _get_grouper_for_level(self, mapper, level):
return grouper, codes, level_index
- @property
- def _constructor(self):
- return MultiIndex.from_tuples
-
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
@@ -1303,49 +1381,6 @@ def _get_level_number(self, level) -> int:
)
return level
- _tuples = None
-
- @cache_readonly
- def _engine(self):
- # Calculate the number of bits needed to represent labels in each
- # level, as log2 of their sizes (including -1 for NaN):
- sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
-
- # Sum bit counts, starting from the _right_....
- lev_bits = np.cumsum(sizes[::-1])[::-1]
-
- # ... in order to obtain offsets such that sorting the combination of
- # shifted codes (one for each level, resulting in a unique integer) is
- # equivalent to sorting lexicographically the codes themselves. Notice
- # that each level needs to be shifted by the number of bits needed to
- # represent the _previous_ ones:
- offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
-
- # Check the total number of bits needed for our representation:
- if lev_bits[0] > 64:
- # The levels would overflow a 64 bit uint - use Python integers:
- return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
- return MultiIndexUIntEngine(self.levels, self.codes, offsets)
-
- @property
- def values(self):
- if self._tuples is not None:
- return self._tuples
-
- values = []
-
- for i in range(self.nlevels):
- vals = self._get_level_values(i)
- if is_categorical_dtype(vals):
- vals = vals._internal_get_values()
- if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
- vals = vals.astype(object)
- vals = np.array(vals, copy=False)
- values.append(vals)
-
- self._tuples = lib.fast_zip(values)
- return self._tuples
-
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
@@ -1461,68 +1496,6 @@ def dropna(self, how="any"):
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
- def get_value(self, series, key):
- # Label-based
- s = com.values_from_object(series)
- k = com.values_from_object(key)
-
- def _try_mi(k):
- # TODO: what if a level contains tuples??
- loc = self.get_loc(k)
- new_values = series._values[loc]
- new_index = self[loc]
- new_index = maybe_droplevels(new_index, k)
- return series._constructor(
- new_values, index=new_index, name=series.name
- ).__finalize__(self)
-
- try:
- return self._engine.get_value(s, k)
- except KeyError as e1:
- try:
- return _try_mi(key)
- except KeyError:
- pass
-
- try:
- return libindex.get_value_at(s, k)
- except IndexError:
- raise
- except TypeError:
- # generator/iterator-like
- if is_iterator(key):
- raise InvalidIndexError(key)
- else:
- raise e1
- except Exception: # pragma: no cover
- raise e1
- except TypeError:
-
- # a Timestamp will raise a TypeError in a multi-index
- # rather than a KeyError, try it here
- # note that a string that 'looks' like a Timestamp will raise
- # a KeyError! (GH5725)
- if isinstance(key, (datetime.datetime, np.datetime64, str)):
- try:
- return _try_mi(key)
- except KeyError:
- raise
- except (IndexError, ValueError, TypeError):
- pass
-
- try:
- return _try_mi(Timestamp(key))
- except (
- KeyError,
- TypeError,
- IndexError,
- ValueError,
- tslibs.OutOfBoundsDatetime,
- ):
- pass
-
- raise InvalidIndexError(key)
-
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
@@ -1869,19 +1842,8 @@ def remove_unused_levels(self):
return result
- @property
- def nlevels(self) -> int:
- """
- Integer number of levels in this MultiIndex.
- """
- return len(self._levels)
-
- @property
- def levshape(self):
- """
- A tuple with the length of each level.
- """
- return tuple(len(x) for x in self.levels)
+ # --------------------------------------------------------------------
+ # Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
@@ -1915,6 +1877,8 @@ def __setstate__(self, state):
self.sortorder = sortorder
self._reset_identity()
+ # --------------------------------------------------------------------
+
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
@@ -2287,7 +2251,104 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
return new_index, indexer
- def _convert_listlike_indexer(self, keyarr, kind=None):
+ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
+ """
+ Create index with target's values (move/add/delete values as necessary)
+
+ Returns
+ -------
+ new_index : pd.MultiIndex
+ Resulting index
+ indexer : np.ndarray or None
+ Indices of output values in original index.
+
+ """
+ # GH6552: preserve names when reindexing to non-named target
+ # (i.e. neither Index nor Series).
+ preserve_names = not hasattr(target, "names")
+
+ if level is not None:
+ if method is not None:
+ raise TypeError("Fill method not supported if level passed")
+
+ # GH7774: preserve dtype/tz if target is empty and not an Index.
+ # target may be an iterator
+ target = ibase._ensure_has_len(target)
+ if len(target) == 0 and not isinstance(target, Index):
+ idx = self.levels[level]
+ attrs = idx._get_attributes_dict()
+ attrs.pop("freq", None) # don't preserve freq
+ target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
+ else:
+ target = ensure_index(target)
+ target, indexer, _ = self._join_level(
+ target, level, how="right", return_indexers=True, keep_order=False
+ )
+ else:
+ target = ensure_index(target)
+ if self.equals(target):
+ indexer = None
+ else:
+ if self.is_unique:
+ indexer = self.get_indexer(
+ target, method=method, limit=limit, tolerance=tolerance
+ )
+ else:
+ raise ValueError("cannot handle a non-unique multi-index!")
+
+ if not isinstance(target, MultiIndex):
+ if indexer is None:
+ target = self
+ elif (indexer >= 0).all():
+ target = self.take(indexer)
+ else:
+ # hopefully?
+ target = MultiIndex.from_tuples(target)
+
+ if (
+ preserve_names
+ and target.nlevels == self.nlevels
+ and target.names != self.names
+ ):
+ target = target.copy(deep=False)
+ target.names = self.names
+
+ return target, indexer
+
+ # --------------------------------------------------------------------
+ # Indexing Methods
+
+ def get_value(self, series, key):
+ # Label-based
+ if not is_hashable(key) or is_iterator(key):
+ # We allow tuples if they are hashable, whereas other Index
+ # subclasses require scalar.
+ # We have to explicitly exclude generators, as these are hashable.
+ raise InvalidIndexError(key)
+
+ def _try_mi(k):
+ # TODO: what if a level contains tuples??
+ loc = self.get_loc(k)
+
+ new_values = series._values[loc]
+ if is_scalar(loc):
+ return new_values
+
+ new_index = self[loc]
+ new_index = maybe_droplevels(new_index, k)
+ return series._constructor(
+ new_values, index=new_index, name=series.name
+ ).__finalize__(self)
+
+ try:
+ return _try_mi(key)
+ except KeyError:
+ if is_integer(key):
+ return series._values[key]
+ else:
+ raise
+
+ def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
@@ -2300,7 +2361,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None):
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
- indexer, keyarr = super()._convert_listlike_indexer(keyarr, kind=kind)
+ indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
@@ -2361,70 +2422,6 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
- def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
- """
- Create index with target's values (move/add/delete values as necessary)
-
- Returns
- -------
- new_index : pd.MultiIndex
- Resulting index
- indexer : np.ndarray or None
- Indices of output values in original index.
-
- """
- # GH6552: preserve names when reindexing to non-named target
- # (i.e. neither Index nor Series).
- preserve_names = not hasattr(target, "names")
-
- if level is not None:
- if method is not None:
- raise TypeError("Fill method not supported if level passed")
-
- # GH7774: preserve dtype/tz if target is empty and not an Index.
- # target may be an iterator
- target = ibase._ensure_has_len(target)
- if len(target) == 0 and not isinstance(target, Index):
- idx = self.levels[level]
- attrs = idx._get_attributes_dict()
- attrs.pop("freq", None) # don't preserve freq
- target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
- else:
- target = ensure_index(target)
- target, indexer, _ = self._join_level(
- target, level, how="right", return_indexers=True, keep_order=False
- )
- else:
- target = ensure_index(target)
- if self.equals(target):
- indexer = None
- else:
- if self.is_unique:
- indexer = self.get_indexer(
- target, method=method, limit=limit, tolerance=tolerance
- )
- else:
- raise ValueError("cannot handle a non-unique multi-index!")
-
- if not isinstance(target, MultiIndex):
- if indexer is None:
- target = self
- elif (indexer >= 0).all():
- target = self.take(indexer)
- else:
- # hopefully?
- target = MultiIndex.from_tuples(target)
-
- if (
- preserve_names
- and target.nlevels == self.nlevels
- and target.names != self.names
- ):
- target = target.copy(deep=False)
- target.names = self.names
-
- return target, indexer
-
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
@@ -3058,8 +3055,70 @@ def _update_indexer(idxr, indexer=indexer):
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
+
+ indexer = self._reorder_indexer(seq, indexer)
+
return indexer._ndarray_values
+ def _reorder_indexer(
+ self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
+ ) -> ArrayLike:
+ """
+ Reorder an indexer of a MultiIndex (self) so that the label are in the
+ same order as given in seq
+
+ Parameters
+ ----------
+ seq : label/slice/list/mask or a sequence of such
+ indexer: an Int64Index indexer of self
+
+ Returns
+ -------
+ indexer : a sorted Int64Index indexer of self ordered as seq
+ """
+ # If the index is lexsorted and the list_like label in seq are sorted
+ # then we do not need to sort
+ if self.is_lexsorted():
+ need_sort = False
+ for i, k in enumerate(seq):
+ if is_list_like(k):
+ if not need_sort:
+ k_codes = self.levels[i].get_indexer(k)
+ k_codes = k_codes[k_codes >= 0] # Filter absent keys
+ # True if the given codes are not ordered
+ need_sort = (k_codes[:-1] > k_codes[1:]).any()
+ # Bail out if both index and seq are sorted
+ if not need_sort:
+ return indexer
+
+ n = len(self)
+ keys: Tuple[np.ndarray, ...] = tuple()
+ # For each level of the sequence in seq, map the level codes with the
+ # order they appears in a list-like sequence
+ # This mapping is then use to reorder the indexer
+ for i, k in enumerate(seq):
+ if com.is_bool_indexer(k):
+ new_order = np.arange(n)[indexer]
+ elif is_list_like(k):
+ # Generate a map with all level codes as sorted initially
+ key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
+ self.levels[i]
+ )
+ # Set order as given in the indexer list
+ level_indexer = self.levels[i].get_indexer(k)
+ level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
+ key_order_map[level_indexer] = np.arange(len(level_indexer))
+
+ new_order = key_order_map[self.codes[i][indexer]]
+ else:
+ # For all other case, use the same order as the level
+ new_order = np.arange(n)[indexer]
+ keys = (new_order,) + keys
+
+ # Find the reordering using lexsort on the keys mapping
+ ind = np.lexsort(keys)
+ return indexer[ind]
+
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
@@ -3158,6 +3217,9 @@ def equal_levels(self, other) -> bool:
return False
return True
+ # --------------------------------------------------------------------
+ # Set Methods
+
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
@@ -3310,21 +3372,6 @@ def difference(self, other, sort=None):
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
- @Appender(Index.astype.__doc__)
- def astype(self, dtype, copy=True):
- dtype = pandas_dtype(dtype)
- if is_categorical_dtype(dtype):
- msg = "> 1 ndim Categorical are not supported at this time"
- raise NotImplementedError(msg)
- elif not is_object_dtype(dtype):
- raise TypeError(
- f"Setting {type(self)} dtype to anything other "
- "than object is not supported"
- )
- elif copy is True:
- return self._shallow_copy()
- return self
-
def _convert_can_do_setop(self, other):
result_names = self.names
@@ -3345,6 +3392,23 @@ def _convert_can_do_setop(self, other):
result_names = self.names if self.names == other.names else None
return other, result_names
+ # --------------------------------------------------------------------
+
+ @Appender(Index.astype.__doc__)
+ def astype(self, dtype, copy=True):
+ dtype = pandas_dtype(dtype)
+ if is_categorical_dtype(dtype):
+ msg = "> 1 ndim Categorical are not supported at this time"
+ raise NotImplementedError(msg)
+ elif not is_object_dtype(dtype):
+ raise TypeError(
+ f"Setting {type(self)} dtype to anything other "
+ "than object is not supported"
+ )
+ elif copy is True:
+ return self._shallow_copy()
+ return self
+
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 4d3d560aaa688..d67c40a78d807 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any
+from typing import Any
import numpy as np
@@ -32,12 +32,9 @@
from pandas.core import algorithms
import pandas.core.common as com
-from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name
+from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.ops import get_op_result_name
-if TYPE_CHECKING:
- from pandas import Series
-
_num_index_shared_docs = dict()
@@ -253,12 +250,11 @@ def asi8(self) -> np.ndarray:
return self.values.view(self._default_dtype)
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["loc", "getitem", "iloc", None]
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
- # don't coerce ilocs to integers
- if kind != "iloc":
- key = self._maybe_cast_indexer(key)
+ # never iloc, which we don't coerce to integers
+ key = self._maybe_cast_indexer(key)
return super()._convert_scalar_indexer(key, kind=kind)
@@ -383,13 +379,17 @@ def astype(self, dtype, copy=True):
return Int64Index(arr)
return super().astype(dtype, copy=copy)
- @Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["loc", "getitem", "iloc", None]
+ # ----------------------------------------------------------------
+ # Indexing Methods
- if kind == "iloc":
- self._validate_indexer("positional", key, "iloc")
+ @Appender(Index._should_fallback_to_positional.__doc__)
+ def _should_fallback_to_positional(self):
+ return False
+ @Appender(Index._convert_scalar_indexer.__doc__)
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
+ # no-op for non-iloc
return key
@Appender(Index._convert_slice_indexer.__doc__)
@@ -401,6 +401,8 @@ def _convert_slice_indexer(self, key: slice, kind=None):
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
+ # ----------------------------------------------------------------
+
def _format_native_types(
self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
@@ -416,16 +418,6 @@ def _format_native_types(
)
return formatter.get_result_as_array()
- def get_value(self, series: "Series", key):
- """
- We always want to get an index value, never a value.
- """
- if not is_scalar(key):
- raise InvalidIndexError
-
- loc = self.get_loc(key)
- return self._get_values_for_loc(series, loc)
-
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 75c100c9d2c08..42f0a012902a3 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,11 +1,11 @@
from datetime import datetime, timedelta
-from typing import TYPE_CHECKING, Any
+from typing import Any
import weakref
import numpy as np
from pandas._libs import index as libindex
-from pandas._libs.tslibs import NaT, frequencies as libfrequencies, resolution
+from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
from pandas.util._decorators import Appender, cache_readonly
@@ -18,7 +18,6 @@
is_float,
is_integer,
is_integer_dtype,
- is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
@@ -51,9 +50,6 @@
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
-if TYPE_CHECKING:
- from pandas import Series
-
# --- Period index sketch
@@ -280,22 +276,6 @@ def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
- @property
- def _box_func(self):
- """Maybe box an ordinal or Period"""
- # TODO(DatetimeArray): Avoid double-boxing
- # PeriodArray takes care of boxing already, so we need to check
- # whether we're given an ordinal or a Period. It seems like some
- # places outside of indexes/period.py are calling this _box_func,
- # but passing data that's already boxed.
- def func(x):
- if isinstance(x, Period) or x is NaT:
- return x
- else:
- return Period._from_ordinal(ordinal=x, freq=self.freq)
-
- return func
-
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
@@ -471,17 +451,6 @@ def inferred_type(self) -> str:
# indexing
return "period"
- def get_value(self, series: "Series", key):
- """
- Fast lookup of value from 1-dimensional ndarray. Only use this if you
- know what you're doing
- """
- if is_integer(key):
- loc = key
- else:
- loc = self.get_loc(key)
- return self._get_values_for_loc(series, loc)
-
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = ensure_index(target)
@@ -576,12 +545,9 @@ def get_loc(self, key, method=None, tolerance=None):
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
- # as we have an invalid type
- if is_list_like(key):
- raise TypeError(f"'{key}' is an invalid key")
raise KeyError(key)
- ordinal = key.ordinal if key is not NaT else key.value
+ ordinal = self._data._unbox_scalar(key)
try:
return self._engine.get_loc(ordinal)
except KeyError:
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 08a07e8d30348..ec0414adc1376 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -1,7 +1,5 @@
""" implement the TimedeltaIndex """
-import numpy as np
-
from pandas._libs import NaT, Timedelta, index as libindex
from pandas.util._decorators import Appender
@@ -53,7 +51,6 @@
"_datetimelike_methods",
"_other_ops",
"components",
- "_box_func",
"to_pytimedelta",
"sum",
"std",
@@ -225,17 +222,6 @@ def _maybe_promote(self, other):
other = TimedeltaIndex(other)
return self, other
- def get_value(self, series, key):
- """
- Fast lookup of value from 1-dimensional ndarray. Only use this if you
- know what you're doing
- """
- if is_integer(key):
- loc = key
- else:
- loc = self.get_loc(key)
- return self._get_values_for_loc(series, loc)
-
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -262,11 +248,6 @@ def get_loc(self, key, method=None, tolerance=None):
else:
raise KeyError(key)
- if tolerance is not None:
- # try converting tolerance now, so errors don't get swallowed by
- # the try/except clauses below
- tolerance = self._convert_tolerance(tolerance, np.asarray(key))
-
return Index.get_loc(self, key, method, tolerance)
def _maybe_cast_slice_bound(self, label, side: str, kind):
@@ -297,12 +278,6 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
return label
- def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
- # TODO: Check for non-True use_lhs/use_rhs
- assert isinstance(key, str), type(key)
- # given a key, try to figure out a location for a partial slice
- raise NotImplementedError
-
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "timedelta"
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 7e56148b7569e..5c0f893554957 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -566,7 +566,7 @@ def iat(self) -> "_iAtIndexer":
return _iAtIndexer("iat", self)
-class _NDFrameIndexer(_NDFrameIndexerBase):
+class _LocationIndexer(_NDFrameIndexerBase):
_valid_types: str
axis = None
@@ -591,15 +591,9 @@ def _get_label(self, label, axis: int):
return self.obj._xs(label, axis=axis)
- def _get_loc(self, key: int, axis: int):
- return self.obj._ixs(key, axis=axis)
-
- def _slice(self, obj, axis: int, kind=None):
- return self.obj._slice(obj, axis=axis, kind=kind)
-
def _get_setitem_indexer(self, key):
if self.axis is not None:
- return self._convert_tuple(key)
+ return self._convert_tuple(key, is_setter=True)
ax = self.obj._get_axis(0)
@@ -612,7 +606,7 @@ def _get_setitem_indexer(self, key):
if isinstance(key, tuple):
try:
- return self._convert_tuple(key)
+ return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
@@ -620,7 +614,7 @@ def _get_setitem_indexer(self, key):
return list(key)
try:
- return self._convert_to_indexer(key, axis=0)
+ return self._convert_to_indexer(key, axis=0, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
@@ -683,68 +677,25 @@ def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
- def _convert_tuple(self, key):
+ def _convert_tuple(self, key, is_setter: bool = False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
- keyidx.append(self._convert_to_indexer(key, axis=axis))
+ keyidx.append(
+ self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
+ )
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
- idx = self._convert_to_indexer(k, axis=i)
+ idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
- def _convert_scalar_indexer(self, key, axis: int):
- # if we are accessing via lowered dim, use the last dim
- ax = self.obj._get_axis(min(axis, self.ndim - 1))
- # a scalar
- return ax._convert_scalar_indexer(key, kind=self.name)
-
- def _convert_slice_indexer(self, key: slice, axis: int):
- # if we are accessing via lowered dim, use the last dim
- ax = self.obj._get_axis(min(axis, self.ndim - 1))
- return ax._convert_slice_indexer(key, kind=self.name)
-
- def _has_valid_setitem_indexer(self, indexer) -> bool:
- return True
-
- def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
- """
- Validate that a positional indexer cannot enlarge its target
- will raise if needed, does not modify the indexer externally.
-
- Returns
- -------
- bool
- """
- if isinstance(indexer, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
- else:
- if not isinstance(indexer, tuple):
- indexer = _tuplify(self.ndim, indexer)
- for ax, i in zip(self.obj.axes, indexer):
- if isinstance(i, slice):
- # should check the stop slice?
- pass
- elif is_list_like_indexer(i):
- # should check the elements?
- pass
- elif is_integer(i):
- if i >= len(ax):
- raise IndexError(
- f"{self.name} cannot enlarge its target object"
- )
- elif isinstance(i, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
-
- return True
-
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
@@ -893,7 +844,8 @@ def _setitem_with_indexer(self, indexer, value):
# we can directly set the series here
# as we select a slice indexer on the mi
- idx = index._convert_slice_indexer(idx)
+ if isinstance(idx, slice):
+ idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
@@ -1232,80 +1184,6 @@ def _align_frame(self, indexer, df: ABCDataFrame):
raise ValueError("Incompatible indexer with DataFrame")
- def _getitem_tuple(self, tup: Tuple):
- try:
- return self._getitem_lowerdim(tup)
- except IndexingError:
- pass
-
- # no multi-index, so validate all of the indexers
- self._has_valid_tuple(tup)
-
- # ugly hack for GH #836
- if self._multi_take_opportunity(tup):
- return self._multi_take(tup)
-
- # no shortcut needed
- retval = self.obj
- for i, key in enumerate(tup):
- if com.is_null_slice(key):
- continue
-
- retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
-
- return retval
-
- def _multi_take_opportunity(self, tup: Tuple) -> bool:
- """
- Check whether there is the possibility to use ``_multi_take``.
-
- Currently the limit is that all axes being indexed, must be indexed with
- list-likes.
-
- Parameters
- ----------
- tup : tuple
- Tuple of indexers, one per axis.
-
- Returns
- -------
- bool
- Whether the current indexing,
- can be passed through `_multi_take`.
- """
- if not all(is_list_like_indexer(x) for x in tup):
- return False
-
- # just too complicated
- if any(com.is_bool_indexer(x) for x in tup):
- return False
-
- return True
-
- def _multi_take(self, tup: Tuple):
- """
- Create the indexers for the passed tuple of keys, and
- executes the take operation. This allows the take operation to be
- executed all at once, rather than once for each dimension.
- Improving efficiency.
-
- Parameters
- ----------
- tup : tuple
- Tuple of indexers, one per axis.
-
- Returns
- -------
- values: same type as the object being indexed
- """
- # GH 836
- o = self.obj
- d = {
- axis: self._get_listlike_indexer(key, axis)
- for (key, axis) in zip(tup, o._AXIS_ORDERS)
- }
- return o._reindex_with_indexers(d, copy=True, allow_dups=True)
-
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
@@ -1426,97 +1304,6 @@ def _getitem_nested_tuple(self, tup: Tuple):
return obj
- def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
- """
- Transform a list-like of keys into a new index and an indexer.
-
- Parameters
- ----------
- key : list-like
- Targeted labels.
- axis: int
- Dimension on which the indexing is being made.
- raise_missing: bool, default False
- Whether to raise a KeyError if some labels were not found.
- Will be removed in the future, and then this method will always behave as
- if ``raise_missing=True``.
-
- Raises
- ------
- KeyError
- If at least one key was requested but none was found, and
- raise_missing=True.
-
- Returns
- -------
- keyarr: Index
- New index (coinciding with 'key' if the axis is unique).
- values : array-like
- Indexer for the return object, -1 denotes keys not found.
- """
- o = self.obj
- ax = o._get_axis(axis)
-
- # Have the index compute an indexer or return None
- # if it cannot handle:
- indexer, keyarr = ax._convert_listlike_indexer(key, kind=self.name)
- # We only act on all found values:
- if indexer is not None and (indexer != -1).all():
- self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
- return ax[indexer], indexer
-
- if ax.is_unique and not getattr(ax, "is_overlapping", False):
- indexer = ax.get_indexer_for(key)
- keyarr = ax.reindex(keyarr)[0]
- else:
- keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
-
- self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
- return keyarr, indexer
-
- def _getitem_iterable(self, key, axis: int):
- """
- Index current object with an an iterable key.
-
- The iterable key can be a boolean indexer or a collection of keys.
-
- Parameters
- ----------
- key : iterable
- Targeted labels or boolean indexer.
- axis: int
- Dimension on which the indexing is being made.
-
- Raises
- ------
- KeyError
- If no key was found. Will change in the future to raise if not all
- keys were found.
- IndexingError
- If the boolean indexer is unalignable with the object being
- indexed.
-
- Returns
- -------
- scalar, DataFrame, or Series: indexed value(s).
- """
- # caller is responsible for ensuring non-None axis
- self._validate_key(key, axis)
-
- labels = self.obj._get_axis(axis)
-
- if com.is_bool_indexer(key):
- # A boolean indexer
- key = check_bool_indexer(labels, key)
- (inds,) = key.nonzero()
- return self.obj._take_with_is_copy(inds, axis=axis)
- else:
- # A collection of keys
- keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
- return self.obj._reindex_with_indexers(
- {axis: [keyarr, indexer]}, copy=True, allow_dups=True
- )
-
def _validate_read_indexer(
self, key, indexer, axis: int, raise_missing: bool = False
):
@@ -1577,135 +1364,59 @@ def _validate_read_indexer(
"https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
- def _convert_to_indexer(self, key, axis: int):
- """
- Convert indexing key into something we can use to do actual fancy
- indexing on a ndarray.
-
- Examples
- ix[:5] -> slice(0, 5)
- ix[[1,2,3]] -> [1,2,3]
- ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
-
- Going by Zen of Python?
- 'In the face of ambiguity, refuse the temptation to guess.'
- raise AmbiguousIndexError with integer labels?
- - No, prefer label-based indexing
- """
- labels = self.obj._get_axis(axis)
-
- if isinstance(key, slice):
- return self._convert_slice_indexer(key, axis)
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
+ raise AbstractMethodError(self)
- # try to find out correct indexer, if not type correct raise
- try:
- key = self._convert_scalar_indexer(key, axis)
- except TypeError:
- # but we will allow setting
- pass
+ def __getitem__(self, key):
+ if type(key) is tuple:
+ key = tuple(com.apply_if_callable(x, self.obj) for x in key)
+ if self._is_scalar_access(key):
+ try:
+ return self.obj._get_value(*key, takeable=self._takeable)
+ except (KeyError, IndexError, AttributeError):
+ # AttributeError for IntervalTree get_value
+ pass
+ return self._getitem_tuple(key)
+ else:
+ # we by definition only have the 0th axis
+ axis = self.axis or 0
- # see if we are positional in nature
- is_int_index = labels.is_integer()
- is_int_positional = is_integer(key) and not is_int_index
+ maybe_callable = com.apply_if_callable(key, self.obj)
+ return self._getitem_axis(maybe_callable, axis=axis)
- if is_scalar(key) or isinstance(labels, ABCMultiIndex):
- # Otherwise get_loc will raise InvalidIndexError
+ def _is_scalar_access(self, key: Tuple):
+ raise NotImplementedError()
- # if we are a label return me
- try:
- return labels.get_loc(key)
- except LookupError:
- if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
- if len(key) == labels.nlevels:
- return {"key": key}
- raise
- except TypeError:
- pass
- except ValueError:
- if not is_int_positional:
- raise
+ def _getitem_tuple(self, tup: Tuple):
+ raise AbstractMethodError(self)
- # a positional
- if is_int_positional:
+ def _getitem_axis(self, key, axis: int):
+ raise NotImplementedError()
- # if we are setting and its not a valid location
- # its an insert which fails by definition
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ raise AbstractMethodError(self)
- if self.name == "loc":
- # always valid
- return {"key": key}
-
- if key >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex):
- # a positional
- raise ValueError("cannot set by positional indexing with enlargement")
-
- return key
-
- if is_nested_tuple(key, labels):
- return labels.get_locs(key)
-
- elif is_list_like_indexer(key):
-
- if com.is_bool_indexer(key):
- key = check_bool_indexer(labels, key)
- (inds,) = key.nonzero()
- return inds
- else:
- # When setting, missing keys are not allowed, even with .loc:
- return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
- else:
- try:
- return labels.get_loc(key)
- except LookupError:
- # allow a not found key only if we are a setter
- if not is_list_like_indexer(key):
- return {"key": key}
- raise
-
-
-class _LocationIndexer(_NDFrameIndexer):
- _takeable: bool = False
-
- def __getitem__(self, key):
- if type(key) is tuple:
- key = tuple(com.apply_if_callable(x, self.obj) for x in key)
- if self._is_scalar_access(key):
- try:
- return self.obj._get_value(*key, takeable=self._takeable)
- except (KeyError, IndexError, AttributeError):
- # AttributeError for IntervalTree get_value
- pass
- return self._getitem_tuple(key)
- else:
- # we by definition only have the 0th axis
- axis = self.axis or 0
-
- maybe_callable = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(maybe_callable, axis=axis)
-
- def _is_scalar_access(self, key: Tuple):
- raise NotImplementedError()
-
- def _getitem_axis(self, key, axis: int):
- raise NotImplementedError()
-
- def _getbool_axis(self, key, axis: int):
- # caller is responsible for ensuring non-None axis
- labels = self.obj._get_axis(axis)
- key = check_bool_indexer(labels, key)
- inds = key.nonzero()[0]
- return self.obj._take_with_is_copy(inds, axis=axis)
+ def _getbool_axis(self, key, axis: int):
+ # caller is responsible for ensuring non-None axis
+ labels = self.obj._get_axis(axis)
+ key = check_bool_indexer(labels, key)
+ inds = key.nonzero()[0]
+ return self.obj._take_with_is_copy(inds, axis=axis)
@Appender(IndexingMixin.loc.__doc__)
class _LocIndexer(_LocationIndexer):
+ _takeable: bool = False
_valid_types = (
"labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean"
)
- @Appender(_NDFrameIndexer._validate_key.__doc__)
+ # -------------------------------------------------------------------
+ # Key Checks
+
+ @Appender(_LocationIndexer._validate_key.__doc__)
def _validate_key(self, key, axis: int):
# valid for a collection of labels (we check their presence later)
@@ -1720,7 +1431,11 @@ def _validate_key(self, key, axis: int):
return
if not is_list_like_indexer(key):
- self._convert_scalar_indexer(key, axis)
+ labels = self.obj._get_axis(axis)
+ labels._convert_scalar_indexer(key, kind="loc")
+
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ return True
def _is_scalar_access(self, key: Tuple) -> bool:
"""
@@ -1753,6 +1468,61 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
+ # -------------------------------------------------------------------
+ # MultiIndex Handling
+
+ def _multi_take_opportunity(self, tup: Tuple) -> bool:
+ """
+ Check whether there is the possibility to use ``_multi_take``.
+
+ Currently the limit is that all axes being indexed, must be indexed with
+ list-likes.
+
+ Parameters
+ ----------
+ tup : tuple
+ Tuple of indexers, one per axis.
+
+ Returns
+ -------
+ bool
+ Whether the current indexing,
+ can be passed through `_multi_take`.
+ """
+ if not all(is_list_like_indexer(x) for x in tup):
+ return False
+
+ # just too complicated
+ if any(com.is_bool_indexer(x) for x in tup):
+ return False
+
+ return True
+
+ def _multi_take(self, tup: Tuple):
+ """
+ Create the indexers for the passed tuple of keys, and
+ executes the take operation. This allows the take operation to be
+ executed all at once, rather than once for each dimension.
+ Improving efficiency.
+
+ Parameters
+ ----------
+ tup : tuple
+ Tuple of indexers, one per axis.
+
+ Returns
+ -------
+ values: same type as the object being indexed
+ """
+ # GH 836
+ d = {
+ axis: self._get_listlike_indexer(key, axis)
+ for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
+ }
+ return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
+
+ # -------------------------------------------------------------------
+
def _get_partial_string_timestamp_match_key(self, key, labels):
"""
Translate any partial string timestamp matches in key, returning the
@@ -1785,6 +1555,60 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
return key
+ def _getitem_iterable(self, key, axis: int):
+ """
+ Index current object with an an iterable collection of keys.
+
+ Parameters
+ ----------
+ key : iterable
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
+
+ Raises
+ ------
+ KeyError
+ If no key was found. Will change in the future to raise if not all
+ keys were found.
+
+ Returns
+ -------
+ scalar, DataFrame, or Series: indexed value(s).
+ """
+ # we assume that not com.is_bool_indexer(key), as that is
+ # handled before we get here.
+ self._validate_key(key, axis)
+
+ # A collection of keys
+ keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
+ return self.obj._reindex_with_indexers(
+ {axis: [keyarr, indexer]}, copy=True, allow_dups=True
+ )
+
+ def _getitem_tuple(self, tup: Tuple):
+ try:
+ return self._getitem_lowerdim(tup)
+ except IndexingError:
+ pass
+
+ # no multi-index, so validate all of the indexers
+ self._has_valid_tuple(tup)
+
+ # ugly hack for GH #836
+ if self._multi_take_opportunity(tup):
+ return self._multi_take(tup)
+
+ # no shortcut needed
+ retval = self.obj
+ for i, key in enumerate(tup):
+ if com.is_null_slice(key):
+ continue
+
+ retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
+
+ return retval
+
def _getitem_axis(self, key, axis: int):
key = item_from_zerodim(key)
if is_iterator(key):
@@ -1865,12 +1689,139 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
)
if isinstance(indexer, slice):
- return self._slice(indexer, axis=axis, kind="iloc")
+ return self.obj._slice(indexer, axis=axis, kind="iloc")
else:
# DatetimeIndex overrides Index.slice_indexer and may
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
+ """
+ Convert indexing key into something we can use to do actual fancy
+ indexing on a ndarray.
+
+ Examples
+ ix[:5] -> slice(0, 5)
+ ix[[1,2,3]] -> [1,2,3]
+ ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
+
+ Going by Zen of Python?
+ 'In the face of ambiguity, refuse the temptation to guess.'
+ raise AmbiguousIndexError with integer labels?
+ - No, prefer label-based indexing
+ """
+ labels = self.obj._get_axis(axis)
+
+ if isinstance(key, slice):
+ return labels._convert_slice_indexer(key, kind="loc")
+
+ if is_scalar(key):
+ # try to find out correct indexer, if not type correct raise
+ try:
+ key = labels._convert_scalar_indexer(key, kind="loc")
+ except TypeError:
+ # but we will allow setting
+ if not is_setter:
+ raise
+
+ # see if we are positional in nature
+ is_int_index = labels.is_integer()
+ is_int_positional = is_integer(key) and not is_int_index
+
+ if is_scalar(key) or isinstance(labels, ABCMultiIndex):
+ # Otherwise get_loc will raise InvalidIndexError
+
+ # if we are a label return me
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
+ if len(key) == labels.nlevels:
+ return {"key": key}
+ raise
+ except TypeError:
+ pass
+ except ValueError:
+ if not is_int_positional:
+ raise
+
+ # a positional
+ if is_int_positional:
+
+ # if we are setting and its not a valid location
+ # its an insert which fails by definition
+
+ # always valid
+ return {"key": key}
+
+ if is_nested_tuple(key, labels):
+ return labels.get_locs(key)
+
+ elif is_list_like_indexer(key):
+
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(labels, key)
+ (inds,) = key.nonzero()
+ return inds
+ else:
+ # When setting, missing keys are not allowed, even with .loc:
+ return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
+ else:
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ # allow a not found key only if we are a setter
+ if not is_list_like_indexer(key):
+ return {"key": key}
+ raise
+
+ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
+ """
+ Transform a list-like of keys into a new index and an indexer.
+
+ Parameters
+ ----------
+ key : list-like
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
+ raise_missing: bool, default False
+ Whether to raise a KeyError if some labels were not found.
+ Will be removed in the future, and then this method will always behave as
+ if ``raise_missing=True``.
+
+ Raises
+ ------
+ KeyError
+ If at least one key was requested but none was found, and
+ raise_missing=True.
+
+ Returns
+ -------
+ keyarr: Index
+ New index (coinciding with 'key' if the axis is unique).
+ values : array-like
+ Indexer for the return object, -1 denotes keys not found.
+ """
+ ax = self.obj._get_axis(axis)
+
+ # Have the index compute an indexer or return None
+ # if it cannot handle:
+ indexer, keyarr = ax._convert_listlike_indexer(key)
+ # We only act on all found values:
+ if indexer is not None and (indexer != -1).all():
+ self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
+ return ax[indexer], indexer
+
+ if ax.is_unique and not getattr(ax, "is_overlapping", False):
+ indexer = ax.get_indexer_for(key)
+ keyarr = ax.reindex(keyarr)[0]
+ else:
+ keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
+
+ self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
+ return keyarr, indexer
+
@Appender(IndexingMixin.iloc.__doc__)
class _iLocIndexer(_LocationIndexer):
@@ -1880,6 +1831,9 @@ class _iLocIndexer(_LocationIndexer):
)
_takeable = True
+ # -------------------------------------------------------------------
+ # Key Checks
+
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
if hasattr(key, "index") and isinstance(key.index, Index):
@@ -1920,6 +1874,37 @@ def _validate_key(self, key, axis: int):
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
+ def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
+ """
+ Validate that a positional indexer cannot enlarge its target
+ will raise if needed, does not modify the indexer externally.
+
+ Returns
+ -------
+ bool
+ """
+ if isinstance(indexer, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
+ else:
+ if not isinstance(indexer, tuple):
+ indexer = _tuplify(self.ndim, indexer)
+ for ax, i in zip(self.obj.axes, indexer):
+ if isinstance(i, slice):
+ # should check the stop slice?
+ pass
+ elif is_list_like_indexer(i):
+ # should check the elements?
+ pass
+ elif is_integer(i):
+ if i >= len(ax):
+ raise IndexError(
+ f"{self.name} cannot enlarge its target object"
+ )
+ elif isinstance(i, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
+
+ return True
+
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
@@ -1963,6 +1948,8 @@ def _validate_integer(self, key: int, axis: int) -> None:
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
+ # -------------------------------------------------------------------
+
def _getitem_tuple(self, tup: Tuple):
self._has_valid_tuple(tup)
@@ -2038,7 +2025,7 @@ def _getitem_axis(self, key, axis: int):
# validate the location
self._validate_integer(key, axis)
- return self._get_loc(key, axis=axis)
+ return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
# caller is responsible for ensuring non-None axis
@@ -2047,25 +2034,26 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
if not need_slice(slice_obj):
return obj.copy(deep=False)
- indexer = self._convert_slice_indexer(slice_obj, axis)
- return self._slice(indexer, axis=axis, kind="iloc")
+ labels = obj._get_axis(axis)
+ indexer = labels._convert_slice_indexer(slice_obj, kind="iloc")
+ return self.obj._slice(indexer, axis=axis, kind="iloc")
- def _convert_to_indexer(self, key, axis: int):
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Much simpler as we only have to deal with our valid types.
"""
+ labels = self.obj._get_axis(axis)
+
# make need to convert a float key
if isinstance(key, slice):
- return self._convert_slice_indexer(key, axis)
+ return labels._convert_slice_indexer(key, kind="iloc")
elif is_float(key):
- return self._convert_scalar_indexer(key, axis)
-
- try:
- self._validate_key(key, axis)
+ labels._validate_indexer("positional", key, "iloc")
return key
- except ValueError:
- raise ValueError(f"Can only index by location with a [{self._valid_types}]")
+
+ self._validate_key(key, axis)
+ return key
class _ScalarAccessIndexer(_NDFrameIndexerBase):
@@ -2116,21 +2104,11 @@ def _convert_key(self, key, is_setter: bool = False):
if is_setter:
return list(key)
- for ax, i in zip(self.obj.axes, key):
- if ax.is_integer():
- if not is_integer(i):
- raise ValueError(
- "At based indexing on an integer index "
- "can only have integer indexers"
- )
- else:
- if is_integer(i) and not (ax.holds_integer() or ax.is_floating()):
- raise ValueError(
- "At based indexing on an non-integer "
- "index can only have non-integer "
- "indexers"
- )
- return key
+ lkey = list(key)
+ for n, (ax, i) in enumerate(zip(self.obj.axes, key)):
+ lkey[n] = ax._convert_scalar_indexer(i, kind="loc")
+
+ return tuple(lkey)
@Appender(IndexingMixin.iat.__doc__)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9e31ccebd0f1b..85a26179276f5 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -7,8 +7,7 @@
import numpy as np
-from pandas._libs import NaT, algos as libalgos, lib, tslib, writers
-from pandas._libs.index import convert_scalar
+from pandas._libs import NaT, Timestamp, algos as libalgos, lib, tslib, writers
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
@@ -16,6 +15,7 @@
from pandas.core.dtypes.cast import (
astype_nansafe,
+ convert_scalar_for_putitemlike,
find_common_type,
infer_dtype_from,
infer_dtype_from_scalar,
@@ -762,7 +762,7 @@ def replace(
# The only non-DatetimeLike class that also has a non-trivial
# try_coerce_args is ObjectBlock, but that overrides replace,
# so does not get here.
- to_replace = convert_scalar(values, to_replace)
+ to_replace = convert_scalar_for_putitemlike(to_replace, values.dtype)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
@@ -841,7 +841,7 @@ def setitem(self, indexer, value):
# We only get here for non-Extension Blocks, so _try_coerce_args
# is only relevant for DatetimeBlock and TimedeltaBlock
if lib.is_scalar(value):
- value = convert_scalar(values, value)
+ value = convert_scalar_for_putitemlike(value, values.dtype)
else:
# current dtype cannot store value, coerce to common dtype
@@ -957,7 +957,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False)
# We only get here for non-Extension Blocks, so _try_coerce_args
# is only relevant for DatetimeBlock and TimedeltaBlock
if lib.is_scalar(new):
- new = convert_scalar(new_values, new)
+ new = convert_scalar_for_putitemlike(new, new_values.dtype)
if transpose:
new_values = new_values.T
@@ -1200,7 +1200,7 @@ def _interpolate_with_fill(
values = self.values if inplace else self.values.copy()
# We only get here for non-ExtensionBlock
- fill_value = convert_scalar(self.values, fill_value)
+ fill_value = convert_scalar_for_putitemlike(fill_value, self.values.dtype)
values = missing.interpolate_2d(
values,
@@ -1405,7 +1405,7 @@ def where_func(cond, values, other):
raise TypeError
if lib.is_scalar(other) and isinstance(values, np.ndarray):
# convert datetime to datetime64, timedelta to timedelta64
- other = convert_scalar(values, other)
+ other = convert_scalar_for_putitemlike(other, values.dtype)
# By the time we get here, we should have all Series/Index
# args extracted to ndarray
@@ -2158,6 +2158,16 @@ def internal_values(self):
# Override to return DatetimeArray and TimedeltaArray
return self.array_values()
+ def iget(self, key):
+ # GH#31649 we need to wrap scalars in Timestamp/Timedelta
+ # TODO: this can be removed if we ever have 2D EA
+ result = super().iget(key)
+ if isinstance(result, np.datetime64):
+ result = Timestamp(result)
+ elif isinstance(result, np.timedelta64):
+ result = Timedelta(result)
+ return result
+
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 526863d2e5ec3..08ae0b02169d4 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1565,7 +1565,7 @@ def fast_xs(self, loc):
fast path for getting a cross-section
return a view of the data
"""
- return self._block.values[loc]
+ raise NotImplementedError("Use series._values[loc] instead")
def concat(self, to_concat, new_axis) -> "SingleBlockManager":
"""
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 8829c242b1129..d9f21f0b274ac 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -352,8 +352,8 @@ def __init__(
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
msg = (
- "cannot concatenate object of type '{typ}'; "
- "only Series and DataFrame objs are valid".format(typ=type(obj))
+ f"cannot concatenate object of type '{type(obj)}'; "
+ "only Series and DataFrame objs are valid"
)
raise TypeError(msg)
@@ -403,8 +403,7 @@ def __init__(
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError(
- "axis must be between 0 and {ndim}, input was "
- "{axis}".format(ndim=sample.ndim, axis=axis)
+ f"axis must be between 0 and {sample.ndim}, input was {axis}"
)
# if we have mixed ndims, then convert to highest ndim
@@ -622,11 +621,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
try:
i = level.get_loc(key)
except KeyError:
- raise ValueError(
- "Key {key!s} not in level {level!s}".format(
- key=key, level=level
- )
- )
+ raise ValueError(f"Key {key} not in level {level}")
to_concat.append(np.repeat(i, len(index)))
codes_list.append(np.concatenate(to_concat))
@@ -677,11 +672,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
mask = mapped == -1
if mask.any():
- raise ValueError(
- "Values not found in passed level: {hlevel!s}".format(
- hlevel=hlevel[mask]
- )
- )
+ raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}")
new_codes.append(np.repeat(mapped, n))
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index d04287e1e9088..782b8043430e1 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -88,9 +88,7 @@ def melt(
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
- var_name = [
- "variable_{i}".format(i=i) for i in range(len(frame.columns.names))
- ]
+ var_name = [f"variable_{i}" for i in range(len(frame.columns.names))]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
@@ -417,9 +415,7 @@ def wide_to_long(
"""
def get_var_names(df, stub: str, sep: str, suffix: str) -> List[str]:
- regex = r"^{stub}{sep}{suffix}$".format(
- stub=re.escape(stub), sep=re.escape(sep), suffix=suffix
- )
+ regex = fr"^{re.escape(stub)}{re.escape(sep)}{suffix}$"
pattern = re.compile(regex)
return [col for col in df.columns if pattern.match(col)]
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index ceee2f66dba42..480c5279ad3f6 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -611,8 +611,9 @@ def __init__(
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
"merging between different levels can give an unintended "
- "result ({left} levels on the left, {right} on the right)"
- ).format(left=_left.columns.nlevels, right=_right.columns.nlevels)
+ f"result ({left.columns.nlevels} levels on the left,"
+ f"{right.columns.nlevels} on the right)"
+ )
warnings.warn(msg, UserWarning)
self._validate_specification()
@@ -679,7 +680,7 @@ def _indicator_pre_merge(
if i in columns:
raise ValueError(
"Cannot use `indicator=True` option when "
- "data contains a column named {name}".format(name=i)
+ f"data contains a column named {i}"
)
if self.indicator_name in columns:
raise ValueError(
@@ -831,7 +832,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
else:
result.index = Index(key_col, name=name)
else:
- result.insert(i, name or "key_{i}".format(i=i), key_col)
+ result.insert(i, name or f"key_{i}", key_col)
def _get_join_indexers(self):
""" return the join indexers """
@@ -1185,13 +1186,10 @@ def _validate_specification(self):
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
- "Merge options: left_on={lon}, right_on={ron}, "
- "left_index={lidx}, right_index={ridx}".format(
- lon=self.left_on,
- ron=self.right_on,
- lidx=self.left_index,
- ridx=self.right_index,
- )
+ f"Merge options: left_on={self.left_on}, "
+ f"right_on={self.right_on}, "
+ f"left_index={self.left_index}, "
+ f"right_index={self.right_index}"
)
if not common_cols.is_unique:
raise MergeError(f"Data columns not unique: {repr(common_cols)}")
@@ -1486,12 +1484,12 @@ def get_result(self):
def _asof_function(direction: str):
- name = "asof_join_{dir}".format(dir=direction)
+ name = f"asof_join_{direction}"
return getattr(libjoin, name, None)
def _asof_by_function(direction: str):
- name = "asof_join_{dir}_on_X_by_Y".format(dir=direction)
+ name = f"asof_join_{direction}_on_X_by_Y"
return getattr(libjoin, name, None)
@@ -1601,9 +1599,7 @@ def _validate_specification(self):
# check 'direction' is valid
if self.direction not in ["backward", "forward", "nearest"]:
- raise MergeError(
- "direction invalid: {direction}".format(direction=self.direction)
- )
+ raise MergeError(f"direction invalid: {self.direction}")
@property
def _asof_key(self):
@@ -1628,17 +1624,13 @@ def _get_merge_keys(self):
# later with a ValueError, so we don't *need* to check
# for them here.
msg = (
- "incompatible merge keys [{i}] {lkdtype} and "
- "{rkdtype}, both sides category, but not equal ones".format(
- i=i, lkdtype=repr(lk.dtype), rkdtype=repr(rk.dtype)
- )
+ f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
+ f"{repr(rk.dtype)}, both sides category, but not equal ones"
)
else:
msg = (
- "incompatible merge keys [{i}] {lkdtype} and "
- "{rkdtype}, must be the same type".format(
- i=i, lkdtype=repr(lk.dtype), rkdtype=repr(rk.dtype)
- )
+ f"incompatible merge keys [{i}] {repr(lk.dtype)} and "
+ f"{repr(rk.dtype)}, must be the same type"
)
raise MergeError(msg)
@@ -1651,10 +1643,8 @@ def _get_merge_keys(self):
lt = left_join_keys[-1]
msg = (
- "incompatible tolerance {tolerance}, must be compat "
- "with type {lkdtype}".format(
- tolerance=type(self.tolerance), lkdtype=repr(lt.dtype)
- )
+ f"incompatible tolerance {self.tolerance}, must be compat "
+ f"with type {repr(lk.dtype)}"
)
if needs_i8_conversion(lt):
@@ -1680,8 +1670,11 @@ def _get_merge_keys(self):
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
- msg = "allow_exact_matches must be boolean, passed {passed}"
- raise MergeError(msg.format(passed=self.allow_exact_matches))
+ msg = (
+ "allow_exact_matches must be boolean, "
+ f"passed {self.allow_exact_matches}"
+ )
+ raise MergeError(msg)
return left_join_keys, right_join_keys, join_names
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index a5a9ec9fb79ba..053fb86836ff8 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -200,7 +200,7 @@ def _add_margins(
if not isinstance(margins_name, str):
raise ValueError("margins_name argument must be a string")
- msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
+ msg = f'Conflicting name "{margins_name}" in margins'
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
@@ -650,9 +650,7 @@ def _normalize(table, normalize, margins: bool, margins_name="All"):
if (margins_name not in table.iloc[-1, :].name) | (
margins_name != table.iloc[:, -1].name
):
- raise ValueError(
- "{mname} not in pivoted DataFrame".format(mname=margins_name)
- )
+ raise ValueError(f"{margins_name} not in pivoted DataFrame")
column_margin = table.iloc[:-1, -1]
index_margin = table.iloc[-1, :-1]
@@ -702,7 +700,7 @@ def _get_names(arrs, names, prefix: str = "row"):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
- names.append("{prefix}_{i}".format(prefix=prefix, i=i))
+ names.append(f"{prefix}_{i}")
else:
if len(names) != len(arrs):
raise AssertionError("arrays and names must have the same length")
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index f00ff0d4ba5ed..359e5b956f8a5 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -873,15 +873,13 @@ def get_dummies(
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
- len_msg = (
- "Length of '{name}' ({len_item}) did not match the "
- "length of the columns being encoded ({len_enc})."
- )
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
- len_msg = len_msg.format(
- name=name, len_item=len(item), len_enc=data_to_encode.shape[1]
+ len_msg = (
+ f"Length of '{name}' ({len(item)}) did not match the "
+ "length of the columns being encoded "
+ f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
@@ -990,8 +988,7 @@ def get_empty_frame(data) -> DataFrame:
# PY2 embedded unicode, gh-22084
def _make_col_name(prefix, prefix_sep, level) -> str:
- fstr = "{prefix}{prefix_sep}{level}"
- return fstr.format(prefix=prefix, prefix_sep=prefix_sep, level=level)
+ return f"{prefix}{prefix_sep}{level}"
dummy_cols = [_make_col_name(prefix, prefix_sep, level) for level in levels]
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 00a7645d0c7a5..a18b45a077be0 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -202,17 +202,10 @@ def cut(
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
- # for handling the cut for datetime and timedelta objects
original = x
x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
- # To support cut(IntegerArray), we convert to object dtype with NaN
- # Will properly support in the future.
- # https://github.com/pandas-dev/pandas/pull/31290
- if is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):
- x = x.to_numpy(dtype=object, na_value=np.nan)
-
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
@@ -434,7 +427,7 @@ def _bins_to_cuts(
def _coerce_to_type(x):
"""
- if the passed data is of datetime/timedelta or bool type,
+ if the passed data is of datetime/timedelta, bool or nullable int type,
this method converts it to numeric so that cut or qcut method can
handle it
"""
@@ -451,6 +444,12 @@ def _coerce_to_type(x):
elif is_bool_dtype(x):
# GH 20303
x = x.astype(np.int64)
+ # To support cut and qcut for IntegerArray we convert to float dtype.
+ # Will properly support in the future.
+ # https://github.com/pandas-dev/pandas/pull/31290
+ # https://github.com/pandas-dev/pandas/issues/31389
+ elif is_extension_array_dtype(x) and is_integer_dtype(x):
+ x = x.to_numpy(dtype=np.float64, na_value=np.nan)
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e5cea8ebfc914..0786674daf874 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -22,13 +22,13 @@
from pandas._config import get_option
-from pandas._libs import index as libindex, lib, properties, reshape, tslibs
+from pandas._libs import lib, properties, reshape, tslibs
from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg, validate_percentile
-from pandas.core.dtypes.cast import convert_dtypes
+from pandas.core.dtypes.cast import convert_dtypes, validate_numeric_casting
from pandas.core.dtypes.common import (
_is_unorderable_exception,
ensure_platform_int,
@@ -838,16 +838,11 @@ def _ixs(self, i: int, axis: int = 0):
-------
scalar (int) or Series (slice, sequence)
"""
+ return self._values[i]
- # dispatch to the values if we need
- values = self._values
- if isinstance(values, np.ndarray):
- return libindex.get_value_at(values, i)
- else:
- return values[i]
-
- def _slice(self, slobj: slice, axis: int = 0, kind=None) -> "Series":
- slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
+ def _slice(self, slobj: slice, axis: int = 0, kind: str = "getitem") -> "Series":
+ assert kind in ["getitem", "iloc"]
+ slobj = self.index._convert_slice_indexer(slobj, kind=kind)
return self._get_values(slobj)
def __getitem__(self, key):
@@ -856,31 +851,33 @@ def __getitem__(self, key):
if key is Ellipsis:
return self
- try:
- result = self.index.get_value(self, key)
+ key_is_scalar = is_scalar(key)
+ if key_is_scalar:
+ key = self.index._convert_scalar_indexer(key, kind="getitem")
- return result
- except InvalidIndexError:
- pass
- except (KeyError, ValueError):
- if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
- # kludge
- pass
- elif com.is_bool_indexer(key):
- pass
- else:
+ if key_is_scalar or isinstance(self.index, MultiIndex):
+ # Otherwise index.get_value will raise InvalidIndexError
+ try:
+ result = self.index.get_value(self, key)
- # we can try to coerce the indexer (or this will raise)
- new_key = self.index._convert_scalar_indexer(key, kind="getitem")
- if type(new_key) != type(key):
- return self.__getitem__(new_key)
- raise
+ return result
+ except InvalidIndexError:
+ pass
+ except (KeyError, ValueError):
+ if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
+ # kludge
+ pass
+ else:
+ raise
- if is_iterator(key):
- key = list(key)
+ if not key_is_scalar:
+ # avoid expensive checks if we know we have a scalar
+ if is_iterator(key):
+ key = list(key)
- if com.is_bool_indexer(key):
- key = check_bool_indexer(self.index, key)
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(self.index, key)
+ return self._get_values(key)
return self._get_with(key)
@@ -913,6 +910,8 @@ def _get_with(self, key):
else:
key_type = lib.infer_dtype(key, skipna=False)
+ # Note: The key_type == "boolean" case should be caught by the
+ # com.is_bool_indexer check in __getitem__
if key_type == "integer":
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
@@ -921,8 +920,6 @@ def _get_with(self, key):
return self.iloc[indexer]
else:
return self._get_values(key)
- elif key_type == "boolean":
- return self._get_values(key)
if isinstance(key, (list, tuple)):
# TODO: de-dup with tuple case handled above?
@@ -981,7 +978,7 @@ def _get_value(self, label, takeable: bool = False):
scalar value
"""
if takeable:
- return com.maybe_box_datetimelike(self._values[label])
+ return self._values[label]
return self.index.get_value(self, label)
def __setitem__(self, key, value):
@@ -1026,17 +1023,10 @@ def __setitem__(self, key, value):
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
- values = self._values
- if is_extension_array_dtype(values.dtype):
- # The cython indexing engine does not support ExtensionArrays.
- values[self.index.get_loc(key)] = value
- return
- try:
- self.index._engine.set_value(values, key, value)
- return
- except KeyError:
- values[self.index.get_loc(key)] = value
- return
+ # fails with AttributeError for IntervalIndex
+ loc = self.index._engine.get_loc(key)
+ validate_numeric_casting(self.dtype, value)
+ self._values[loc] = value
def _set_with(self, key, value):
# other: fancy integer or otherwise
@@ -1116,11 +1106,10 @@ def _set_value(self, label, value, takeable: bool = False):
try:
if takeable:
self._values[label] = value
- elif isinstance(self._values, np.ndarray):
- # i.e. not EA, so we can use _engine
- self.index._engine.set_value(self._values, label, value)
else:
- self.loc[label] = value
+ loc = self.index.get_loc(label)
+ validate_numeric_casting(self.dtype, value)
+ self._values[loc] = value
except KeyError:
# set using a non-recursive method
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 00f2961e41617..c4772895afd1e 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -3,11 +3,23 @@
import bz2
from collections import abc
import gzip
-from io import BufferedIOBase, BytesIO
+from io import BufferedIOBase, BytesIO, RawIOBase
import mmap
import os
import pathlib
-from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ AnyStr,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+)
from urllib.parse import ( # noqa
urlencode,
urljoin,
@@ -37,6 +49,10 @@
_VALID_URLS.discard("")
+if TYPE_CHECKING:
+ from io import IOBase # noqa: F401
+
+
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
@@ -356,12 +372,13 @@ def get_handle(
handles : list of file-like objects
A list of file-like object that were opened in this function.
"""
+ need_text_wrapping: Tuple[Type["IOBase"], ...]
try:
from s3fs import S3File
- need_text_wrapping = (BufferedIOBase, S3File)
+ need_text_wrapping = (BufferedIOBase, RawIOBase, S3File)
except ImportError:
- need_text_wrapping = BufferedIOBase # type: ignore
+ need_text_wrapping = (BufferedIOBase, RawIOBase)
handles: List[IO] = list()
f = path_or_buf
@@ -437,7 +454,7 @@ def get_handle(
from io import TextIOWrapper
g = TextIOWrapper(f, encoding=encoding, newline="")
- if not isinstance(f, BufferedIOBase):
+ if not isinstance(f, (BufferedIOBase, RawIOBase)):
handles.append(g)
f = g
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 14e79538541af..28a069bc9fc1b 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -403,7 +403,7 @@ def __init__(
# Deprecated in GH#17295, enforced in 1.0.0
raise KeyError("Not all names specified in 'columns' are found")
- self.df = df
+ self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 204807b55c877..04fd17a00041b 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -19,12 +19,7 @@
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.reshape.concat import concat
-from pandas.io.common import (
- get_filepath_or_buffer,
- get_handle,
- infer_compression,
- stringify_path,
-)
+from pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import build_table_schema, parse_table_schema
from pandas.io.parsers import _validate_integer
@@ -56,7 +51,11 @@ def to_json(
"'index=False' is only valid when 'orient' is 'split' or 'table'"
)
- path_or_buf = stringify_path(path_or_buf)
+ if path_or_buf is not None:
+ path_or_buf, _, _, _ = get_filepath_or_buffer(
+ path_or_buf, compression=compression, mode="w"
+ )
+
if lines and orient != "records":
raise ValueError("'lines' keyword only valid when 'orient' is records")
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 98f2eb3929b59..926635062d853 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -85,7 +85,6 @@ def write(
df: DataFrame,
path,
compression="snappy",
- coerce_timestamps="ms",
index: Optional[bool] = None,
partition_cols=None,
**kwargs,
@@ -103,17 +102,12 @@ def write(
table,
path,
compression=compression,
- coerce_timestamps=coerce_timestamps,
partition_cols=partition_cols,
**kwargs,
)
else:
self.api.parquet.write_table(
- table,
- path,
- compression=compression,
- coerce_timestamps=coerce_timestamps,
- **kwargs,
+ table, path, compression=compression, **kwargs,
)
def read(self, path, columns=None, **kwargs):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a33d81ff437bf..a7d8c374a9aae 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -5,7 +5,8 @@
from collections import abc, defaultdict
import csv
import datetime
-from io import BufferedIOBase, StringIO, TextIOWrapper
+from io import BufferedIOBase, RawIOBase, StringIO, TextIOWrapper
+from itertools import chain
import re
import sys
from textwrap import fill
@@ -1399,17 +1400,21 @@ def __init__(self, kwds):
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
-
- # GH 16338
- elif self.header is not None and not is_integer(self.header):
- raise ValueError("header must be integer or list of integers")
-
- # GH 27779
- elif self.header is not None and self.header < 0:
- raise ValueError(
- "Passing negative integer to header is invalid. "
- "For no header, use header=None instead"
- )
+ elif self.header is not None:
+ # GH 27394
+ if self.prefix is not None:
+ raise ValueError(
+ "Argument prefix must be None if argument header is not None"
+ )
+ # GH 16338
+ elif not is_integer(self.header):
+ raise ValueError("header must be integer or list of integers")
+ # GH 27779
+ elif self.header < 0:
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
self._name_processed = False
@@ -1419,6 +1424,26 @@ def __init__(self, kwds):
# keep references to file handles opened by the parser itself
self.handles = []
+ def _confirm_parse_dates_presence(self, columns):
+ """
+ if user has provided names for parse_dates, check if those columns
+ are available.
+ """
+ if isinstance(self.parse_dates, list):
+ cols_needed = self.parse_dates
+ elif isinstance(self.parse_dates, dict):
+ cols_needed = chain(*self.parse_dates.values())
+ else:
+ cols_needed = []
+
+ missing_cols = ", ".join(
+ [col for col in cols_needed if isinstance(col, str) and col not in columns]
+ )
+ if missing_cols:
+ raise ValueError(
+ f"Missing column provided to 'parse_dates': '{missing_cols}'"
+ )
+
def close(self):
for f in self.handles:
f.close()
@@ -1868,7 +1893,7 @@ def __init__(self, src, **kwds):
# Handle the file object with universal line mode enabled.
# We will handle the newline character ourselves later on.
- if isinstance(src, BufferedIOBase):
+ if isinstance(src, (BufferedIOBase, RawIOBase)):
src = TextIOWrapper(src, encoding=encoding, newline="")
kwds["encoding"] = "utf-8"
@@ -1938,6 +1963,7 @@ def __init__(self, src, **kwds):
if len(self.names) < len(usecols):
_validate_usecols_names(usecols, self.names)
+ self._confirm_parse_dates_presence(self.names)
self._set_noconvert_columns()
self.orig_names = self.names
@@ -2308,6 +2334,7 @@ def __init__(self, f, **kwds):
if self.index_names is None:
self.index_names = index_names
+ self._confirm_parse_dates_presence(self.columns)
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
@@ -3278,6 +3305,10 @@ def _isindex(colspec):
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
+ elif colspec not in orig_names:
+ raise ValueError(
+ f"Missing column provided to 'parse_dates': '{colspec}'"
+ )
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index dd048114142f3..3abce690cbe6b 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -251,7 +251,7 @@ def _maybe_convert_index(ax, data):
freq = frequencies.get_period_alias(freq)
if isinstance(data.index, ABCDatetimeIndex):
- data = data.to_period(freq=freq)
+ data = data.tz_localize(None).to_period(freq=freq)
elif isinstance(data.index, ABCPeriodIndex):
data.index = data.index.asfreq(freq=freq)
return data
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index cfba3da354d44..70e1421c8dcf4 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -408,6 +408,11 @@ def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
+ def test_constructor_np_strs(self):
+ # GH#31499 Hashtable.map_locations needs to work on np.str_ objects
+ cat = pd.Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
+ assert all(isinstance(x, np.str_) for x in cat.categories)
+
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 35eda4a0ec5bc..7e7762d8973a0 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -81,6 +81,24 @@ def test_where_raises(self, other):
with pytest.raises(ValueError, match=match):
ser.where([True, False, True], other=other)
+ def test_shift(self):
+ # https://github.com/pandas-dev/pandas/issues/31495
+ a = IntervalArray.from_breaks([1, 2, 3])
+ result = a.shift()
+ # int -> float
+ expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
+ tm.assert_interval_array_equal(result, expected)
+
+ def test_shift_datetime(self):
+ a = IntervalArray.from_breaks(pd.date_range("2000", periods=4))
+ result = a.shift(2)
+ expected = a.take([-1, -1, 0], allow_fill=True)
+ tm.assert_interval_array_equal(result, expected)
+
+ result = a.shift(-1)
+ expected = a.take([1, 2, -1], allow_fill=True)
+ tm.assert_interval_array_equal(result, expected)
+
class TestSetitem:
def test_set_na(self, left_right_dtypes):
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index cc81ae4504dd8..7a0c9300a43a2 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1061,19 +1061,6 @@ def test_value_counts_na():
tm.assert_series_equal(result, expected)
-@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
-@pytest.mark.parametrize("right", [True, False])
-@pytest.mark.parametrize("include_lowest", [True, False])
-def test_cut(bins, right, include_lowest):
- a = np.random.randint(0, 10, size=50).astype(object)
- a[::2] = np.nan
- result = pd.cut(
- pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
- )
- expected = pd.cut(a, bins, right=right, include_lowest=include_lowest)
- tm.assert_categorical_equal(result, expected)
-
-
def test_array_setitem_nullable_boolean_mask():
# GH 31446
ser = pd.Series([1, 2], dtype="Int64")
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 097e83d93ee71..4c917b9bb42d2 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -675,6 +675,8 @@ def test__get_dtype(input_param, result):
)
def test__get_dtype_fails(input_param, expected_error_message):
# python objects
+ # 2020-02-02 npdev changed error message
+ expected_error_message += f"|Cannot interpret '{input_param}' as a data type"
with pytest.raises(TypeError, match=expected_error_message):
com._get_dtype(input_param)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index a599a086ae92b..dd99b81fb6764 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -32,66 +32,60 @@
class Base:
- def setup_method(self, method):
- self.dtype = self.create()
-
- def test_hash(self):
- hash(self.dtype)
-
- def test_equality_invalid(self):
- assert not self.dtype == "foo"
- assert not is_dtype_equal(self.dtype, np.int64)
-
- def test_numpy_informed(self):
- with pytest.raises(TypeError, match="data type not understood"):
- np.dtype(self.dtype)
+ def test_hash(self, dtype):
+ hash(dtype)
+
+ def test_equality_invalid(self, dtype):
+ assert not dtype == "foo"
+ assert not is_dtype_equal(dtype, np.int64)
+
+ def test_numpy_informed(self, dtype):
+ # npdev 2020-02-02 changed from "data type not understood" to
+ # "Cannot interpret 'foo' as a data type"
+ msg = "|".join(
+ ["data type not understood", "Cannot interpret '.*' as a data type"]
+ )
+ with pytest.raises(TypeError, match=msg):
+ np.dtype(dtype)
- assert not self.dtype == np.str_
- assert not np.str_ == self.dtype
+ assert not dtype == np.str_
+ assert not np.str_ == dtype
- def test_pickle(self):
+ def test_pickle(self, dtype):
# make sure our cache is NOT pickled
# clear the cache
- type(self.dtype).reset_cache()
- assert not len(self.dtype._cache)
+ type(dtype).reset_cache()
+ assert not len(dtype._cache)
# force back to the cache
- result = tm.round_trip_pickle(self.dtype)
- assert not len(self.dtype._cache)
- assert result == self.dtype
+ result = tm.round_trip_pickle(dtype)
+ assert not len(dtype._cache)
+ assert result == dtype
class TestCategoricalDtype(Base):
- def create(self):
+ @pytest.fixture
+ def dtype(self):
+ """
+ Class level fixture of dtype for TestCategoricalDtype
+ """
return CategoricalDtype()
- def test_pickle(self):
- # make sure our cache is NOT pickled
-
- # clear the cache
- type(self.dtype).reset_cache()
- assert not len(self.dtype._cache)
-
- # force back to the cache
- result = tm.round_trip_pickle(self.dtype)
- assert result == self.dtype
-
- def test_hash_vs_equality(self):
- dtype = self.dtype
+ def test_hash_vs_equality(self, dtype):
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
- def test_equality(self):
- assert is_dtype_equal(self.dtype, "category")
- assert is_dtype_equal(self.dtype, CategoricalDtype())
- assert not is_dtype_equal(self.dtype, "foo")
+ def test_equality(self, dtype):
+ assert is_dtype_equal(dtype, "category")
+ assert is_dtype_equal(dtype, CategoricalDtype())
+ assert not is_dtype_equal(dtype, "foo")
- def test_construction_from_string(self):
+ def test_construction_from_string(self, dtype):
result = CategoricalDtype.construct_from_string("category")
- assert is_dtype_equal(self.dtype, result)
+ assert is_dtype_equal(dtype, result)
msg = "Cannot construct a 'CategoricalDtype' from 'foo'"
with pytest.raises(TypeError, match=msg):
CategoricalDtype.construct_from_string("foo")
@@ -133,16 +127,16 @@ def test_from_values_or_dtype_raises(self, values, categories, ordered, dtype):
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype)
- def test_is_dtype(self):
- assert CategoricalDtype.is_dtype(self.dtype)
+ def test_is_dtype(self, dtype):
+ assert CategoricalDtype.is_dtype(dtype)
assert CategoricalDtype.is_dtype("category")
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype("foo")
assert not CategoricalDtype.is_dtype(np.float64)
- def test_basic(self):
+ def test_basic(self, dtype):
- assert is_categorical_dtype(self.dtype)
+ assert is_categorical_dtype(dtype)
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
@@ -180,7 +174,11 @@ def test_is_boolean(self, categories, expected):
class TestDatetimeTZDtype(Base):
- def create(self):
+ @pytest.fixture
+ def dtype(self):
+ """
+ Class level fixture of dtype for TestDatetimeTZDtype
+ """
return DatetimeTZDtype("ns", "US/Eastern")
def test_alias_to_unit_raises(self):
@@ -196,9 +194,8 @@ def test_alias_to_unit_bad_alias_raises(self):
with pytest.raises(TypeError, match=""):
DatetimeTZDtype("datetime64[ns, US/NotATZ]")
- def test_hash_vs_equality(self):
+ def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
- dtype = self.dtype
dtype2 = DatetimeTZDtype("ns", "US/Eastern")
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
@@ -223,54 +220,54 @@ def test_subclass(self):
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
- def test_compat(self):
- assert is_datetime64tz_dtype(self.dtype)
+ def test_compat(self, dtype):
+ assert is_datetime64tz_dtype(dtype)
assert is_datetime64tz_dtype("datetime64[ns, US/Eastern]")
- assert is_datetime64_any_dtype(self.dtype)
+ assert is_datetime64_any_dtype(dtype)
assert is_datetime64_any_dtype("datetime64[ns, US/Eastern]")
- assert is_datetime64_ns_dtype(self.dtype)
+ assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_ns_dtype("datetime64[ns, US/Eastern]")
- assert not is_datetime64_dtype(self.dtype)
+ assert not is_datetime64_dtype(dtype)
assert not is_datetime64_dtype("datetime64[ns, US/Eastern]")
- def test_construction_from_string(self):
+ def test_construction_from_string(self, dtype):
result = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
- assert is_dtype_equal(self.dtype, result)
- msg = "Cannot construct a 'DatetimeTZDtype' from 'foo'"
- with pytest.raises(TypeError, match=msg):
- DatetimeTZDtype.construct_from_string("foo")
-
- def test_construct_from_string_raises(self):
- with pytest.raises(TypeError, match="notatz"):
- DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]")
+ assert is_dtype_equal(dtype, result)
- msg = "'construct_from_string' expects a string, got <class 'list'>"
- with pytest.raises(TypeError, match=re.escape(msg)):
- # list instead of string
- DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
-
- msg = "^Cannot construct a 'DatetimeTZDtype'"
- with pytest.raises(TypeError, match=msg):
+ @pytest.mark.parametrize(
+ "string",
+ [
+ "foo",
+ "datetime64[ns, notatz]",
# non-nano unit
- DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]")
+ "datetime64[ps, UTC]",
+ # dateutil str that returns None from gettz
+ "datetime64[ns, dateutil/invalid]",
+ ],
+ )
+ def test_construct_from_string_invalid_raises(self, string):
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
+ with pytest.raises(TypeError, match=re.escape(msg)):
+ DatetimeTZDtype.construct_from_string(string)
+ def test_construct_from_string_wrong_type_raises(self):
+ msg = "'construct_from_string' expects a string, got <class 'list'>"
with pytest.raises(TypeError, match=msg):
- # dateutil str that returns None from gettz
- DatetimeTZDtype.construct_from_string("datetime64[ns, dateutil/invalid]")
+ DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
- def test_is_dtype(self):
+ def test_is_dtype(self, dtype):
assert not DatetimeTZDtype.is_dtype(None)
- assert DatetimeTZDtype.is_dtype(self.dtype)
+ assert DatetimeTZDtype.is_dtype(dtype)
assert DatetimeTZDtype.is_dtype("datetime64[ns, US/Eastern]")
assert not DatetimeTZDtype.is_dtype("foo")
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype("ns", "US/Pacific"))
assert not DatetimeTZDtype.is_dtype(np.float64)
- def test_equality(self):
- assert is_dtype_equal(self.dtype, "datetime64[ns, US/Eastern]")
- assert is_dtype_equal(self.dtype, DatetimeTZDtype("ns", "US/Eastern"))
- assert not is_dtype_equal(self.dtype, "foo")
- assert not is_dtype_equal(self.dtype, DatetimeTZDtype("ns", "CET"))
+ def test_equality(self, dtype):
+ assert is_dtype_equal(dtype, "datetime64[ns, US/Eastern]")
+ assert is_dtype_equal(dtype, DatetimeTZDtype("ns", "US/Eastern"))
+ assert not is_dtype_equal(dtype, "foo")
+ assert not is_dtype_equal(dtype, DatetimeTZDtype("ns", "CET"))
assert not is_dtype_equal(
DatetimeTZDtype("ns", "US/Eastern"), DatetimeTZDtype("ns", "US/Pacific")
)
@@ -278,9 +275,9 @@ def test_equality(self):
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
- def test_basic(self):
+ def test_basic(self, dtype):
- assert is_datetime64tz_dtype(self.dtype)
+ assert is_datetime64tz_dtype(dtype)
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr, name="A")
@@ -326,12 +323,15 @@ def test_tz_standardize(self):
class TestPeriodDtype(Base):
- def create(self):
+ @pytest.fixture
+ def dtype(self):
+ """
+ Class level fixture of dtype for TestPeriodDtype
+ """
return PeriodDtype("D")
- def test_hash_vs_equality(self):
+ def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
- dtype = self.dtype
dtype2 = PeriodDtype("D")
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
@@ -386,17 +386,17 @@ def test_identity(self):
assert PeriodDtype("period[1S1U]") == PeriodDtype("period[1000001U]")
assert PeriodDtype("period[1S1U]") is PeriodDtype("period[1000001U]")
- def test_compat(self):
- assert not is_datetime64_ns_dtype(self.dtype)
+ def test_compat(self, dtype):
+ assert not is_datetime64_ns_dtype(dtype)
assert not is_datetime64_ns_dtype("period[D]")
- assert not is_datetime64_dtype(self.dtype)
+ assert not is_datetime64_dtype(dtype)
assert not is_datetime64_dtype("period[D]")
- def test_construction_from_string(self):
+ def test_construction_from_string(self, dtype):
result = PeriodDtype("period[D]")
- assert is_dtype_equal(self.dtype, result)
+ assert is_dtype_equal(dtype, result)
result = PeriodDtype.construct_from_string("period[D]")
- assert is_dtype_equal(self.dtype, result)
+ assert is_dtype_equal(dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string("foo")
with pytest.raises(TypeError):
@@ -412,8 +412,8 @@ def test_construction_from_string(self):
with pytest.raises(TypeError, match="list"):
PeriodDtype.construct_from_string([1, 2, 3])
- def test_is_dtype(self):
- assert PeriodDtype.is_dtype(self.dtype)
+ def test_is_dtype(self, dtype):
+ assert PeriodDtype.is_dtype(dtype)
assert PeriodDtype.is_dtype("period[D]")
assert PeriodDtype.is_dtype("period[3D]")
assert PeriodDtype.is_dtype(PeriodDtype("3D"))
@@ -431,17 +431,17 @@ def test_is_dtype(self):
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
- def test_equality(self):
- assert is_dtype_equal(self.dtype, "period[D]")
- assert is_dtype_equal(self.dtype, PeriodDtype("D"))
- assert is_dtype_equal(self.dtype, PeriodDtype("D"))
+ def test_equality(self, dtype):
+ assert is_dtype_equal(dtype, "period[D]")
+ assert is_dtype_equal(dtype, PeriodDtype("D"))
+ assert is_dtype_equal(dtype, PeriodDtype("D"))
assert is_dtype_equal(PeriodDtype("D"), PeriodDtype("D"))
- assert not is_dtype_equal(self.dtype, "D")
+ assert not is_dtype_equal(dtype, "D")
assert not is_dtype_equal(PeriodDtype("D"), PeriodDtype("2D"))
- def test_basic(self):
- assert is_period_dtype(self.dtype)
+ def test_basic(self, dtype):
+ assert is_period_dtype(dtype)
pidx = pd.period_range("2013-01-01 09:00", periods=5, freq="H")
@@ -467,12 +467,15 @@ def test_not_string(self):
class TestIntervalDtype(Base):
- def create(self):
+ @pytest.fixture
+ def dtype(self):
+ """
+ Class level fixture of dtype for TestIntervalDtype
+ """
return IntervalDtype("int64")
- def test_hash_vs_equality(self):
+ def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
- dtype = self.dtype
dtype2 = IntervalDtype("int64")
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
@@ -539,11 +542,11 @@ def test_construction_errors(self, subtype):
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
- def test_construction_from_string(self):
+ def test_construction_from_string(self, dtype):
result = IntervalDtype("interval[int64]")
- assert is_dtype_equal(self.dtype, result)
+ assert is_dtype_equal(dtype, result)
result = IntervalDtype.construct_from_string("interval[int64]")
- assert is_dtype_equal(self.dtype, result)
+ assert is_dtype_equal(dtype, result)
@pytest.mark.parametrize("string", [0, 3.14, ("a", "b"), None])
def test_construction_from_string_errors(self, string):
@@ -572,8 +575,8 @@ def test_subclass(self):
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
- def test_is_dtype(self):
- assert IntervalDtype.is_dtype(self.dtype)
+ def test_is_dtype(self, dtype):
+ assert IntervalDtype.is_dtype(dtype)
assert IntervalDtype.is_dtype("interval")
assert IntervalDtype.is_dtype(IntervalDtype("float64"))
assert IntervalDtype.is_dtype(IntervalDtype("int64"))
@@ -589,12 +592,12 @@ def test_is_dtype(self):
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
- def test_equality(self):
- assert is_dtype_equal(self.dtype, "interval[int64]")
- assert is_dtype_equal(self.dtype, IntervalDtype("int64"))
+ def test_equality(self, dtype):
+ assert is_dtype_equal(dtype, "interval[int64]")
+ assert is_dtype_equal(dtype, IntervalDtype("int64"))
assert is_dtype_equal(IntervalDtype("int64"), IntervalDtype("int64"))
- assert not is_dtype_equal(self.dtype, "int64")
+ assert not is_dtype_equal(dtype, "int64")
assert not is_dtype_equal(IntervalDtype("int64"), IntervalDtype("float64"))
# invalid subtype comparisons do not raise when directly compared
@@ -650,8 +653,8 @@ def test_name_repr_generic(self, subtype):
assert str(dtype) == "interval"
assert dtype.name == "interval"
- def test_basic(self):
- assert is_interval_dtype(self.dtype)
+ def test_basic(self, dtype):
+ assert is_interval_dtype(dtype)
ii = IntervalIndex.from_breaks(range(3))
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 4a84a21084de2..22e53dbc89f01 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -280,6 +280,13 @@ def test_shift_empty_array(self, data, periods):
expected = empty
self.assert_extension_array_equal(result, expected)
+ def test_shift_zero_copies(self, data):
+ result = data.shift(0)
+ assert result is not data
+
+ result = data[:0].shift(2)
+ assert result is not data
+
def test_shift_fill_value(self, data):
arr = data[:4]
fill_value = data[0]
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 9e741bb7f267c..1ba1b872fa5e2 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -16,7 +16,7 @@
import random
import string
import sys
-from typing import Type
+from typing import Any, Mapping, Type
import numpy as np
@@ -27,7 +27,7 @@
class JSONDtype(ExtensionDtype):
type = abc.Mapping
name = "json"
- na_value = UserDict()
+ na_value: Mapping[str, Any] = UserDict()
@classmethod
def construct_array_type(cls) -> Type["JSONArray"]:
diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py
index a1c12be2b0180..6bfcac3793584 100644
--- a/pandas/tests/frame/indexing/test_datetime.py
+++ b/pandas/tests/frame/indexing/test_datetime.py
@@ -45,13 +45,6 @@ def test_set_reset(self):
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
- def test_transpose(self, timezone_frame):
-
- result = timezone_frame.T
- expected = DataFrame(timezone_frame.values.T)
- expected.index = ["A", "B", "C"]
- tm.assert_frame_equal(result, expected)
-
def test_scalar_assignment(self):
# issue #19843
df = pd.DataFrame(index=(0, 1, 2))
diff --git a/pandas/tests/frame/indexing/test_iat.py b/pandas/tests/frame/indexing/test_iat.py
new file mode 100644
index 0000000000000..23e3392251a3a
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_iat.py
@@ -0,0 +1,7 @@
+def test_iat(float_frame):
+
+ for i, row in enumerate(float_frame.index):
+ for j, col in enumerate(float_frame.columns):
+ result = float_frame.iat[i, j]
+ expected = float_frame.at[row, col]
+ assert result == expected
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 64d0f9ee2b062..6fc8c0e9ad459 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -28,6 +28,29 @@
from pandas.tseries.offsets import BDay
+class TestGet:
+ def test_get(self, float_frame):
+ b = float_frame.get("B")
+ tm.assert_series_equal(b, float_frame["B"])
+
+ assert float_frame.get("foo") is None
+ tm.assert_series_equal(
+ float_frame.get("foo", float_frame["B"]), float_frame["B"]
+ )
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame(),
+ DataFrame(columns=list("AB")),
+ DataFrame(columns=list("AB"), index=range(3)),
+ ],
+ )
+ def test_get_none(self, df):
+ # see gh-5652
+ assert df.get(None) is None
+
+
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
@@ -64,27 +87,6 @@ def test_getitem_dupe_cols(self):
with pytest.raises(KeyError, match=re.escape(msg)):
df[["baf"]]
- def test_get(self, float_frame):
- b = float_frame.get("B")
- tm.assert_series_equal(b, float_frame["B"])
-
- assert float_frame.get("foo") is None
- tm.assert_series_equal(
- float_frame.get("foo", float_frame["B"]), float_frame["B"]
- )
-
- @pytest.mark.parametrize(
- "df",
- [
- DataFrame(),
- DataFrame(columns=list("AB")),
- DataFrame(columns=list("AB"), index=range(3)),
- ],
- )
- def test_get_none(self, df):
- # see gh-5652
- assert df.get(None) is None
-
@pytest.mark.parametrize("key_type", [iter, np.array, Series, Index])
def test_loc_iterable(self, float_frame, key_type):
idx = key_type(["A", "B", "C"])
@@ -1048,9 +1050,8 @@ def test_getitem_setitem_float_labels(self):
# positional slicing only via iloc!
msg = (
- "cannot do slice indexing on "
- r"<class 'pandas\.core\.indexes\.numeric\.Float64Index'> with "
- r"these indexers \[1.0\] of <class 'float'>"
+ "cannot do positional indexing on Float64Index with "
+ r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
@@ -1547,14 +1548,6 @@ def test_loc_duplicates(self):
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
- def test_iat(self, float_frame):
-
- for i, row in enumerate(float_frame.index):
- for j, col in enumerate(float_frame.columns):
- result = float_frame.iat[i, j]
- expected = float_frame.at[row, col]
- assert result == expected
-
@pytest.mark.parametrize(
"method,expected_values",
[
@@ -1608,6 +1601,16 @@ def test_reindex_methods_nearest_special(self):
actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1])
tm.assert_frame_equal(expected, actual)
+ def test_reindex_nearest_tz(self, tz_aware_fixture):
+ # GH26683
+ tz = tz_aware_fixture
+ idx = pd.date_range("2019-01-01", periods=5, tz=tz)
+ df = pd.DataFrame({"x": list(range(5))}, index=idx)
+
+ expected = df.head(3)
+ actual = df.reindex(idx[:3], method="nearest")
+ tm.assert_frame_equal(expected, actual)
+
def test_reindex_frame_add_nat(self):
rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s")
df = DataFrame({"A": np.random.randn(len(rng)), "B": rng})
@@ -1916,89 +1919,6 @@ def test_at_time_between_time_datetimeindex(self):
result.loc[bkey] = df.iloc[binds]
tm.assert_frame_equal(result, df)
- def test_xs(self, float_frame, datetime_frame):
- idx = float_frame.index[5]
- xs = float_frame.xs(idx)
- for item, value in xs.items():
- if np.isnan(value):
- assert np.isnan(float_frame[item][idx])
- else:
- assert value == float_frame[item][idx]
-
- # mixed-type xs
- test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
- frame = DataFrame(test_data)
- xs = frame.xs("1")
- assert xs.dtype == np.object_
- assert xs["A"] == 1
- assert xs["B"] == "1"
-
- with pytest.raises(
- KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
- ):
- datetime_frame.xs(datetime_frame.index[0] - BDay())
-
- # xs get column
- series = float_frame.xs("A", axis=1)
- expected = float_frame["A"]
- tm.assert_series_equal(series, expected)
-
- # view is returned if possible
- series = float_frame.xs("A", axis=1)
- series[:] = 5
- assert (expected == 5).all()
-
- def test_xs_corner(self):
- # pathological mixed-type reordering case
- df = DataFrame(index=[0])
- df["A"] = 1.0
- df["B"] = "foo"
- df["C"] = 2.0
- df["D"] = "bar"
- df["E"] = 3.0
-
- xs = df.xs(0)
- exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
- tm.assert_series_equal(xs, exp)
-
- # no columns but Index(dtype=object)
- df = DataFrame(index=["a", "b", "c"])
- result = df.xs("a")
- expected = Series([], name="a", index=pd.Index([]), dtype=np.float64)
- tm.assert_series_equal(result, expected)
-
- def test_xs_duplicates(self):
- df = DataFrame(np.random.randn(5, 2), index=["b", "b", "c", "b", "a"])
-
- cross = df.xs("c")
- exp = df.iloc[2]
- tm.assert_series_equal(cross, exp)
-
- def test_xs_keep_level(self):
- df = DataFrame(
- {
- "day": {0: "sat", 1: "sun"},
- "flavour": {0: "strawberry", 1: "strawberry"},
- "sales": {0: 10, 1: 12},
- "year": {0: 2008, 1: 2008},
- }
- ).set_index(["year", "flavour", "day"])
- result = df.xs("sat", level="day", drop_level=False)
- expected = df[:1]
- tm.assert_frame_equal(result, expected)
-
- result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
- tm.assert_frame_equal(result, expected)
-
- def test_xs_view(self):
- # in 0.14 this will return a view if possible a copy otherwise, but
- # this is numpy dependent
-
- dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
-
- dm.xs(2)[:] = 10
- assert (dm.xs(2) == 10).all()
-
def test_index_namedtuple(self):
from collections import namedtuple
@@ -2154,31 +2074,6 @@ def test_mask_callable(self):
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10))
- def test_head_tail(self, float_frame):
- tm.assert_frame_equal(float_frame.head(), float_frame[:5])
- tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
-
- tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
- tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
-
- tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
- tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
- tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
- tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
- # with a float index
- df = float_frame.copy()
- df.index = np.arange(len(float_frame)) + 0.1
- tm.assert_frame_equal(df.head(), df.iloc[:5])
- tm.assert_frame_equal(df.tail(), df.iloc[-5:])
- tm.assert_frame_equal(df.head(0), df[0:0])
- tm.assert_frame_equal(df.tail(0), df[0:0])
- tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
- tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
- # test empty dataframe
- empty_df = DataFrame()
- tm.assert_frame_equal(empty_df.tail(), empty_df)
- tm.assert_frame_equal(empty_df.head(), empty_df)
-
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(
@@ -2270,9 +2165,40 @@ def test_set_reset(self):
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
- def test_transpose(self, uint64_frame):
- result = uint64_frame.T
- expected = DataFrame(uint64_frame.values.T)
- expected.index = ["A", "B"]
- tm.assert_frame_equal(result, expected)
+def test_object_casting_indexing_wraps_datetimelike():
+ # GH#31649, check the indexing methods all the way down the stack
+ df = pd.DataFrame(
+ {
+ "A": [1, 2],
+ "B": pd.date_range("2000", periods=2),
+ "C": pd.timedelta_range("1 Day", periods=2),
+ }
+ )
+
+ ser = df.loc[0]
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ ser = df.iloc[0]
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ ser = df.xs(0, axis=0)
+ assert isinstance(ser.values[1], pd.Timestamp)
+ assert isinstance(ser.values[2], pd.Timedelta)
+
+ mgr = df._data
+ arr = mgr.fast_xs(0)
+ assert isinstance(arr[1], pd.Timestamp)
+ assert isinstance(arr[2], pd.Timedelta)
+
+ blk = mgr.blocks[mgr._blknos[1]]
+ assert blk.dtype == "M8[ns]" # we got the right block
+ val = blk.iget((0, 0))
+ assert isinstance(val, pd.Timestamp)
+
+ blk = mgr.blocks[mgr._blknos[2]]
+ assert blk.dtype == "m8[ns]" # we got the right block
+ val = blk.iget((0, 0))
+ assert isinstance(val, pd.Timedelta)
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index df1b128dcd227..507b2e9cd237b 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -10,22 +10,30 @@
import pandas._testing as tm
-class TestDataFrameIndexingWhere:
- def test_where(self, float_string_frame, mixed_float_frame, mixed_int_frame):
- default_frame = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
-
- def _safe_add(df):
- # only add to the numeric items
- def is_ok(s):
- return (
- issubclass(s.dtype.type, (np.integer, np.floating))
- and s.dtype != "uint8"
- )
-
- return DataFrame(
- dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items())
- )
+@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
+def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame):
+ if request.param == "default":
+ return DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
+ if request.param == "float_string":
+ return float_string_frame
+ if request.param == "mixed_float":
+ return mixed_float_frame
+ if request.param == "mixed_int":
+ return mixed_int_frame
+
+
+def _safe_add(df):
+ # only add to the numeric items
+ def is_ok(s):
+ return (
+ issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8"
+ )
+
+ return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items()))
+
+class TestDataFrameIndexingWhere:
+ def test_where_get(self, where_frame, float_string_frame):
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
@@ -40,19 +48,15 @@ def _check_get(df, cond, check_dtypes=True):
assert (rs.dtypes == df.dtypes).all()
# check getting
- for df in [
- default_frame,
- float_string_frame,
- mixed_float_frame,
- mixed_int_frame,
- ]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
- cond = df > 0
- _check_get(df, cond)
-
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
+ cond = df > 0
+ _check_get(df, cond)
+
+ def test_where_upcasting(self):
# upcasting case (GH # 2794)
df = DataFrame(
{
@@ -78,6 +82,7 @@ def _check_get(df, cond, check_dtypes=True):
tm.assert_series_equal(result, expected)
+ def test_where_alignment(self, where_frame, float_string_frame):
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
@@ -107,27 +112,30 @@ def _check_align(df, cond, other, check_dtypes=True):
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
- for df in [float_string_frame, mixed_float_frame, mixed_int_frame]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
- # other is a frame
- cond = (df > 0)[1:]
- _check_align(df, cond, _safe_add(df))
+ # other is a frame
+ cond = (df > 0)[1:]
+ _check_align(df, cond, _safe_add(df))
- # check other is ndarray
- cond = df > 0
- _check_align(df, cond, (_safe_add(df).values))
+ # check other is ndarray
+ cond = df > 0
+ _check_align(df, cond, (_safe_add(df).values))
- # integers are upcast, so don't check the dtypes
- cond = df > 0
- check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
- _check_align(df, cond, np.nan, check_dtypes=check_dtypes)
+ # integers are upcast, so don't check the dtypes
+ cond = df > 0
+ check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
+ _check_align(df, cond, np.nan, check_dtypes=check_dtypes)
+ def test_where_invalid(self):
# invalid conditions
- df = default_frame
+ df = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
+ cond = df > 0
+
err1 = (df + 1).values[0:2, :]
msg = "other must be the same shape as self when an ndarray"
with pytest.raises(ValueError, match=msg):
@@ -144,7 +152,9 @@ def _check_align(df, cond, other, check_dtypes=True):
with pytest.raises(ValueError, match=msg):
df.mask(0)
+ def test_where_set(self, where_frame, float_string_frame):
# where inplace
+
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
@@ -160,27 +170,23 @@ def _check_set(df, cond, check_dtypes=True):
v = np.dtype("float64")
assert dfi[k].dtype == v
- for df in [
- default_frame,
- float_string_frame,
- mixed_float_frame,
- mixed_int_frame,
- ]:
- if df is float_string_frame:
- with pytest.raises(TypeError):
- df > 0
- continue
+ df = where_frame
+ if df is float_string_frame:
+ with pytest.raises(TypeError):
+ df > 0
+ return
- cond = df > 0
- _check_set(df, cond)
+ cond = df > 0
+ _check_set(df, cond)
- cond = df >= 0
- _check_set(df, cond)
+ cond = df >= 0
+ _check_set(df, cond)
- # aligning
- cond = (df >= 0)[1:]
- _check_set(df, cond)
+ # aligning
+ cond = (df >= 0)[1:]
+ _check_set(df, cond)
+ def test_where_series_slicing(self):
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({"a": range(3), "b": range(4, 7)})
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
new file mode 100644
index 0000000000000..71b40585f0c2f
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -0,0 +1,95 @@
+import re
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+from pandas.tseries.offsets import BDay
+
+
+class TestXS:
+ def test_xs(self, float_frame, datetime_frame):
+ idx = float_frame.index[5]
+ xs = float_frame.xs(idx)
+ for item, value in xs.items():
+ if np.isnan(value):
+ assert np.isnan(float_frame[item][idx])
+ else:
+ assert value == float_frame[item][idx]
+
+ # mixed-type xs
+ test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
+ frame = DataFrame(test_data)
+ xs = frame.xs("1")
+ assert xs.dtype == np.object_
+ assert xs["A"] == 1
+ assert xs["B"] == "1"
+
+ with pytest.raises(
+ KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
+ ):
+ datetime_frame.xs(datetime_frame.index[0] - BDay())
+
+ # xs get column
+ series = float_frame.xs("A", axis=1)
+ expected = float_frame["A"]
+ tm.assert_series_equal(series, expected)
+
+ # view is returned if possible
+ series = float_frame.xs("A", axis=1)
+ series[:] = 5
+ assert (expected == 5).all()
+
+ def test_xs_corner(self):
+ # pathological mixed-type reordering case
+ df = DataFrame(index=[0])
+ df["A"] = 1.0
+ df["B"] = "foo"
+ df["C"] = 2.0
+ df["D"] = "bar"
+ df["E"] = 3.0
+
+ xs = df.xs(0)
+ exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
+ tm.assert_series_equal(xs, exp)
+
+ # no columns but Index(dtype=object)
+ df = DataFrame(index=["a", "b", "c"])
+ result = df.xs("a")
+ expected = Series([], name="a", index=pd.Index([]), dtype=np.float64)
+ tm.assert_series_equal(result, expected)
+
+ def test_xs_duplicates(self):
+ df = DataFrame(np.random.randn(5, 2), index=["b", "b", "c", "b", "a"])
+
+ cross = df.xs("c")
+ exp = df.iloc[2]
+ tm.assert_series_equal(cross, exp)
+
+ def test_xs_keep_level(self):
+ df = DataFrame(
+ {
+ "day": {0: "sat", 1: "sun"},
+ "flavour": {0: "strawberry", 1: "strawberry"},
+ "sales": {0: 10, 1: 12},
+ "year": {0: 2008, 1: 2008},
+ }
+ ).set_index(["year", "flavour", "day"])
+ result = df.xs("sat", level="day", drop_level=False)
+ expected = df[:1]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
+ tm.assert_frame_equal(result, expected)
+
+ def test_xs_view(self):
+ # in 0.14 this will return a view if possible a copy otherwise, but
+ # this is numpy dependent
+
+ dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
+
+ dm.xs(2)[:] = 10
+ assert (dm.xs(2) == 10).all()
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
new file mode 100644
index 0000000000000..7715cb1cb6eec
--- /dev/null
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -0,0 +1,349 @@
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Index, Series
+import pandas._testing as tm
+
+
+class TestDataFrameCombineFirst:
+ def test_combine_first_mixed(self):
+ a = Series(["a", "b"], index=range(2))
+ b = Series(range(2), index=range(2))
+ f = DataFrame({"A": a, "B": b})
+
+ a = Series(["a", "b"], index=range(5, 7))
+ b = Series(range(2), index=range(5, 7))
+ g = DataFrame({"A": a, "B": b})
+
+ exp = pd.DataFrame(
+ {"A": list("abab"), "B": [0.0, 1.0, 0.0, 1.0]}, index=[0, 1, 5, 6]
+ )
+ combined = f.combine_first(g)
+ tm.assert_frame_equal(combined, exp)
+
+ def test_combine_first(self, float_frame):
+ # disjoint
+ head, tail = float_frame[:5], float_frame[5:]
+
+ combined = head.combine_first(tail)
+ reordered_frame = float_frame.reindex(combined.index)
+ tm.assert_frame_equal(combined, reordered_frame)
+ assert tm.equalContents(combined.columns, float_frame.columns)
+ tm.assert_series_equal(combined["A"], reordered_frame["A"])
+
+ # same index
+ fcopy = float_frame.copy()
+ fcopy["A"] = 1
+ del fcopy["C"]
+
+ fcopy2 = float_frame.copy()
+ fcopy2["B"] = 0
+ del fcopy2["D"]
+
+ combined = fcopy.combine_first(fcopy2)
+
+ assert (combined["A"] == 1).all()
+ tm.assert_series_equal(combined["B"], fcopy["B"])
+ tm.assert_series_equal(combined["C"], fcopy2["C"])
+ tm.assert_series_equal(combined["D"], fcopy["D"])
+
+ # overlap
+ head, tail = reordered_frame[:10].copy(), reordered_frame
+ head["A"] = 1
+
+ combined = head.combine_first(tail)
+ assert (combined["A"][:10] == 1).all()
+
+ # reverse overlap
+ tail["A"][:10] = 0
+ combined = tail.combine_first(head)
+ assert (combined["A"][:10] == 0).all()
+
+ # no overlap
+ f = float_frame[:10]
+ g = float_frame[10:]
+ combined = f.combine_first(g)
+ tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
+ tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
+
+ # corner cases
+ comb = float_frame.combine_first(DataFrame())
+ tm.assert_frame_equal(comb, float_frame)
+
+ comb = DataFrame().combine_first(float_frame)
+ tm.assert_frame_equal(comb, float_frame)
+
+ comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
+ assert "faz" in comb.index
+
+ # #2525
+ df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
+ df2 = DataFrame(columns=["b"])
+ result = df.combine_first(df2)
+ assert "b" in result
+
+ def test_combine_first_mixed_bug(self):
+ idx = Index(["a", "b", "c", "e"])
+ ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
+ ser2 = Series(["a", "b", "c", "e"], index=idx)
+ ser3 = Series([12, 4, 5, 97], index=idx)
+
+ frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
+
+ idx = Index(["a", "b", "c", "f"])
+ ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
+ ser2 = Series(["a", "b", "c", "f"], index=idx)
+ ser3 = Series([12, 4, 5, 97], index=idx)
+
+ frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
+
+ combined = frame1.combine_first(frame2)
+ assert len(combined.columns) == 5
+
+ # gh 3016 (same as in update)
+ df = DataFrame(
+ [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+
+ other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
+ result = df.combine_first(other)
+ tm.assert_frame_equal(result, df)
+
+ df.loc[0, "A"] = np.nan
+ result = df.combine_first(other)
+ df.loc[0, "A"] = 45
+ tm.assert_frame_equal(result, df)
+
+ # doc example
+ df1 = DataFrame(
+ {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
+ )
+
+ df2 = DataFrame(
+ {
+ "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
+ "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
+ }
+ )
+
+ result = df1.combine_first(df2)
+ expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
+ tm.assert_frame_equal(result, expected)
+
+ # GH3552, return object dtype with bools
+ df1 = DataFrame(
+ [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
+ )
+ df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
+
+ result = df1.combine_first(df2)[2]
+ expected = Series([True, True, False], name=2)
+ tm.assert_series_equal(result, expected)
+
+ # GH 3593, converting datetime64[ns] incorrectly
+ df0 = DataFrame(
+ {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
+ )
+ df1 = DataFrame({"a": [None, None, None]})
+ df2 = df1.combine_first(df0)
+ tm.assert_frame_equal(df2, df0)
+
+ df2 = df0.combine_first(df1)
+ tm.assert_frame_equal(df2, df0)
+
+ df0 = DataFrame(
+ {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
+ )
+ df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
+ df2 = df1.combine_first(df0)
+ result = df0.copy()
+ result.iloc[0, :] = df1.iloc[0, :]
+ tm.assert_frame_equal(df2, result)
+
+ df2 = df0.combine_first(df1)
+ tm.assert_frame_equal(df2, df0)
+
+ def test_combine_first_align_nan(self):
+ # GH 7509 (not fixed)
+ dfa = pd.DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
+ dfb = pd.DataFrame([[4], [5]], columns=["b"])
+ assert dfa["a"].dtype == "datetime64[ns]"
+ assert dfa["b"].dtype == "int64"
+
+ res = dfa.combine_first(dfb)
+ exp = pd.DataFrame(
+ {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2.0, 5.0]},
+ columns=["a", "b"],
+ )
+ tm.assert_frame_equal(res, exp)
+ assert res["a"].dtype == "datetime64[ns]"
+ # ToDo: this must be int64
+ assert res["b"].dtype == "float64"
+
+ res = dfa.iloc[:0].combine_first(dfb)
+ exp = pd.DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
+ tm.assert_frame_equal(res, exp)
+ # ToDo: this must be datetime64
+ assert res["a"].dtype == "float64"
+ # ToDo: this must be int64
+ assert res["b"].dtype == "int64"
+
+ def test_combine_first_timezone(self):
+ # see gh-7630
+ data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
+ df1 = pd.DataFrame(
+ columns=["UTCdatetime", "abc"],
+ data=data1,
+ index=pd.date_range("20140627", periods=1),
+ )
+ data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
+ df2 = pd.DataFrame(
+ columns=["UTCdatetime", "xyz"],
+ data=data2,
+ index=pd.date_range("20140628", periods=1),
+ )
+ res = df2[["UTCdatetime"]].combine_first(df1)
+ exp = pd.DataFrame(
+ {
+ "UTCdatetime": [
+ pd.Timestamp("2010-01-01 01:01", tz="UTC"),
+ pd.Timestamp("2012-12-12 12:12", tz="UTC"),
+ ],
+ "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
+ },
+ columns=["UTCdatetime", "abc"],
+ index=pd.date_range("20140627", periods=2, freq="D"),
+ )
+ tm.assert_frame_equal(res, exp)
+ assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
+ assert res["abc"].dtype == "datetime64[ns, UTC]"
+
+ # see gh-10567
+ dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["DATE"].dtype == "datetime64[ns, UTC]"
+
+ dts1 = pd.DatetimeIndex(
+ ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
+ )
+ df1 = pd.DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
+ dts2 = pd.DatetimeIndex(
+ ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
+ )
+ df2 = pd.DataFrame({"DATE": dts2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.DatetimeIndex(
+ [
+ "2011-01-01",
+ "2012-01-01",
+ "NaT",
+ "2012-01-02",
+ "2011-01-03",
+ "2011-01-04",
+ ],
+ tz="US/Eastern",
+ )
+ exp = pd.DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+
+ # different tz
+ dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-03", "2015-01-05")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ # if df1 doesn't have NaN, keep its dtype
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
+
+ dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
+ df1 = pd.DataFrame({"DATE": dts1})
+ dts2 = pd.date_range("2015-01-01", "2015-01-03")
+ df2 = pd.DataFrame({"DATE": dts2})
+
+ res = df1.combine_first(df2)
+ exp_dts = [
+ pd.Timestamp("2015-01-01", tz="US/Eastern"),
+ pd.Timestamp("2015-01-02", tz="US/Eastern"),
+ pd.Timestamp("2015-01-03"),
+ ]
+ exp = pd.DataFrame({"DATE": exp_dts})
+ tm.assert_frame_equal(res, exp)
+ assert res["DATE"].dtype == "object"
+
+ def test_combine_first_timedelta(self):
+ data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
+ df1 = pd.DataFrame({"TD": data1}, index=[1, 3, 5, 7])
+ data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
+ df2 = pd.DataFrame({"TD": data2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.TimedeltaIndex(
+ ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
+ )
+ exp = pd.DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["TD"].dtype == "timedelta64[ns]"
+
+ def test_combine_first_period(self):
+ data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
+ df1 = pd.DataFrame({"P": data1}, index=[1, 3, 5, 7])
+ data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
+ df2 = pd.DataFrame({"P": data2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = pd.PeriodIndex(
+ ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
+ )
+ exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["P"].dtype == data1.dtype
+
+ # different freq
+ dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
+ df2 = pd.DataFrame({"P": dts2}, index=[2, 4, 5])
+
+ res = df1.combine_first(df2)
+ exp_dts = [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2012-01-01", freq="D"),
+ pd.NaT,
+ pd.Period("2012-01-02", freq="D"),
+ pd.Period("2011-03", freq="M"),
+ pd.Period("2011-04", freq="M"),
+ ]
+ exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
+ tm.assert_frame_equal(res, exp)
+ assert res["P"].dtype == "object"
+
+ def test_combine_first_int(self):
+ # GH14687 - integer series that do no align exactly
+
+ df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
+ df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64")
+
+ res = df1.combine_first(df2)
+ tm.assert_frame_equal(res, df1)
+ assert res["a"].dtype == "int64"
+
+ @pytest.mark.parametrize("val", [1, 1.0])
+ def test_combine_first_with_asymmetric_other(self, val):
+ # see gh-20699
+ df1 = pd.DataFrame({"isNum": [val]})
+ df2 = pd.DataFrame({"isBool": [True]})
+
+ res = df1.combine_first(df2)
+ exp = pd.DataFrame({"isBool": [True], "isNum": [val]})
+
+ tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
new file mode 100644
index 0000000000000..93763bc12ce0d
--- /dev/null
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -0,0 +1,30 @@
+import numpy as np
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_head_tail(float_frame):
+ tm.assert_frame_equal(float_frame.head(), float_frame[:5])
+ tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
+
+ tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
+ tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
+
+ tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
+ tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
+ tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
+ tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
+ # with a float index
+ df = float_frame.copy()
+ df.index = np.arange(len(float_frame)) + 0.1
+ tm.assert_frame_equal(df.head(), df.iloc[:5])
+ tm.assert_frame_equal(df.tail(), df.iloc[-5:])
+ tm.assert_frame_equal(df.head(0), df[0:0])
+ tm.assert_frame_equal(df.tail(0), df[0:0])
+ tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
+ tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
+ # test empty dataframe
+ empty_df = DataFrame()
+ tm.assert_frame_equal(empty_df.tail(), empty_df)
+ tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index aa91e7a489356..92b74c4409d7d 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1356,3 +1356,10 @@ def test_replace_replacer_dtype(self, replacer):
result = df.replace({"a": replacer, "b": replacer})
expected = pd.DataFrame([replacer])
tm.assert_frame_equal(result, expected)
+
+ def test_replace_after_convert_dtypes(self):
+ # GH31517
+ df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
+ result = df.replace(1, 10)
+ expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
index 428b9e5068407..a5fe5f3a6d5e4 100644
--- a/pandas/tests/frame/methods/test_transpose.py
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -1,3 +1,5 @@
+import numpy as np
+
import pandas as pd
import pandas._testing as tm
@@ -41,3 +43,34 @@ def test_transpose_object_to_tzaware_mixed_tz(self):
assert (df2.dtypes == object).all()
res2 = df2.T
assert (res2.dtypes == [dti.dtype, dti2.dtype]).all()
+
+ def test_transpose_uint64(self, uint64_frame):
+
+ result = uint64_frame.T
+ expected = pd.DataFrame(uint64_frame.values.T)
+ expected.index = ["A", "B"]
+ tm.assert_frame_equal(result, expected)
+
+ def test_transpose_float(self, float_frame):
+ frame = float_frame
+ dft = frame.T
+ for idx, series in dft.items():
+ for col, value in series.items():
+ if np.isnan(value):
+ assert np.isnan(frame[col][idx])
+ else:
+ assert value == frame[col][idx]
+
+ # mixed type
+ index, data = tm.getMixedTypeDict()
+ mixed = pd.DataFrame(data, index=index)
+
+ mixed_T = mixed.T
+ for col, s in mixed_T.items():
+ assert s.dtype == np.object_
+
+ def test_transpose_get_view(self, float_frame):
+ dft = float_frame.T
+ dft.values[:, 5:10] = 5
+
+ assert (float_frame.values[5:10] == 5).all()
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
new file mode 100644
index 0000000000000..d9de026dbf4e9
--- /dev/null
+++ b/pandas/tests/frame/methods/test_update.py
@@ -0,0 +1,135 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series, date_range
+import pandas._testing as tm
+
+
+class TestDataFrameUpdate:
+ def test_update_nan(self):
+ # #15593 #15617
+ # test 1
+ df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
+ df2 = DataFrame({"A": [None, 2, 3]})
+ expected = df1.copy()
+ df1.update(df2, overwrite=False)
+
+ tm.assert_frame_equal(df1, expected)
+
+ # test 2
+ df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
+ df2 = DataFrame({"A": [None, 2, 3]})
+ expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
+ df1.update(df2, overwrite=False)
+
+ tm.assert_frame_equal(df1, expected)
+
+ def test_update(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_dtypes(self):
+
+ # gh 3016
+ df = DataFrame(
+ [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+
+ other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
+ df.update(other)
+
+ expected = DataFrame(
+ [[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
+ columns=["A", "B", "bool1", "bool2"],
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_nooverwrite(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other, overwrite=False)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_filtered(self):
+ df = DataFrame(
+ [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
+
+ df.update(other, filter_func=lambda x: x > 2)
+
+ expected = DataFrame(
+ [[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
+ )
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize(
+ "bad_kwarg, exception, msg",
+ [
+ # errors must be 'ignore' or 'raise'
+ ({"errors": "something"}, ValueError, "The parameter errors must.*"),
+ ({"join": "inner"}, NotImplementedError, "Only left join is supported"),
+ ],
+ )
+ def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
+ df = DataFrame([[1.5, 1, 3.0]])
+ with pytest.raises(exception, match=msg):
+ df.update(df, **bad_kwarg)
+
+ def test_update_raise_on_overlap(self):
+ df = DataFrame(
+ [[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
+ )
+
+ other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
+ with pytest.raises(ValueError, match="Data overlaps"):
+ df.update(other, errors="raise")
+
+ def test_update_from_non_df(self):
+ d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
+ df = DataFrame(d)
+
+ d["a"] = Series([5, 6, 7, 8])
+ df.update(d)
+
+ expected = DataFrame(d)
+
+ tm.assert_frame_equal(df, expected)
+
+ d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
+ df = DataFrame(d)
+
+ d["a"] = [5, 6, 7, 8]
+ df.update(d)
+
+ expected = DataFrame(d)
+
+ tm.assert_frame_equal(df, expected)
+
+ def test_update_datetime_tz(self):
+ # GH 25807
+ result = DataFrame([pd.Timestamp("2019", tz="UTC")])
+ result.update(result)
+ expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 9de5d6fe16a0d..17cc50661e3cb 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -14,15 +14,15 @@
class TestDataFrameMisc:
- def test_copy_index_name_checking(self, float_frame):
+ @pytest.mark.parametrize("attr", ["index", "columns"])
+ def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
- for attr in ("index", "columns"):
- ind = getattr(float_frame, attr)
- ind.name = None
- cp = float_frame.copy()
- getattr(cp, attr).name = "foo"
- assert getattr(float_frame, attr).name is None
+ ind = getattr(float_frame, attr)
+ ind.name = None
+ cp = float_frame.copy()
+ getattr(cp, attr).name = "foo"
+ assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
@@ -358,24 +358,6 @@ def test_to_numpy_copy(self):
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is None
- def test_transpose(self, float_frame):
- frame = float_frame
- dft = frame.T
- for idx, series in dft.items():
- for col, value in series.items():
- if np.isnan(value):
- assert np.isnan(frame[col][idx])
- else:
- assert value == frame[col][idx]
-
- # mixed type
- index, data = tm.getMixedTypeDict()
- mixed = DataFrame(data, index=index)
-
- mixed_T = mixed.T
- for col, s in mixed_T.items():
- assert s.dtype == np.object_
-
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
@@ -470,12 +452,6 @@ def test_deepcopy(self, float_frame):
for idx, value in series.items():
assert float_frame["A"][idx] != value
- def test_transpose_get_view(self, float_frame):
- dft = float_frame.T
- dft.values[:, 5:10] = 5
-
- assert (float_frame.values[5:10] == 5).all()
-
def test_inplace_return_self(self):
# GH 1893
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index e98f74e133ea9..fe6abef97acc4 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -703,6 +703,14 @@ def apply_list(row):
)
tm.assert_series_equal(result, expected)
+ def test_apply_noreduction_tzaware_object(self):
+ # https://github.com/pandas-dev/pandas/issues/31505
+ df = pd.DataFrame({"foo": [pd.Timestamp("2020", tz="UTC")]}, dtype="object")
+ result = df.apply(lambda x: x)
+ tm.assert_frame_equal(result, df)
+ result = df.apply(lambda x: x.copy())
+ tm.assert_frame_equal(result, df)
+
class TestInferOutputShape:
# the user has supplied an opaque UDF where
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 9bad54b051d6c..36a476d195fe5 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -128,115 +128,6 @@ def test_concat_tuple_keys(self):
)
tm.assert_frame_equal(results, expected)
- def test_update(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_dtypes(self):
-
- # gh 3016
- df = DataFrame(
- [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
-
- other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
- df.update(other)
-
- expected = DataFrame(
- [[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_nooverwrite(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other, overwrite=False)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- def test_update_filtered(self):
- df = DataFrame(
- [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
-
- df.update(other, filter_func=lambda x: x > 2)
-
- expected = DataFrame(
- [[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
- )
- tm.assert_frame_equal(df, expected)
-
- @pytest.mark.parametrize(
- "bad_kwarg, exception, msg",
- [
- # errors must be 'ignore' or 'raise'
- ({"errors": "something"}, ValueError, "The parameter errors must.*"),
- ({"join": "inner"}, NotImplementedError, "Only left join is supported"),
- ],
- )
- def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
- df = DataFrame([[1.5, 1, 3.0]])
- with pytest.raises(exception, match=msg):
- df.update(df, **bad_kwarg)
-
- def test_update_raise_on_overlap(self):
- df = DataFrame(
- [[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
- )
-
- other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
- with pytest.raises(ValueError, match="Data overlaps"):
- df.update(other, errors="raise")
-
- def test_update_from_non_df(self):
- d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
- df = DataFrame(d)
-
- d["a"] = Series([5, 6, 7, 8])
- df.update(d)
-
- expected = DataFrame(d)
-
- tm.assert_frame_equal(df, expected)
-
- d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
- df = DataFrame(d)
-
- d["a"] = [5, 6, 7, 8]
- df.update(d)
-
- expected = DataFrame(d)
-
- tm.assert_frame_equal(df, expected)
-
- def test_update_datetime_tz(self):
- # GH 25807
- result = DataFrame([pd.Timestamp("2019", tz="UTC")])
- result.update(result)
- expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
- tm.assert_frame_equal(result, expected)
-
def test_join_str_datetime(self):
str_dates = ["20120209", "20120222"]
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
@@ -422,347 +313,6 @@ def test_concat_astype_dup_col(self):
).astype("category")
tm.assert_frame_equal(result, expected)
-
-class TestDataFrameCombineFirst:
- def test_combine_first_mixed(self):
- a = Series(["a", "b"], index=range(2))
- b = Series(range(2), index=range(2))
- f = DataFrame({"A": a, "B": b})
-
- a = Series(["a", "b"], index=range(5, 7))
- b = Series(range(2), index=range(5, 7))
- g = DataFrame({"A": a, "B": b})
-
- exp = pd.DataFrame(
- {"A": list("abab"), "B": [0.0, 1.0, 0.0, 1.0]}, index=[0, 1, 5, 6]
- )
- combined = f.combine_first(g)
- tm.assert_frame_equal(combined, exp)
-
- def test_combine_first(self, float_frame):
- # disjoint
- head, tail = float_frame[:5], float_frame[5:]
-
- combined = head.combine_first(tail)
- reordered_frame = float_frame.reindex(combined.index)
- tm.assert_frame_equal(combined, reordered_frame)
- assert tm.equalContents(combined.columns, float_frame.columns)
- tm.assert_series_equal(combined["A"], reordered_frame["A"])
-
- # same index
- fcopy = float_frame.copy()
- fcopy["A"] = 1
- del fcopy["C"]
-
- fcopy2 = float_frame.copy()
- fcopy2["B"] = 0
- del fcopy2["D"]
-
- combined = fcopy.combine_first(fcopy2)
-
- assert (combined["A"] == 1).all()
- tm.assert_series_equal(combined["B"], fcopy["B"])
- tm.assert_series_equal(combined["C"], fcopy2["C"])
- tm.assert_series_equal(combined["D"], fcopy["D"])
-
- # overlap
- head, tail = reordered_frame[:10].copy(), reordered_frame
- head["A"] = 1
-
- combined = head.combine_first(tail)
- assert (combined["A"][:10] == 1).all()
-
- # reverse overlap
- tail["A"][:10] = 0
- combined = tail.combine_first(head)
- assert (combined["A"][:10] == 0).all()
-
- # no overlap
- f = float_frame[:10]
- g = float_frame[10:]
- combined = f.combine_first(g)
- tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
- tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
-
- # corner cases
- comb = float_frame.combine_first(DataFrame())
- tm.assert_frame_equal(comb, float_frame)
-
- comb = DataFrame().combine_first(float_frame)
- tm.assert_frame_equal(comb, float_frame)
-
- comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
- assert "faz" in comb.index
-
- # #2525
- df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
- df2 = DataFrame(columns=["b"])
- result = df.combine_first(df2)
- assert "b" in result
-
- def test_combine_first_mixed_bug(self):
- idx = Index(["a", "b", "c", "e"])
- ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
- ser2 = Series(["a", "b", "c", "e"], index=idx)
- ser3 = Series([12, 4, 5, 97], index=idx)
-
- frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
-
- idx = Index(["a", "b", "c", "f"])
- ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
- ser2 = Series(["a", "b", "c", "f"], index=idx)
- ser3 = Series([12, 4, 5, 97], index=idx)
-
- frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
-
- combined = frame1.combine_first(frame2)
- assert len(combined.columns) == 5
-
- # gh 3016 (same as in update)
- df = DataFrame(
- [[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
- columns=["A", "B", "bool1", "bool2"],
- )
-
- other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
- result = df.combine_first(other)
- tm.assert_frame_equal(result, df)
-
- df.loc[0, "A"] = np.nan
- result = df.combine_first(other)
- df.loc[0, "A"] = 45
- tm.assert_frame_equal(result, df)
-
- # doc example
- df1 = DataFrame(
- {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
- )
-
- df2 = DataFrame(
- {
- "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
- "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
- }
- )
-
- result = df1.combine_first(df2)
- expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
- tm.assert_frame_equal(result, expected)
-
- # GH3552, return object dtype with bools
- df1 = DataFrame(
- [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
- )
- df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
-
- result = df1.combine_first(df2)[2]
- expected = Series([True, True, False], name=2)
- tm.assert_series_equal(result, expected)
-
- # GH 3593, converting datetime64[ns] incorrectly
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [None, None, None]})
- df2 = df1.combine_first(df0)
- tm.assert_frame_equal(df2, df0)
-
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
-
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
- df2 = df1.combine_first(df0)
- result = df0.copy()
- result.iloc[0, :] = df1.iloc[0, :]
- tm.assert_frame_equal(df2, result)
-
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
-
- def test_combine_first_align_nan(self):
- # GH 7509 (not fixed)
- dfa = pd.DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
- dfb = pd.DataFrame([[4], [5]], columns=["b"])
- assert dfa["a"].dtype == "datetime64[ns]"
- assert dfa["b"].dtype == "int64"
-
- res = dfa.combine_first(dfb)
- exp = pd.DataFrame(
- {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2.0, 5.0]},
- columns=["a", "b"],
- )
- tm.assert_frame_equal(res, exp)
- assert res["a"].dtype == "datetime64[ns]"
- # ToDo: this must be int64
- assert res["b"].dtype == "float64"
-
- res = dfa.iloc[:0].combine_first(dfb)
- exp = pd.DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
- tm.assert_frame_equal(res, exp)
- # ToDo: this must be datetime64
- assert res["a"].dtype == "float64"
- # ToDo: this must be int64
- assert res["b"].dtype == "int64"
-
- def test_combine_first_timezone(self):
- # see gh-7630
- data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
- df1 = pd.DataFrame(
- columns=["UTCdatetime", "abc"],
- data=data1,
- index=pd.date_range("20140627", periods=1),
- )
- data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
- df2 = pd.DataFrame(
- columns=["UTCdatetime", "xyz"],
- data=data2,
- index=pd.date_range("20140628", periods=1),
- )
- res = df2[["UTCdatetime"]].combine_first(df1)
- exp = pd.DataFrame(
- {
- "UTCdatetime": [
- pd.Timestamp("2010-01-01 01:01", tz="UTC"),
- pd.Timestamp("2012-12-12 12:12", tz="UTC"),
- ],
- "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
- },
- columns=["UTCdatetime", "abc"],
- index=pd.date_range("20140627", periods=2, freq="D"),
- )
- tm.assert_frame_equal(res, exp)
- assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
- assert res["abc"].dtype == "datetime64[ns, UTC]"
-
- # see gh-10567
- dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
- df2 = pd.DataFrame({"DATE": dts2})
-
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["DATE"].dtype == "datetime64[ns, UTC]"
-
- dts1 = pd.DatetimeIndex(
- ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
- )
- df1 = pd.DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
- dts2 = pd.DatetimeIndex(
- ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
- )
- df2 = pd.DataFrame({"DATE": dts2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.DatetimeIndex(
- [
- "2011-01-01",
- "2012-01-01",
- "NaT",
- "2012-01-02",
- "2011-01-03",
- "2011-01-04",
- ],
- tz="US/Eastern",
- )
- exp = pd.DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
-
- # different tz
- dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-03", "2015-01-05")
- df2 = pd.DataFrame({"DATE": dts2})
-
- # if df1 doesn't have NaN, keep its dtype
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
-
- dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
- df1 = pd.DataFrame({"DATE": dts1})
- dts2 = pd.date_range("2015-01-01", "2015-01-03")
- df2 = pd.DataFrame({"DATE": dts2})
-
- res = df1.combine_first(df2)
- exp_dts = [
- pd.Timestamp("2015-01-01", tz="US/Eastern"),
- pd.Timestamp("2015-01-02", tz="US/Eastern"),
- pd.Timestamp("2015-01-03"),
- ]
- exp = pd.DataFrame({"DATE": exp_dts})
- tm.assert_frame_equal(res, exp)
- assert res["DATE"].dtype == "object"
-
- def test_combine_first_timedelta(self):
- data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
- df1 = pd.DataFrame({"TD": data1}, index=[1, 3, 5, 7])
- data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
- df2 = pd.DataFrame({"TD": data2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.TimedeltaIndex(
- ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
- )
- exp = pd.DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["TD"].dtype == "timedelta64[ns]"
-
- def test_combine_first_period(self):
- data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
- df1 = pd.DataFrame({"P": data1}, index=[1, 3, 5, 7])
- data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
- df2 = pd.DataFrame({"P": data2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = pd.PeriodIndex(
- ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
- )
- exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["P"].dtype == data1.dtype
-
- # different freq
- dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
- df2 = pd.DataFrame({"P": dts2}, index=[2, 4, 5])
-
- res = df1.combine_first(df2)
- exp_dts = [
- pd.Period("2011-01", freq="M"),
- pd.Period("2012-01-01", freq="D"),
- pd.NaT,
- pd.Period("2012-01-02", freq="D"),
- pd.Period("2011-03", freq="M"),
- pd.Period("2011-04", freq="M"),
- ]
- exp = pd.DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
- tm.assert_frame_equal(res, exp)
- assert res["P"].dtype == "object"
-
- def test_combine_first_int(self):
- # GH14687 - integer series that do no align exactly
-
- df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
- df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64")
-
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["a"].dtype == "int64"
-
- @pytest.mark.parametrize("val", [1, 1.0])
- def test_combine_first_with_asymmetric_other(self, val):
- # see gh-20699
- df1 = pd.DataFrame({"isNum": [val]})
- df2 = pd.DataFrame({"isBool": [True]})
-
- res = df1.combine_first(df2)
- exp = pd.DataFrame({"isBool": [True], "isNum": [val]})
-
- tm.assert_frame_equal(res, exp)
-
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
@@ -776,23 +326,3 @@ def test_concat_datetime_datetime64_frame(self):
# it works!
pd.concat([df1, df2_obj])
-
-
-class TestDataFrameUpdate:
- def test_update_nan(self):
- # #15593 #15617
- # test 1
- df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
- df2 = DataFrame({"A": [None, 2, 3]})
- expected = df1.copy()
- df1.update(df2, overwrite=False)
-
- tm.assert_frame_equal(df1, expected)
-
- # test 2
- df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
- df2 = DataFrame({"A": [None, 2, 3]})
- expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
- df1.update(df2, overwrite=False)
-
- tm.assert_frame_equal(df1, expected)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 7b1a9d8ff6ae3..5f4c78449f71d 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1860,9 +1860,8 @@ def check(df):
# No NaN found -> error
if len(indexer) == 0:
msg = (
- "cannot do label indexing on "
- r"<class 'pandas\.core\.indexes\.range\.RangeIndex'> "
- r"with these indexers \[nan\] of <class 'float'>"
+ "cannot do label indexing on RangeIndex "
+ r"with these indexers \[nan\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 49e6fe4940e18..a7e01d8f1fd6d 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -223,8 +223,7 @@ def test_info_verbose(self):
for i, line in enumerate(lines):
if i >= start and i < start + size:
- index = i - start
- line_nr = " {} ".format(index)
+ line_nr = f" {i - start} "
assert line.startswith(line_nr)
def test_info_memory(self):
@@ -236,7 +235,7 @@ def test_info_memory(self):
bytes = float(df.memory_usage().sum())
expected = textwrap.dedent(
- """\
+ f"""\
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
@@ -244,10 +243,8 @@ def test_info_memory(self):
--- ------ -------------- -----
0 a 2 non-null int64
dtypes: int64(1)
- memory usage: {} bytes
- """.format(
- bytes
- )
+ memory usage: {bytes} bytes
+ """
)
assert result == expected
@@ -313,9 +310,7 @@ def test_info_shows_column_dtypes(self):
)
assert header in res
for i, dtype in enumerate(dtypes):
- name = " {i:d} {i:d} {n:d} non-null {dtype}".format(
- i=i, n=n, dtype=dtype
- )
+ name = f" {i:d} {i:d} {n:d} non-null {dtype}"
assert name in res
def test_info_max_cols(self):
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 2d31996a8a964..ff99081521ffb 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -377,6 +377,65 @@ def test_agg_index_has_complex_internals(index):
tm.assert_frame_equal(result, expected)
+def test_agg_split_block():
+ # https://github.com/pandas-dev/pandas/issues/31522
+ df = pd.DataFrame(
+ {
+ "key1": ["a", "a", "b", "b", "a"],
+ "key2": ["one", "two", "one", "two", "one"],
+ "key3": ["three", "three", "three", "six", "six"],
+ }
+ )
+ result = df.groupby("key1").min()
+ expected = pd.DataFrame(
+ {"key2": ["one", "one"], "key3": ["six", "six"]},
+ index=pd.Index(["a", "b"], name="key1"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_agg_split_object_part_datetime():
+ # https://github.com/pandas-dev/pandas/pull/31616
+ df = pd.DataFrame(
+ {
+ "A": pd.date_range("2000", periods=4),
+ "B": ["a", "b", "c", "d"],
+ "C": [1, 2, 3, 4],
+ "D": ["b", "c", "d", "e"],
+ "E": pd.date_range("2000", periods=4),
+ "F": [1, 2, 3, 4],
+ }
+ ).astype(object)
+ result = df.groupby([0, 0, 0, 0]).min()
+ expected = pd.DataFrame(
+ {
+ "A": [pd.Timestamp("2000")],
+ "B": ["a"],
+ "C": [1],
+ "D": ["b"],
+ "E": [pd.Timestamp("2000")],
+ "F": [1],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_agg_cython_category_not_implemented_fallback():
+ # https://github.com/pandas-dev/pandas/issues/31450
+ df = pd.DataFrame({"col_num": [1, 1, 2, 3]})
+ df["col_cat"] = df["col_num"].astype("category")
+
+ result = df.groupby("col_num").col_cat.first()
+ expected = pd.Series(
+ [1, 2, 3], index=pd.Index([1, 2, 3], name="col_num"), name="col_cat"
+ )
+ tm.assert_series_equal(result, expected)
+
+ result = df.groupby("col_num").agg({"col_cat": "first"})
+ expected = expected.to_frame()
+ tm.assert_frame_equal(result, expected)
+
+
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = pd.Series([1, 2, 3, 4])
@@ -684,6 +743,34 @@ def aggfunc(x):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("func", ["min", "max"])
+def test_groupby_aggregate_period_column(func):
+ # GH 31471
+ groups = [1, 2]
+ periods = pd.period_range("2020", periods=2, freq="Y")
+ df = pd.DataFrame({"a": groups, "b": periods})
+
+ result = getattr(df.groupby("a")["b"], func)()
+ idx = pd.Int64Index([1, 2], name="a")
+ expected = pd.Series(periods, index=idx, name="b")
+
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["min", "max"])
+def test_groupby_aggregate_period_frame(func):
+ # GH 31471
+ groups = [1, 2]
+ periods = pd.period_range("2020", periods=2, freq="Y")
+ df = pd.DataFrame({"a": groups, "b": periods})
+
+ result = getattr(df.groupby("a"), func)()
+ idx = pd.Int64Index([1, 2], name="a")
+ expected = pd.DataFrame({"b": periods}, index=idx)
+
+ tm.assert_frame_equal(result, expected)
+
+
class TestLambdaMangling:
def test_basic(self):
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9c2b045079622..41ec70468aaeb 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -851,3 +851,17 @@ def test_apply_function_returns_non_pandas_non_scalar(function, expected_values)
result = df.groupby("groups").apply(function)
expected = pd.Series(expected_values, index=pd.Index(["A", "B"], name="groups"))
tm.assert_series_equal(result, expected)
+
+
+def test_apply_function_returns_numpy_array():
+ # GH 31605
+ def fct(group):
+ return group["B"].values.flatten()
+
+ df = pd.DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})
+
+ result = df.groupby("A").apply(fct)
+ expected = pd.Series(
+ [[1.0, 2.0], [3.0], [np.nan]], index=pd.Index(["a", "b", "none"], name="A")
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 97cf1af1d2e9e..73e36cb5e6c84 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -671,7 +671,7 @@ def test_nsmallest():
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
-@pytest.mark.parametrize("func", ["mean", "var", "std", "cumprod", "cumsum"])
+@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 4273139b32828..efcd22f9c0c82 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -676,6 +676,19 @@ def test_groupby_level_index_value_all_na(self):
)
tm.assert_frame_equal(result, expected)
+ def test_groupby_multiindex_level_empty(self):
+ # https://github.com/pandas-dev/pandas/issues/31670
+ df = pd.DataFrame(
+ [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]
+ )
+ df = df.set_index(["id", "category"])
+ empty = df[df.value < 0]
+ result = empty.groupby("id").sum()
+ expected = pd.DataFrame(
+ dtype="float64", columns=["value"], index=pd.Int64Index([], name="id")
+ )
+ tm.assert_frame_equal(result, expected)
+
# get_group
# --------------------------------
diff --git a/pandas/tests/indexes/base_class/__init__.py b/pandas/tests/indexes/base_class/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/indexes/base_class/test_constructors.py b/pandas/tests/indexes/base_class/test_constructors.py
new file mode 100644
index 0000000000000..9e6a8f34c135d
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_constructors.py
@@ -0,0 +1,36 @@
+import pytest
+
+from pandas import Index, MultiIndex
+
+
+class TestIndexConstructor:
+ # Tests for the Index constructor, specifically for cases that do
+ # not return a subclass
+
+ def test_constructor_corner(self):
+ # corner case
+ msg = (
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ "kind, 0 was passed"
+ )
+ with pytest.raises(TypeError, match=msg):
+ Index(0)
+
+ @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
+ def test_construction_list_mixed_tuples(self, index_vals):
+ # see gh-10697: if we are constructing from a mixed list of tuples,
+ # make sure that we are independent of the sorting order.
+ index = Index(index_vals)
+ assert isinstance(index, Index)
+ assert not isinstance(index, MultiIndex)
+
+ def test_constructor_wrong_kwargs(self):
+ # GH #19348
+ with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
+ Index([], foo="bar")
+
+ @pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument")
+ def test_constructor_cast(self):
+ msg = "could not convert string to float"
+ with pytest.raises(ValueError, match=msg):
+ Index(["a", "b", "c"], dtype=float)
diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py
new file mode 100644
index 0000000000000..e7d5e21d0ba47
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_setops.py
@@ -0,0 +1,74 @@
+import numpy as np
+import pytest
+
+from pandas import Index, Series
+import pandas._testing as tm
+from pandas.core.algorithms import safe_sort
+
+
+class TestIndexSetOps:
+ def test_union_base(self):
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[3:]
+ second = index[:5]
+
+ result = first.union(second)
+
+ expected = Index([0, 1, 2, "a", "b", "c"])
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [np.array, Series, list])
+ def test_union_different_type_base(self, klass):
+ # GH 10149
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[3:]
+ second = index[:5]
+
+ result = first.union(klass(second.values))
+
+ assert tm.equalContents(result, index)
+
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_intersection_base(self, sort):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:5]
+ second = index[:3]
+
+ expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])
+ result = first.intersection(second, sort=sort)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("klass", [np.array, Series, list])
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_intersection_different_type_base(self, klass, sort):
+ # GH 10149
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:5]
+ second = index[:3]
+
+ result = first.intersection(klass(second.values), sort=sort)
+ assert tm.equalContents(result, second)
+
+ @pytest.mark.parametrize("sort", [None, False])
+ def test_difference_base(self, sort):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:4]
+ second = index[3:]
+
+ result = first.difference(second, sort)
+ expected = Index([0, "a", 1])
+ if sort is None:
+ expected = Index(safe_sort(expected))
+ tm.assert_index_equal(result, expected)
+
+ def test_symmetric_difference(self):
+ # (same results for py2 and py3 but sortedness not tested elsewhere)
+ index = Index([0, "a", 1, "b", 2, "c"])
+ first = index[:4]
+ second = index[3:]
+
+ result = first.symmetric_difference(second)
+ expected = Index([0, 1, 2, "a", "c"])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index d870259c2539b..c18cd1f252c83 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -146,76 +146,6 @@ def test_contains_list(self):
with pytest.raises(TypeError, match="unhashable type"):
["a", "b"] in idx
- def test_map(self):
- ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
- result = ci.map(lambda x: x.lower())
- exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
- tm.assert_index_equal(result, exp)
-
- ci = pd.CategoricalIndex(
- list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
- )
- result = ci.map(lambda x: x.lower())
- exp = pd.CategoricalIndex(
- list("ababc"), categories=list("bac"), ordered=False, name="XXX"
- )
- tm.assert_index_equal(result, exp)
-
- # GH 12766: Return an index not an array
- tm.assert_index_equal(
- ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
- )
-
- # change categories dtype
- ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
-
- def f(x):
- return {"A": 10, "B": 20, "C": 30}.get(x)
-
- result = ci.map(f)
- exp = pd.CategoricalIndex(
- [10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
- )
- tm.assert_index_equal(result, exp)
-
- result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
- tm.assert_index_equal(result, exp)
-
- result = ci.map({"A": 10, "B": 20, "C": 30})
- tm.assert_index_equal(result, exp)
-
- def test_map_with_categorical_series(self):
- # GH 12756
- a = pd.Index([1, 2, 3, 4])
- b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
- c = pd.Series(["even", "odd", "even", "odd"])
-
- exp = CategoricalIndex(["odd", "even", "odd", np.nan])
- tm.assert_index_equal(a.map(b), exp)
- exp = pd.Index(["odd", "even", "odd", np.nan])
- tm.assert_index_equal(a.map(c), exp)
-
- @pytest.mark.parametrize(
- ("data", "f"),
- (
- ([1, 1, np.nan], pd.isna),
- ([1, 2, np.nan], pd.isna),
- ([1, 1, np.nan], {1: False}),
- ([1, 2, np.nan], {1: False, 2: False}),
- ([1, 1, np.nan], pd.Series([False, False])),
- ([1, 2, np.nan], pd.Series([False, False, False])),
- ),
- )
- def test_map_with_nan(self, data, f): # GH 24241
- values = pd.Categorical(data)
- result = values.map(f)
- if data[1] == 1:
- expected = pd.Categorical([False, False, np.nan])
- tm.assert_categorical_equal(result, expected)
- else:
- expected = pd.Index([False, False, np.nan])
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_where(self, klass):
i = self.create_index()
@@ -384,89 +314,6 @@ def test_astype_category(self, name, dtype_ordered, index_ordered):
expected = index
tm.assert_index_equal(result, expected)
- def test_reindex_base(self):
- # Determined by cat ordering.
- idx = CategoricalIndex(list("cab"), categories=list("cab"))
- expected = np.arange(len(idx), dtype=np.intp)
-
- actual = idx.get_indexer(idx)
- tm.assert_numpy_array_equal(expected, actual)
-
- with pytest.raises(ValueError, match="Invalid fill method"):
- idx.get_indexer(idx, method="invalid")
-
- def test_reindexing(self):
- np.random.seed(123456789)
-
- ci = self.create_index()
- oidx = Index(np.array(ci))
-
- for n in [1, 2, 5, len(ci)]:
- finder = oidx[np.random.randint(0, len(ci), size=n)]
- expected = oidx.get_indexer_non_unique(finder)[0]
-
- actual = ci.get_indexer(finder)
- tm.assert_numpy_array_equal(expected, actual)
-
- # see gh-17323
- #
- # Even when indexer is equal to the
- # members in the index, we should
- # respect duplicates instead of taking
- # the fast-track path.
- for finder in [list("aabbca"), list("aababca")]:
- expected = oidx.get_indexer_non_unique(finder)[0]
-
- actual = ci.get_indexer(finder)
- tm.assert_numpy_array_equal(expected, actual)
-
- def test_reindex_dtype(self):
- c = CategoricalIndex(["a", "b", "c", "a"])
- res, indexer = c.reindex(["a", "c"])
- tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"])
- res, indexer = c.reindex(Categorical(["a", "c"]))
-
- exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- res, indexer = c.reindex(["a", "c"])
- exp = Index(["a", "a", "c"], dtype="object")
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- res, indexer = c.reindex(Categorical(["a", "c"]))
- exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
-
- def test_reindex_duplicate_target(self):
- # See GH25459
- cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
- res, indexer = cat.reindex(["a", "c", "c"])
- exp = Index(["a", "c", "c"], dtype="object")
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
-
- res, indexer = cat.reindex(
- CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
- )
- exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
- tm.assert_index_equal(res, exp, exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
-
- def test_reindex_empty_index(self):
- # See GH16770
- c = CategoricalIndex([])
- res, indexer = c.reindex(["a", "b"])
- tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
- tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
-
@pytest.mark.parametrize(
"data, non_lexsorted_data",
[[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]],
@@ -518,75 +365,6 @@ def test_drop_duplicates(self):
tm.assert_index_equal(idx.drop_duplicates(), expected)
tm.assert_index_equal(idx.unique(), expected)
- def test_get_indexer(self):
-
- idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))
- idx2 = CategoricalIndex(list("abf"))
-
- for indexer in [idx2, list("abf"), Index(list("abf"))]:
- r1 = idx1.get_indexer(idx2)
- tm.assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
-
- msg = (
- "method='pad' and method='backfill' not implemented yet for "
- "CategoricalIndex"
- )
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="pad")
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="backfill")
-
- msg = "method='nearest' not implemented yet for CategoricalIndex"
- with pytest.raises(NotImplementedError, match=msg):
- idx2.get_indexer(idx1, method="nearest")
-
- def test_get_loc(self):
- # GH 12531
- cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc"))
- idx1 = Index(list("abcde"))
- assert cidx1.get_loc("a") == idx1.get_loc("a")
- assert cidx1.get_loc("e") == idx1.get_loc("e")
-
- for i in [cidx1, idx1]:
- with pytest.raises(KeyError, match="'NOT-EXIST'"):
- i.get_loc("NOT-EXIST")
-
- # non-unique
- cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc"))
- idx2 = Index(list("aacded"))
-
- # results in bool array
- res = cidx2.get_loc("d")
- tm.assert_numpy_array_equal(res, idx2.get_loc("d"))
- tm.assert_numpy_array_equal(
- res, np.array([False, False, False, True, False, True])
- )
- # unique element results in scalar
- res = cidx2.get_loc("e")
- assert res == idx2.get_loc("e")
- assert res == 4
-
- for i in [cidx2, idx2]:
- with pytest.raises(KeyError, match="'NOT-EXIST'"):
- i.get_loc("NOT-EXIST")
-
- # non-unique, sliceable
- cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc"))
- idx3 = Index(list("aabbb"))
-
- # results in slice
- res = cidx3.get_loc("a")
- assert res == idx3.get_loc("a")
- assert res == slice(0, 2, None)
-
- res = cidx3.get_loc("b")
- assert res == idx3.get_loc("b")
- assert res == slice(2, 5, None)
-
- for i in [cidx3, idx3]:
- with pytest.raises(KeyError, match="'c'"):
- i.get_loc("c")
-
def test_repr_roundtrip(self):
ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
@@ -837,122 +615,6 @@ def test_fillna_categorical(self):
with pytest.raises(ValueError, match=msg):
idx.fillna(2.0)
- def test_take_fill_value(self):
- # GH 12631
-
- # numeric category
- idx = pd.CategoricalIndex([1, 2, 3], name="xxx")
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # object category
- idx = pd.CategoricalIndex(
- list("CBA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.CategoricalIndex(
- list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.CategoricalIndex(
- ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.CategoricalIndex(
- list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
- )
- tm.assert_index_equal(result, expected)
- tm.assert_categorical_equal(result.values, expected.values)
-
- msg = (
- "When allow_fill=True and fill_value is not None, "
- "all indices must be >= -1"
- )
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- with pytest.raises(IndexError):
- idx.take(np.array([1, -5]))
-
- def test_take_fill_value_datetime(self):
-
- # datetime category
- idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
- idx = pd.CategoricalIndex(idx)
- result = idx.take(np.array([1, 0, -1]))
- expected = pd.DatetimeIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
- )
- expected = pd.CategoricalIndex(expected)
- tm.assert_index_equal(result, expected)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
- exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
- expected = pd.CategoricalIndex(expected, categories=exp_cats)
- tm.assert_index_equal(result, expected)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.DatetimeIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
- )
- expected = pd.CategoricalIndex(expected)
- tm.assert_index_equal(result, expected)
-
- msg = (
- "When allow_fill=True and fill_value is not None, "
- "all indices must be >= -1"
- )
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- with pytest.raises(IndexError):
- idx.take(np.array([1, -5]))
-
- def test_take_invalid_kwargs(self):
- idx = pd.CategoricalIndex([1, 2, 3], name="foo")
- indices = [1, 0, -1]
-
- msg = r"take\(\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=msg):
- idx.take(indices, foo=2)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, out=indices)
-
- msg = "the 'mode' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, mode="clip")
-
@pytest.mark.parametrize(
"dtype, engine_type",
[
@@ -976,19 +638,10 @@ def test_engine_type(self, dtype, engine_type):
assert np.issubdtype(ci.codes.dtype, dtype)
assert isinstance(ci._engine, engine_type)
- @pytest.mark.parametrize(
- "data, categories",
- [
- (list("abcbca"), list("cab")),
- (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
- ],
- ids=["string", "interval"],
- )
- def test_map_str(self, data, categories, ordered_fixture):
- # GH 31202 - override base class since we want to maintain categorical/ordered
- index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
- result = index.map(str)
- expected = CategoricalIndex(
- map(str, data), categories=map(str, categories), ordered=ordered_fixture
- )
- tm.assert_index_equal(result, expected)
+ def test_reindex_base(self):
+ # See test_reindex.py
+ pass
+
+ def test_map_str(self):
+ # See test_map.py
+ pass
diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py
new file mode 100644
index 0000000000000..507e38d9acac2
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_indexing.py
@@ -0,0 +1,250 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestTake:
+ def test_take_fill_value(self):
+ # GH 12631
+
+ # numeric category
+ idx = pd.CategoricalIndex([1, 2, 3], name="xxx")
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.CategoricalIndex([2, 1, 3], name="xxx")
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # object category
+ idx = pd.CategoricalIndex(
+ list("CBA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.CategoricalIndex(
+ list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.CategoricalIndex(
+ ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.CategoricalIndex(
+ list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
+ )
+ tm.assert_index_equal(result, expected)
+ tm.assert_categorical_equal(result.values, expected.values)
+
+ msg = (
+ "When allow_fill=True and fill_value is not None, "
+ "all indices must be >= -1"
+ )
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ with pytest.raises(IndexError):
+ idx.take(np.array([1, -5]))
+
+ def test_take_fill_value_datetime(self):
+
+ # datetime category
+ idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
+ idx = pd.CategoricalIndex(idx)
+ result = idx.take(np.array([1, 0, -1]))
+ expected = pd.DatetimeIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
+ )
+ expected = pd.CategoricalIndex(expected)
+ tm.assert_index_equal(result, expected)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
+ exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
+ expected = pd.CategoricalIndex(expected, categories=exp_cats)
+ tm.assert_index_equal(result, expected)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = pd.DatetimeIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
+ )
+ expected = pd.CategoricalIndex(expected)
+ tm.assert_index_equal(result, expected)
+
+ msg = (
+ "When allow_fill=True and fill_value is not None, "
+ "all indices must be >= -1"
+ )
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ with pytest.raises(IndexError):
+ idx.take(np.array([1, -5]))
+
+ def test_take_invalid_kwargs(self):
+ idx = pd.CategoricalIndex([1, 2, 3], name="foo")
+ indices = [1, 0, -1]
+
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
+ idx.take(indices, foo=2)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, out=indices)
+
+ msg = "the 'mode' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, mode="clip")
+
+
+class TestGetLoc:
+ def test_get_loc(self):
+ # GH 12531
+ cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc"))
+ idx1 = Index(list("abcde"))
+ assert cidx1.get_loc("a") == idx1.get_loc("a")
+ assert cidx1.get_loc("e") == idx1.get_loc("e")
+
+ for i in [cidx1, idx1]:
+ with pytest.raises(KeyError, match="'NOT-EXIST'"):
+ i.get_loc("NOT-EXIST")
+
+ # non-unique
+ cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc"))
+ idx2 = Index(list("aacded"))
+
+ # results in bool array
+ res = cidx2.get_loc("d")
+ tm.assert_numpy_array_equal(res, idx2.get_loc("d"))
+ tm.assert_numpy_array_equal(
+ res, np.array([False, False, False, True, False, True])
+ )
+ # unique element results in scalar
+ res = cidx2.get_loc("e")
+ assert res == idx2.get_loc("e")
+ assert res == 4
+
+ for i in [cidx2, idx2]:
+ with pytest.raises(KeyError, match="'NOT-EXIST'"):
+ i.get_loc("NOT-EXIST")
+
+ # non-unique, sliceable
+ cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc"))
+ idx3 = Index(list("aabbb"))
+
+ # results in slice
+ res = cidx3.get_loc("a")
+ assert res == idx3.get_loc("a")
+ assert res == slice(0, 2, None)
+
+ res = cidx3.get_loc("b")
+ assert res == idx3.get_loc("b")
+ assert res == slice(2, 5, None)
+
+ for i in [cidx3, idx3]:
+ with pytest.raises(KeyError, match="'c'"):
+ i.get_loc("c")
+
+ def test_get_loc_unique(self):
+ cidx = pd.CategoricalIndex(list("abc"))
+ result = cidx.get_loc("b")
+ assert result == 1
+
+ def test_get_loc_monotonic_nonunique(self):
+ cidx = pd.CategoricalIndex(list("abbc"))
+ result = cidx.get_loc("b")
+ expected = slice(1, 3, None)
+ assert result == expected
+
+ def test_get_loc_nonmonotonic_nonunique(self):
+ cidx = pd.CategoricalIndex(list("abcb"))
+ result = cidx.get_loc("b")
+ expected = np.array([False, True, False, True], dtype=bool)
+ tm.assert_numpy_array_equal(result, expected)
+
+
+class TestGetIndexer:
+ def test_get_indexer_base(self):
+ # Determined by cat ordering.
+ idx = CategoricalIndex(list("cab"), categories=list("cab"))
+ expected = np.arange(len(idx), dtype=np.intp)
+
+ actual = idx.get_indexer(idx)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ with pytest.raises(ValueError, match="Invalid fill method"):
+ idx.get_indexer(idx, method="invalid")
+
+ def test_get_indexer_non_unique(self):
+ np.random.seed(123456789)
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
+ oidx = Index(np.array(ci))
+
+ for n in [1, 2, 5, len(ci)]:
+ finder = oidx[np.random.randint(0, len(ci), size=n)]
+ expected = oidx.get_indexer_non_unique(finder)[0]
+
+ actual = ci.get_indexer(finder)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ # see gh-17323
+ #
+ # Even when indexer is equal to the
+ # members in the index, we should
+ # respect duplicates instead of taking
+ # the fast-track path.
+ for finder in [list("aabbca"), list("aababca")]:
+ expected = oidx.get_indexer_non_unique(finder)[0]
+
+ actual = ci.get_indexer(finder)
+ tm.assert_numpy_array_equal(expected, actual)
+
+ def test_get_indexer(self):
+
+ idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))
+ idx2 = CategoricalIndex(list("abf"))
+
+ for indexer in [idx2, list("abf"), Index(list("abf"))]:
+ r1 = idx1.get_indexer(idx2)
+ tm.assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
+
+ msg = (
+ "method='pad' and method='backfill' not implemented yet for "
+ "CategoricalIndex"
+ )
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="pad")
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="backfill")
+
+ msg = "method='nearest' not implemented yet for CategoricalIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx2.get_indexer(idx1, method="nearest")
diff --git a/pandas/tests/indexes/categorical/test_map.py b/pandas/tests/indexes/categorical/test_map.py
new file mode 100644
index 0000000000000..943359a72e971
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_map.py
@@ -0,0 +1,95 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestMap:
+ @pytest.mark.parametrize(
+ "data, categories",
+ [
+ (list("abcbca"), list("cab")),
+ (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
+ ],
+ ids=["string", "interval"],
+ )
+ def test_map_str(self, data, categories, ordered_fixture):
+ # GH 31202 - override base class since we want to maintain categorical/ordered
+ index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
+ result = index.map(str)
+ expected = CategoricalIndex(
+ map(str, data), categories=map(str, categories), ordered=ordered_fixture
+ )
+ tm.assert_index_equal(result, expected)
+
+ def test_map(self):
+ ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
+ result = ci.map(lambda x: x.lower())
+ exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
+ tm.assert_index_equal(result, exp)
+
+ ci = pd.CategoricalIndex(
+ list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
+ )
+ result = ci.map(lambda x: x.lower())
+ exp = pd.CategoricalIndex(
+ list("ababc"), categories=list("bac"), ordered=False, name="XXX"
+ )
+ tm.assert_index_equal(result, exp)
+
+ # GH 12766: Return an index not an array
+ tm.assert_index_equal(
+ ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
+ )
+
+ # change categories dtype
+ ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
+
+ def f(x):
+ return {"A": 10, "B": 20, "C": 30}.get(x)
+
+ result = ci.map(f)
+ exp = pd.CategoricalIndex(
+ [10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
+ )
+ tm.assert_index_equal(result, exp)
+
+ result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
+ tm.assert_index_equal(result, exp)
+
+ result = ci.map({"A": 10, "B": 20, "C": 30})
+ tm.assert_index_equal(result, exp)
+
+ def test_map_with_categorical_series(self):
+ # GH 12756
+ a = pd.Index([1, 2, 3, 4])
+ b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
+ c = pd.Series(["even", "odd", "even", "odd"])
+
+ exp = CategoricalIndex(["odd", "even", "odd", np.nan])
+ tm.assert_index_equal(a.map(b), exp)
+ exp = pd.Index(["odd", "even", "odd", np.nan])
+ tm.assert_index_equal(a.map(c), exp)
+
+ @pytest.mark.parametrize(
+ ("data", "f"),
+ (
+ ([1, 1, np.nan], pd.isna),
+ ([1, 2, np.nan], pd.isna),
+ ([1, 1, np.nan], {1: False}),
+ ([1, 2, np.nan], {1: False, 2: False}),
+ ([1, 1, np.nan], pd.Series([False, False])),
+ ([1, 2, np.nan], pd.Series([False, False, False])),
+ ),
+ )
+ def test_map_with_nan(self, data, f): # GH 24241
+ values = pd.Categorical(data)
+ result = values.map(f)
+ if data[1] == 1:
+ expected = pd.Categorical([False, False, np.nan])
+ tm.assert_categorical_equal(result, expected)
+ else:
+ expected = pd.Index([False, False, np.nan])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py
new file mode 100644
index 0000000000000..f59ddc42ce4e4
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_reindex.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+from pandas import Categorical, CategoricalIndex, Index
+import pandas._testing as tm
+
+
+class TestReindex:
+ def test_reindex_dtype(self):
+ c = CategoricalIndex(["a", "b", "c", "a"])
+ res, indexer = c.reindex(["a", "c"])
+ tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"])
+ res, indexer = c.reindex(Categorical(["a", "c"]))
+
+ exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ res, indexer = c.reindex(["a", "c"])
+ exp = Index(["a", "a", "c"], dtype="object")
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ res, indexer = c.reindex(Categorical(["a", "c"]))
+ exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+
+ def test_reindex_duplicate_target(self):
+ # See GH25459
+ cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
+ res, indexer = cat.reindex(["a", "c", "c"])
+ exp = Index(["a", "c", "c"], dtype="object")
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
+
+ res, indexer = cat.reindex(
+ CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
+ )
+ exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
+ tm.assert_index_equal(res, exp, exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
+
+ def test_reindex_empty_index(self):
+ # See GH16770
+ c = CategoricalIndex([])
+ res, indexer = c.reindex(["a", "b"])
+ tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
+ tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 26d120619defc..da27057a783ab 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -167,6 +167,10 @@ def test_create_index_existing_name(self):
def test_numeric_compat(self):
idx = self.create_index()
+ # Check that this doesn't cover MultiIndex case, if/when it does,
+ # we can remove multi.test_compat.test_numeric_compat
+ assert not isinstance(idx, MultiIndex)
+
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 68285d41bda70..1d1d371fcec1e 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -951,16 +951,11 @@ def test_datetimeindex_constructor_misc(self):
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
+ def test_pass_datetimeindex_to_index(self):
+ # Bugs in #1396
+ rng = date_range("1/1/2000", "3/1/2000")
+ idx = Index(rng, dtype=object)
-def test_timedelta_constructor_identity():
- # Test for #30543
- expected = pd.Timedelta(np.timedelta64(1, "s"))
- result = pd.Timedelta(expected)
- assert result is expected
+ expected = Index(rng.to_pydatetime(), dtype=object)
-
-def test_timestamp_constructor_identity():
- # Test for #30543
- expected = pd.Timestamp("2017-01-01T12")
- result = pd.Timestamp(expected)
- assert result is expected
+ tm.assert_numpy_array_equal(idx.values, expected.values)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 2f954117f48d7..c358e72538788 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -344,6 +344,115 @@ def test_take_fill_value_with_timezone(self):
idx.take(np.array([1, -5]))
+class TestGetLoc:
+ @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
+ def test_get_loc_method_exact_match(self, method):
+ idx = pd.date_range("2000-01-01", periods=3)
+ assert idx.get_loc(idx[1], method) == 1
+ assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
+ assert idx.get_loc(str(idx[1]), method) == 1
+
+ if method is not None:
+ assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
+
+ def test_get_loc(self):
+ idx = pd.date_range("2000-01-01", periods=3)
+
+ assert idx.get_loc("2000-01-01", method="nearest") == 0
+ assert idx.get_loc("2000-01-01T12", method="nearest") == 1
+
+ assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
+ assert (
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
+ == 1
+ )
+ assert (
+ idx.get_loc(
+ "2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
+ )
+ == 1
+ )
+ assert (
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
+ )
+ with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
+ with pytest.raises(KeyError, match="'2000-01-01T03'"):
+ idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
+ with pytest.raises(
+ ValueError, match="tolerance size must match target index size"
+ ):
+ idx.get_loc(
+ "2000-01-01",
+ method="nearest",
+ tolerance=[
+ pd.Timedelta("1day").to_timedelta64(),
+ pd.Timedelta("1day").to_timedelta64(),
+ ],
+ )
+
+ assert idx.get_loc("2000", method="nearest") == slice(0, 3)
+ assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
+
+ assert idx.get_loc("1999", method="nearest") == 0
+ assert idx.get_loc("2001", method="nearest") == 2
+
+ with pytest.raises(KeyError, match="'1999'"):
+ idx.get_loc("1999", method="pad")
+ with pytest.raises(KeyError, match="'2001'"):
+ idx.get_loc("2001", method="backfill")
+
+ with pytest.raises(KeyError, match="'foobar'"):
+ idx.get_loc("foobar")
+ with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
+ idx.get_loc(slice(2))
+
+ idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
+ assert idx.get_loc("2000-01-02", method="nearest") == 0
+ assert idx.get_loc("2000-01-03", method="nearest") == 1
+ assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
+
+ # time indexing
+ idx = pd.date_range("2000-01-01", periods=24, freq="H")
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12)), np.array([12]), check_dtype=False
+ )
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
+ )
+ with pytest.raises(NotImplementedError):
+ idx.get_loc(time(12, 30), method="pad")
+
+ def test_get_loc_nat(self):
+ # GH#20464
+ index = DatetimeIndex(["1/3/2000", "NaT"])
+ assert index.get_loc(pd.NaT) == 1
+
+ assert index.get_loc(None) == 1
+
+ assert index.get_loc(np.nan) == 1
+
+ assert index.get_loc(pd.NA) == 1
+
+ assert index.get_loc(np.datetime64("NaT")) == 1
+
+ with pytest.raises(KeyError, match="NaT"):
+ index.get_loc(np.timedelta64("NaT"))
+
+ @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
+ def test_get_loc_timedelta_invalid_key(self, key):
+ # GH#20464
+ dti = pd.date_range("1970-01-01", periods=10)
+ with pytest.raises(TypeError):
+ dti.get_loc(key)
+
+ def test_get_loc_reasonable_key_error(self):
+ # GH#1062
+ index = DatetimeIndex(["1/3/2000"])
+ with pytest.raises(KeyError, match="2000"):
+ index.get_loc("1/1/2000")
+
+
class TestDatetimeIndex:
@pytest.mark.parametrize(
"null", [None, np.nan, np.datetime64("NaT"), pd.NaT, pd.NA]
@@ -639,84 +748,6 @@ def test_get_value(self):
result = dti.get_value(ser, key.to_datetime64())
assert result == 7
- def test_get_loc(self):
- idx = pd.date_range("2000-01-01", periods=3)
-
- for method in [None, "pad", "backfill", "nearest"]:
- assert idx.get_loc(idx[1], method) == 1
- assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
- assert idx.get_loc(str(idx[1]), method) == 1
-
- if method is not None:
- assert (
- idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
- )
-
- assert idx.get_loc("2000-01-01", method="nearest") == 0
- assert idx.get_loc("2000-01-01T12", method="nearest") == 1
-
- assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
- assert (
- idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
- == 1
- )
- assert (
- idx.get_loc(
- "2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
- )
- == 1
- )
- assert (
- idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
- )
- with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
- idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
- with pytest.raises(KeyError, match="'2000-01-01T03'"):
- idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
- with pytest.raises(
- ValueError, match="tolerance size must match target index size"
- ):
- idx.get_loc(
- "2000-01-01",
- method="nearest",
- tolerance=[
- pd.Timedelta("1day").to_timedelta64(),
- pd.Timedelta("1day").to_timedelta64(),
- ],
- )
-
- assert idx.get_loc("2000", method="nearest") == slice(0, 3)
- assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
-
- assert idx.get_loc("1999", method="nearest") == 0
- assert idx.get_loc("2001", method="nearest") == 2
-
- with pytest.raises(KeyError, match="'1999'"):
- idx.get_loc("1999", method="pad")
- with pytest.raises(KeyError, match="'2001'"):
- idx.get_loc("2001", method="backfill")
-
- with pytest.raises(KeyError, match="'foobar'"):
- idx.get_loc("foobar")
- with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
- idx.get_loc(slice(2))
-
- idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
- assert idx.get_loc("2000-01-02", method="nearest") == 0
- assert idx.get_loc("2000-01-03", method="nearest") == 1
- assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
-
- # time indexing
- idx = pd.date_range("2000-01-01", periods=24, freq="H")
- tm.assert_numpy_array_equal(
- idx.get_loc(time(12)), np.array([12]), check_dtype=False
- )
- tm.assert_numpy_array_equal(
- idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
- )
- with pytest.raises(NotImplementedError):
- idx.get_loc(time(12, 30), method="pad")
-
def test_get_indexer(self):
idx = pd.date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
@@ -756,32 +787,3 @@ def test_get_indexer(self):
idx.get_indexer(target, "nearest", tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method="nearest", tolerance="foo")
-
- def test_reasonable_key_error(self):
- # GH#1062
- index = DatetimeIndex(["1/3/2000"])
- with pytest.raises(KeyError, match="2000"):
- index.get_loc("1/1/2000")
-
- @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
- def test_timedelta_invalid_key(self, key):
- # GH#20464
- dti = pd.date_range("1970-01-01", periods=10)
- with pytest.raises(TypeError):
- dti.get_loc(key)
-
- def test_get_loc_nat(self):
- # GH#20464
- index = DatetimeIndex(["1/3/2000", "NaT"])
- assert index.get_loc(pd.NaT) == 1
-
- assert index.get_loc(None) == 1
-
- assert index.get_loc(np.nan) == 1
-
- assert index.get_loc(pd.NA) == 1
-
- assert index.get_loc(np.datetime64("NaT")) == 1
-
- with pytest.raises(KeyError, match="NaT"):
- index.get_loc(np.timedelta64("NaT"))
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 340f53b2868bd..d0464698e3f24 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -12,15 +12,6 @@
class TestTimeSeries:
- def test_pass_datetimeindex_to_index(self):
- # Bugs in #1396
- rng = date_range("1/1/2000", "3/1/2000")
- idx = Index(rng, dtype=object)
-
- expected = Index(rng.to_pydatetime(), dtype=object)
-
- tm.assert_numpy_array_equal(idx.values, expected.values)
-
def test_range_edges(self):
# GH#13672
idx = pd.date_range(
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py
index acaea4ff96ff5..67ebfcddf6c2d 100644
--- a/pandas/tests/indexes/multi/conftest.py
+++ b/pandas/tests/indexes/multi/conftest.py
@@ -49,12 +49,6 @@ def index_names():
return ["first", "second"]
-@pytest.fixture
-def holder():
- # the MultiIndex constructor used to base compatibility with pickle
- return MultiIndex
-
-
@pytest.fixture
def compat_props():
# a MultiIndex must have these properties associated with it
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index e64511efd7ffb..a9e02934f27ab 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -146,83 +146,6 @@ def test_append_mixed_dtypes():
tm.assert_index_equal(res, exp)
-def test_take(idx):
- indexer = [4, 3, 0, 2]
- result = idx.take(indexer)
- expected = idx[indexer]
- assert result.equals(expected)
-
- # TODO: Remove Commented Code
- # if not isinstance(idx,
- # (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
- # GH 10791
- msg = "'MultiIndex' object has no attribute 'freq'"
- with pytest.raises(AttributeError, match=msg):
- idx.freq
-
-
-def test_take_invalid_kwargs(idx):
- idx = idx
- indices = [1, 2]
-
- msg = r"take\(\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=msg):
- idx.take(indices, foo=2)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, out=indices)
-
- msg = "the 'mode' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, mode="clip")
-
-
-def test_take_fill_value():
- # GH 12631
- vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
- idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
-
- result = idx.take(np.array([1, 0, -1]))
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- ("B", pd.Timestamp("2011-01-02")),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- (np.nan, pd.NaT),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- ("B", pd.Timestamp("2011-01-02")),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1"
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- msg = "index -5 is out of bounds for( axis 0 with)? size 4"
- with pytest.raises(IndexError, match=msg):
- idx.take(np.array([1, -5]))
-
-
def test_iter(idx):
result = list(idx)
expected = [
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index 545a7ddef29bb..9a76f0623eb31 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -112,8 +112,8 @@ def test_ndarray_compat_properties(idx, compat_props):
idx.values.nbytes
-def test_pickle_compat_construction(holder):
+def test_pickle_compat_construction():
# this is testing for pickle compat
# need an object to create with
with pytest.raises(TypeError, match="Must pass both levels and codes"):
- holder()
+ MultiIndex()
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 074072ae581b2..675a1e2e832f3 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -57,8 +57,6 @@ def test_get_value_duplicates():
)
assert index.get_loc("D") == slice(0, 3)
- with pytest.raises(KeyError, match=r"^'D'$"):
- index._engine.get_value(np.array([]), "D")
def test_get_level_values_all_na():
@@ -159,7 +157,7 @@ def test_set_levels_codes_directly(idx):
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
- msg = "can't set attribute"
+ msg = "[Cc]an't set attribute"
with pytest.raises(AttributeError, match=msg):
idx.levels = new_levels
with pytest.raises(AttributeError, match=msg):
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index b08280a712642..21a4773fa3683 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -392,7 +392,7 @@ def test_get_loc_missing_nan():
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
- with pytest.raises(KeyError, match=r"^3\.0$"):
+ with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match=r"^nan$"):
idx.get_loc(np.nan)
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index 50242c1cac549..bb40612b9a55a 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -1,3 +1,5 @@
+import random
+
import numpy as np
import pytest
@@ -9,8 +11,6 @@
def test_sortlevel(idx):
- import random
-
tuples = list(idx)
random.shuffle(tuples)
diff --git a/pandas/tests/indexes/multi/test_take.py b/pandas/tests/indexes/multi/test_take.py
new file mode 100644
index 0000000000000..85043ff8812af
--- /dev/null
+++ b/pandas/tests/indexes/multi/test_take.py
@@ -0,0 +1,82 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_take(idx):
+ indexer = [4, 3, 0, 2]
+ result = idx.take(indexer)
+ expected = idx[indexer]
+ assert result.equals(expected)
+
+ # FIXME: Remove Commented Code
+ # if not isinstance(idx,
+ # (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # GH 10791
+ msg = "'MultiIndex' object has no attribute 'freq'"
+ with pytest.raises(AttributeError, match=msg):
+ idx.freq
+
+
+def test_take_invalid_kwargs(idx):
+ idx = idx
+ indices = [1, 2]
+
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
+ idx.take(indices, foo=2)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, out=indices)
+
+ msg = "the 'mode' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, mode="clip")
+
+
+def test_take_fill_value():
+ # GH 12631
+ vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
+ idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
+
+ result = idx.take(np.array([1, 0, -1]))
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ ("B", pd.Timestamp("2011-01-02")),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ (np.nan, pd.NaT),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ ("B", pd.Timestamp("2011-01-02")),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ msg = "index -5 is out of bounds for( axis 0 with)? size 4"
+ with pytest.raises(IndexError, match=msg):
+ idx.take(np.array([1, -5]))
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 38514594efe09..fffc4a7562306 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -486,15 +486,17 @@ def test_get_value_datetime_hourly(self, freq):
assert ser.loc[ts2] == 7
def test_get_value_integer(self):
+ msg = "index 16801 is out of bounds for axis 0 with size 3"
dti = pd.date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
ser = pd.Series(range(3), index=pi)
- with pytest.raises(IndexError, match="index out of bounds"):
+ with pytest.raises(IndexError, match=msg):
pi.get_value(ser, 16801)
+ msg = "index 46 is out of bounds for axis 0 with size 3"
pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
ser2 = pd.Series(range(3), index=pi2)
- with pytest.raises(IndexError, match="index out of bounds"):
+ with pytest.raises(IndexError, match=msg):
pi2.get_value(ser2, 46)
def test_is_monotonic_increasing(self):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index e72963de09ab4..04af9b09bbf89 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -34,7 +34,6 @@
period_range,
)
import pandas._testing as tm
-from pandas.core.algorithms import safe_sort
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -108,23 +107,6 @@ def test_constructor_copy(self, index):
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
- def test_constructor_corner(self):
- # corner case
- msg = (
- r"Index\(\.\.\.\) must be called with a collection of some "
- "kind, 0 was passed"
- )
- with pytest.raises(TypeError, match=msg):
- Index(0)
-
- @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
- def test_construction_list_mixed_tuples(self, index_vals):
- # see gh-10697: if we are constructing from a mixed list of tuples,
- # make sure that we are independent of the sorting order.
- index = Index(index_vals)
- assert isinstance(index, Index)
- assert not isinstance(index, MultiIndex)
-
@pytest.mark.parametrize("na_value", [None, np.nan])
@pytest.mark.parametrize("vtype", [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
@@ -359,11 +341,6 @@ def test_constructor_simple_new(self, vals, dtype):
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
- def test_constructor_wrong_kwargs(self):
- # GH #19348
- with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
- Index([], foo="bar")
-
@pytest.mark.parametrize(
"vals",
[
@@ -554,12 +531,6 @@ def test_constructor_overflow_int64(self):
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
- @pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument")
- def test_constructor_cast(self):
- msg = "could not convert string to float"
- with pytest.raises(ValueError, match=msg):
- Index(["a", "b", "c"], dtype=float)
-
@pytest.mark.parametrize(
"index",
[
@@ -1047,6 +1018,32 @@ def test_setops_disallow_true(self, method):
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
+ def test_setops_preserve_object_dtype(self):
+ idx = pd.Index([1, 2, 3], dtype=object)
+ result = idx.intersection(idx[1:])
+ expected = idx[1:]
+ tm.assert_index_equal(result, expected)
+
+ # if other is not monotonic increasing, intersection goes through
+ # a different route
+ result = idx.intersection(idx[1:][::-1])
+ tm.assert_index_equal(result, expected)
+
+ result = idx._union(idx[1:], sort=None)
+ expected = idx
+ tm.assert_index_equal(result, expected)
+
+ result = idx.union(idx[1:], sort=None)
+ tm.assert_index_equal(result, expected)
+
+ # if other is not monotonic increasing, _union goes through
+ # a different route
+ result = idx._union(idx[1:][::-1], sort=None)
+ tm.assert_index_equal(result, expected)
+
+ result = idx.union(idx[1:][::-1], sort=None)
+ tm.assert_index_equal(result, expected)
+
def test_map_identity_mapping(self, indices):
# GH 12766
tm.assert_index_equal(indices, indices.map(lambda x: x))
@@ -2502,78 +2499,12 @@ def test_copy_name2(self):
assert index3.name == "NewName"
assert index3.names == ["NewName"]
- def test_union_base(self):
- index = self.create_index()
- first = index[3:]
- second = index[:5]
-
- result = first.union(second)
-
- expected = Index([0, 1, 2, "a", "b", "c"])
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize("klass", [np.array, Series, list])
- def test_union_different_type_base(self, klass):
- # GH 10149
- index = self.create_index()
- first = index[3:]
- second = index[:5]
-
- result = first.union(klass(second.values))
-
- assert tm.equalContents(result, index)
-
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name="my_index")
expected = pd.Index([2, np.nan, 1], name="my_index")
result = idx.unique()
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize("sort", [None, False])
- def test_intersection_base(self, sort):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:5]
- second = index[:3]
-
- expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])
- result = first.intersection(second, sort=sort)
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize("klass", [np.array, Series, list])
- @pytest.mark.parametrize("sort", [None, False])
- def test_intersection_different_type_base(self, klass, sort):
- # GH 10149
- index = self.create_index()
- first = index[:5]
- second = index[:3]
-
- result = first.intersection(klass(second.values), sort=sort)
- assert tm.equalContents(result, second)
-
- @pytest.mark.parametrize("sort", [None, False])
- def test_difference_base(self, sort):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:4]
- second = index[3:]
-
- result = first.difference(second, sort)
- expected = Index([0, "a", 1])
- if sort is None:
- expected = Index(safe_sort(expected))
- tm.assert_index_equal(result, expected)
-
- def test_symmetric_difference(self):
- # (same results for py2 and py3 but sortedness not tested elsewhere)
- index = self.create_index()
- first = index[:4]
- second = index[3:]
-
- result = first.symmetric_difference(second)
- expected = Index([0, 1, 2, "a", "c"])
- tm.assert_index_equal(result, expected)
-
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 992a91ad8a528..1b504ce99604d 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -385,7 +385,7 @@ def test_get_loc_missing_nan(self):
# GH 8569
idx = Float64Index([1, 2])
assert idx.get_loc(1) == 0
- with pytest.raises(KeyError, match=r"^3\.0$"):
+ with pytest.raises(KeyError, match=r"^3$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py
index c15fa34283f21..7e75b5324445e 100644
--- a/pandas/tests/indexing/multiindex/test_getitem.py
+++ b/pandas/tests/indexing/multiindex/test_getitem.py
@@ -87,8 +87,8 @@ def test_series_getitem_returns_scalar(
(lambda s: s[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
(lambda s: s.loc[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
(lambda s: s.loc[(2000, 3, 4, 5)], IndexingError, "Too many indexers"),
- (lambda s: s.__getitem__(len(s)), IndexError, "index out of bounds"),
- (lambda s: s[len(s)], IndexError, "index out of bounds"),
+ (lambda s: s.__getitem__(len(s)), IndexError, "is out of bounds"),
+ (lambda s: s[len(s)], IndexError, "is out of bounds"),
(
lambda s: s.iloc[len(s)],
IndexError,
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index aebd1ad2573ed..1e641760f7e8d 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -414,6 +414,16 @@ def test_astype_assignment_with_dups(self):
df["A"] = df["A"].astype(np.float64)
tm.assert_index_equal(df.index, index)
+ def test_setitem_nonmonotonic(self):
+ # https://github.com/pandas-dev/pandas/issues/31449
+ index = pd.MultiIndex.from_tuples(
+ [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"]
+ )
+ df = pd.DataFrame(data=[0, 1, 2], index=index, columns=["e"])
+ df.loc["a", "e"] = np.arange(99, 101, dtype="int64")
+ expected = pd.DataFrame({"e": [99, 1, 100]}, index=index)
+ tm.assert_frame_equal(df, expected)
+
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 8c8dece53277e..da935b1c911d0 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -83,8 +83,8 @@ def test_loc_scalar(self):
df.loc["d", "C"] = 10
msg = (
- r"cannot do label indexing on <class 'pandas\.core\.indexes\.category"
- r"\.CategoricalIndex'> with these indexers \[1\] of <class 'int'>"
+ "cannot do label indexing on CategoricalIndex with these "
+ r"indexers \[1\] of type int"
)
with pytest.raises(TypeError, match=msg):
df.loc[1]
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 5530896a90941..8bb88cd9fd63a 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -22,16 +22,9 @@ def check(self, result, original, indexer, getitem):
tm.assert_almost_equal(result, expected)
- def test_scalar_error(self):
-
- # GH 4892
- # float_indexers should raise exceptions
- # on appropriate Index types & accessors
- # this duplicates the code below
- # but is specifically testing for the error
- # message
-
- for index in [
+ @pytest.mark.parametrize(
+ "index_func",
+ [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
@@ -40,22 +33,31 @@ def test_scalar_error(self):
tm.makePeriodIndex,
tm.makeIntIndex,
tm.makeRangeIndex,
- ]:
+ ],
+ )
+ def test_scalar_error(self, index_func):
- i = index(5)
+ # GH 4892
+ # float_indexers should raise exceptions
+ # on appropriate Index types & accessors
+ # this duplicates the code below
+ # but is specifically testing for the error
+ # message
- s = Series(np.arange(len(i)), index=i)
+ i = index_func(5)
- msg = "Cannot index by location index"
- with pytest.raises(TypeError, match=msg):
- s.iloc[3.0]
+ s = Series(np.arange(len(i)), index=i)
- msg = (
- "cannot do positional indexing on {klass} with these "
- r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
- )
- with pytest.raises(TypeError, match=msg):
- s.iloc[3.0] = 0
+ msg = "Cannot index by location index"
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0]
+
+ msg = (
+ "cannot do positional indexing on {klass} with these "
+ r"indexers \[3\.0\] of type float".format(klass=type(i).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0] = 0
def test_scalar_non_numeric(self):
@@ -90,11 +92,11 @@ def test_scalar_non_numeric(self):
else:
error = TypeError
msg = (
- r"cannot do (label|index|positional) indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}|"
+ r"type float|"
"Cannot index by location index with a "
- "non-integer key".format(klass=type(i), kind=str(float))
+ "non-integer key".format(klass=type(i).__name__)
)
with pytest.raises(error, match=msg):
idxr(s)[3.0]
@@ -107,13 +109,13 @@ def test_scalar_non_numeric(self):
"mixed",
}:
error = KeyError
- msg = r"^3$"
+ msg = r"^3\.0$"
else:
error = TypeError
msg = (
- r"cannot do (label|index) indexing "
+ r"cannot do label indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(error, match=msg):
s.loc[3.0]
@@ -123,9 +125,9 @@ def test_scalar_non_numeric(self):
# setting with a float fails with iloc
msg = (
- r"cannot do (label|index|positional) indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -160,9 +162,9 @@ def test_scalar_non_numeric(self):
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
- r"cannot do (label|index) indexing "
+ r"cannot do label indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
s[3.0]
@@ -179,15 +181,15 @@ def test_scalar_with_mixed(self):
msg = (
r"cannot do label indexing "
r"on {klass} with these indexers \[1\.0\] of "
- r"{kind}|"
+ r"type float|"
"Cannot index by location index with a non-integer key".format(
- klass=str(Index), kind=str(float)
+ klass=Index.__name__
)
)
with pytest.raises(TypeError, match=msg):
idxr(s2)[1.0]
- with pytest.raises(KeyError, match=r"^1$"):
+ with pytest.raises(KeyError, match=r"^1\.0$"):
s2.loc[1.0]
result = s2.loc["b"]
@@ -201,7 +203,7 @@ def test_scalar_with_mixed(self):
msg = (
r"cannot do label indexing "
r"on {klass} with these indexers \[1\.0\] of "
- r"{kind}".format(klass=str(Index), kind=str(float))
+ r"type float".format(klass=Index.__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
@@ -213,7 +215,7 @@ def test_scalar_with_mixed(self):
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s3.iloc[1.0]
- with pytest.raises(KeyError, match=r"^1$"):
+ with pytest.raises(KeyError, match=r"^1\.0$"):
s3.loc[1.0]
result = s3.loc[1.5]
@@ -315,7 +317,7 @@ def test_scalar_float(self):
msg = (
r"cannot do positional indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=str(Float64Index), kind=str(float))
+ r"type float".format(klass=Float64Index.__name__)
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
@@ -344,9 +346,9 @@ def test_slice_non_numeric(self):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do slice indexing "
+ "cannot do positional indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[l]
@@ -354,14 +356,10 @@ def test_slice_non_numeric(self):
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers "
r"\[(3|4)(\.0)?\] "
- r"of ({kind_float}|{kind_int})".format(
- klass=type(index),
- kind_float=str(float),
- kind_int=str(int),
- )
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -370,23 +368,19 @@ def test_slice_non_numeric(self):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do slice indexing "
+ "cannot do positional indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers "
r"\[(3|4)(\.0)?\] "
- r"of ({kind_float}|{kind_int})".format(
- klass=type(index),
- kind_float=str(float),
- kind_int=str(int),
- )
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l] = 0
@@ -426,7 +420,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -450,7 +444,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[-6\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[slice(-6.0, 6.0)]
@@ -476,7 +470,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(2|3)\.5\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -494,7 +488,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
@@ -515,9 +509,9 @@ def test_integer_positional_indexing(self):
klass = RangeIndex
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers \[(2|4)\.0\] of "
- "{kind}".format(klass=str(klass), kind=str(float))
+ "type float".format(klass=klass.__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -542,7 +536,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(0|1)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -557,7 +551,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[-10\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[slice(-10.0, 10.0)]
@@ -576,7 +570,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[0\.5\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -593,7 +587,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
@@ -666,11 +660,11 @@ def test_floating_misc(self):
# value not found (and no fallbacking at all)
# scalar integers
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s.loc[4]
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s.loc[4]
- with pytest.raises(KeyError, match=r"^4\.0$"):
+ with pytest.raises(KeyError, match=r"^4$"):
s[4]
# fancy floats/integers create the correct entry (as nan)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index d67259e8b7d40..08ea4c1579ef8 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -15,6 +15,44 @@
class TestiLoc(Base):
+ def test_iloc_getitem_int(self):
+ # integer
+ self.check_result(
+ "iloc",
+ 2,
+ "iloc",
+ 2,
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ def test_iloc_getitem_neg_int(self):
+ # neg integer
+ self.check_result(
+ "iloc",
+ -1,
+ "iloc",
+ -1,
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ def test_iloc_getitem_list_int(self):
+ self.check_result(
+ "iloc",
+ [0, 1, 2],
+ "iloc",
+ [0, 1, 2],
+ typs=["labels", "mixed", "ts", "floats", "empty"],
+ fails=IndexError,
+ )
+
+ # array of ints (GH5006), make sure that a single indexer is returning
+ # the correct type
+
+
+class TestiLoc2:
+    # TODO: better name, just separating out things that don't rely on base class
def test_iloc_exceeds_bounds(self):
# GH6296
@@ -135,28 +173,6 @@ def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
- def test_iloc_getitem_int(self):
- # integer
- self.check_result(
- "iloc",
- 2,
- "iloc",
- 2,
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
- def test_iloc_getitem_neg_int(self):
- # neg integer
- self.check_result(
- "iloc",
- -1,
- "iloc",
- -1,
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
@pytest.mark.parametrize("dims", [1, 2])
def test_iloc_getitem_invalid_scalar(self, dims):
# GH 21982
@@ -183,19 +199,6 @@ def test_iloc_array_not_mutating_negative_indices(self):
df.iloc[:, array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
- def test_iloc_getitem_list_int(self):
- self.check_result(
- "iloc",
- [0, 1, 2],
- "iloc",
- [0, 1, 2],
- typs=["labels", "mixed", "ts", "floats", "empty"],
- fails=IndexError,
- )
-
- # array of ints (GH5006), make sure that a single indexer is returning
- # the correct type
-
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
@@ -286,7 +289,9 @@ def test_iloc_getitem_slice_dups(self):
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
- df = self.frame_ints
+ df = DataFrame(
+ np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
+ )
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 1913caae93932..98940b64330b4 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -17,13 +17,13 @@
from pandas.core.generic import NDFrame
from pandas.core.indexers import validate_indices
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
-from pandas.tests.indexing.common import Base, _mklbl
+from pandas.tests.indexing.common import _mklbl
# ------------------------------------------------------------------------
# Indexing test cases
-class TestFancy(Base):
+class TestFancy:
""" pure get/set item & fancy indexing """
def test_setitem_ndarray_1d(self):
@@ -137,7 +137,7 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
r"Buffer has wrong number of dimensions \(expected 1, "
r"got 3\)|"
"'pandas._libs.interval.IntervalTree' object has no attribute "
- "'set_value'|" # AttributeError
+ "'get_loc'|" # AttributeError
"unhashable type: 'numpy.ndarray'|" # TypeError
"No matching signature found|" # TypeError
r"^\[\[\[|" # pandas.core.indexing.IndexingError
@@ -750,7 +750,7 @@ def test_index_type_coercion(self):
assert s2.index.is_object()
-class TestMisc(Base):
+class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
@@ -875,21 +875,21 @@ def test_indexing_dtypes_on_empty(self):
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
- def test_range_in_series_indexing(self):
+ @pytest.mark.parametrize("size", [5, 999999, 1000000])
+ def test_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
- for x in [5, 999999, 1000000]:
- s = Series(index=range(x), dtype=np.float64)
- s.loc[range(1)] = 42
- tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
+ s = Series(index=range(size), dtype=np.float64)
+ s.loc[range(1)] = 42
+ tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
- s.loc[range(2)] = 43
- tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
+ s.loc[range(2)] = 43
+ tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
- def test_non_reducing_slice(self):
- df = DataFrame([[0, 1], [2, 3]])
-
- slices = [
+ @pytest.mark.parametrize(
+ "slc",
+ [
+            # FIXME: don't leave commented-out
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
@@ -902,10 +902,13 @@ def test_non_reducing_slice(self):
[0, 1],
np.array([0, 1]),
Series([0, 1]),
- ]
- for slice_ in slices:
- tslice_ = _non_reducing_slice(slice_)
- assert isinstance(df.loc[tslice_], DataFrame)
+ ],
+ )
+ def test_non_reducing_slice(self, slc):
+ df = DataFrame([[0, 1], [2, 3]])
+
+ tslice_ = _non_reducing_slice(slc)
+ assert isinstance(df.loc[tslice_], DataFrame)
def test_list_slice(self):
# like dataframe getitem
@@ -965,37 +968,37 @@ class TestSeriesNoneCoercion:
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
- def test_coercion_with_setitem(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series[0] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_setitem(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series[0] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_loc_setitem(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series.loc[0] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_loc_setitem(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series.loc[0] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_setitem_and_series(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series[start_series == start_series[0]] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_setitem_and_series(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series[start_series == start_series[0]] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
- def test_coercion_with_loc_and_series(self):
- for start_data, expected_result in self.EXPECTED_RESULTS:
- start_series = Series(start_data)
- start_series.loc[start_series == start_series[0]] = None
+ @pytest.mark.parametrize("start_data,expected_result", EXPECTED_RESULTS)
+ def test_coercion_with_loc_and_series(self, start_data, expected_result):
+ start_series = Series(start_data)
+ start_series.loc[start_series == start_series[0]] = None
- expected_series = Series(expected_result)
- tm.assert_series_equal(start_series, expected_series)
+ expected_series = Series(expected_result)
+ tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion:
@@ -1012,31 +1015,35 @@ class TestDataframeNoneCoercion:
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
- def test_coercion_with_loc(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe.loc[0, ["foo"]] = None
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_coercion_with_loc(self, expected):
+ start_data, expected_result = expected
+
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe.loc[0, ["foo"]] = None
+
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
+
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_coercion_with_setitem_and_dataframe(self, expected):
+ start_data, expected_result = expected
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
- def test_coercion_with_setitem_and_dataframe(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
+ def test_none_coercion_loc_and_dataframe(self, expected):
+ start_data, expected_result = expected
- def test_none_coercion_loc_and_dataframe(self):
- for start_data, expected_result in self.EXPECTED_SINGLE_ROW_RESULTS:
- start_dataframe = DataFrame({"foo": start_data})
- start_dataframe.loc[
- start_dataframe["foo"] == start_dataframe["foo"][0]
- ] = None
+ start_dataframe = DataFrame({"foo": start_data})
+ start_dataframe.loc[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
- expected_dataframe = DataFrame({"foo": expected_result})
- tm.assert_frame_equal(start_dataframe, expected_dataframe)
+ expected_dataframe = DataFrame({"foo": expected_result})
+ tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame(
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index b9dc96adfa738..3a726fb9923ee 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -13,85 +13,6 @@
class TestLoc(Base):
- def test_loc_getitem_dups(self):
- # GH 5678
- # repeated getitems on a dup index returning a ndarray
- df = DataFrame(
- np.random.random_sample((20, 5)), index=["ABCDE"[x % 5] for x in range(20)]
- )
- expected = df.loc["A", 0]
- result = df.loc[:, 0].loc["A"]
- tm.assert_series_equal(result, expected)
-
- def test_loc_getitem_dups2(self):
-
- # GH4726
- # dup indexing with iloc/loc
- df = DataFrame(
- [[1, 2, "foo", "bar", Timestamp("20130101")]],
- columns=["a", "a", "a", "a", "a"],
- index=[1],
- )
- expected = Series(
- [1, 2, "foo", "bar", Timestamp("20130101")],
- index=["a", "a", "a", "a", "a"],
- name=1,
- )
-
- result = df.iloc[0]
- tm.assert_series_equal(result, expected)
-
- result = df.loc[1]
- tm.assert_series_equal(result, expected)
-
- def test_loc_setitem_dups(self):
-
- # GH 6541
- df_orig = DataFrame(
- {
- "me": list("rttti"),
- "foo": list("aaade"),
- "bar": np.arange(5, dtype="float64") * 1.34 + 2,
- "bar2": np.arange(5, dtype="float64") * -0.34 + 2,
- }
- ).set_index("me")
-
- indexer = tuple(["r", ["bar", "bar2"]])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
-
- indexer = tuple(["r", "bar"])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
-
- indexer = tuple(["t", ["bar", "bar2"]])
- df = df_orig.copy()
- df.loc[indexer] *= 2.0
- tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
-
- def test_loc_setitem_slice(self):
- # GH10503
-
- # assigning the same type should not change the type
- df1 = DataFrame({"a": [0, 1, 1], "b": Series([100, 200, 300], dtype="uint32")})
- ix = df1["a"] == 1
- newb1 = df1.loc[ix, "b"] + 1
- df1.loc[ix, "b"] = newb1
- expected = DataFrame(
- {"a": [0, 1, 1], "b": Series([100, 201, 301], dtype="uint32")}
- )
- tm.assert_frame_equal(df1, expected)
-
- # assigning a new type should get the inferred type
- df2 = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
- ix = df1["a"] == 1
- newb2 = df2.loc[ix, "b"]
- df1.loc[ix, "b"] = newb2
- expected = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
- tm.assert_frame_equal(df2, expected)
-
def test_loc_getitem_int(self):
# int label
@@ -162,17 +83,6 @@ def test_loc_getitem_label_list_with_missing(self):
fails=KeyError,
)
- def test_getitem_label_list_with_missing(self):
- s = Series(range(3), index=["a", "b", "c"])
-
- # consistency
- with pytest.raises(KeyError, match="with any missing labels"):
- s[["a", "d"]]
-
- s = Series(range(3))
- with pytest.raises(KeyError, match="with any missing labels"):
- s[[0, 3]]
-
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result(
@@ -196,6 +106,168 @@ def test_loc_getitem_bool(self):
self.check_result("loc", b, "loc", b, typs=["empty"], fails=IndexError)
+ def test_loc_getitem_label_slice(self):
+
+ # label slices (with ints)
+
+ # real label slices
+
+ # GH 14316
+
+ self.check_result(
+ "loc",
+ slice(1, 3),
+ "loc",
+ slice(1, 3),
+ typs=["labels", "mixed", "empty", "ts", "floats"],
+ fails=TypeError,
+ )
+
+ self.check_result(
+ "loc",
+ slice("20130102", "20130104"),
+ "loc",
+ slice("20130102", "20130104"),
+ typs=["ts"],
+ axes=1,
+ fails=TypeError,
+ )
+
+ self.check_result(
+ "loc",
+ slice(2, 8),
+ "loc",
+ slice(2, 8),
+ typs=["mixed"],
+ axes=0,
+ fails=TypeError,
+ )
+ self.check_result(
+ "loc",
+ slice(2, 8),
+ "loc",
+ slice(2, 8),
+ typs=["mixed"],
+ axes=1,
+ fails=KeyError,
+ )
+
+ self.check_result(
+ "loc",
+ slice(2, 4, 2),
+ "loc",
+ slice(2, 4, 2),
+ typs=["mixed"],
+ axes=0,
+ fails=TypeError,
+ )
+
+
+class TestLoc2:
+    # TODO: better name, just separating out things that don't rely on the base class
+
+ def test_loc_getitem_dups(self):
+ # GH 5678
+ # repeated getitems on a dup index returning a ndarray
+ df = DataFrame(
+ np.random.random_sample((20, 5)), index=["ABCDE"[x % 5] for x in range(20)]
+ )
+ expected = df.loc["A", 0]
+ result = df.loc[:, 0].loc["A"]
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_dups2(self):
+
+ # GH4726
+ # dup indexing with iloc/loc
+ df = DataFrame(
+ [[1, 2, "foo", "bar", Timestamp("20130101")]],
+ columns=["a", "a", "a", "a", "a"],
+ index=[1],
+ )
+ expected = Series(
+ [1, 2, "foo", "bar", Timestamp("20130101")],
+ index=["a", "a", "a", "a", "a"],
+ name=1,
+ )
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc[1]
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_dups(self):
+
+ # GH 6541
+ df_orig = DataFrame(
+ {
+ "me": list("rttti"),
+ "foo": list("aaade"),
+ "bar": np.arange(5, dtype="float64") * 1.34 + 2,
+ "bar2": np.arange(5, dtype="float64") * -0.34 + 2,
+ }
+ ).set_index("me")
+
+ indexer = tuple(["r", ["bar", "bar2"]])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ indexer = tuple(["r", "bar"])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
+
+ indexer = tuple(["t", ["bar", "bar2"]])
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ def test_loc_setitem_slice(self):
+ # GH10503
+
+ # assigning the same type should not change the type
+ df1 = DataFrame({"a": [0, 1, 1], "b": Series([100, 200, 300], dtype="uint32")})
+ ix = df1["a"] == 1
+ newb1 = df1.loc[ix, "b"] + 1
+ df1.loc[ix, "b"] = newb1
+ expected = DataFrame(
+ {"a": [0, 1, 1], "b": Series([100, 201, 301], dtype="uint32")}
+ )
+ tm.assert_frame_equal(df1, expected)
+
+ # assigning a new type should get the inferred type
+ df2 = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
+ ix = df1["a"] == 1
+ newb2 = df2.loc[ix, "b"]
+ df1.loc[ix, "b"] = newb2
+ expected = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
+ tm.assert_frame_equal(df2, expected)
+
+ def test_loc_setitem_dtype(self):
+ # GH31340
+ df = DataFrame({"id": ["A"], "a": [1.2], "b": [0.0], "c": [-2.5]})
+ cols = ["a", "b", "c"]
+ df.loc[:, cols] = df.loc[:, cols].astype("float32")
+
+ expected = DataFrame(
+ {"id": ["A"], "a": [1.2], "b": [0.0], "c": [-2.5]}, dtype="float32"
+ ) # id is inferred as object
+
+ tm.assert_frame_equal(df, expected)
+
+ def test_getitem_label_list_with_missing(self):
+ s = Series(range(3), index=["a", "b", "c"])
+
+ # consistency
+ with pytest.raises(KeyError, match="with any missing labels"):
+ s[["a", "d"]]
+
+ s = Series(range(3))
+ with pytest.raises(KeyError, match="with any missing labels"):
+ s[[0, 3]]
+
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_loc_getitem_bool_diff_len(self, index):
# GH26658
@@ -297,62 +369,6 @@ def test_loc_getitem_list_with_fail(self):
with pytest.raises(KeyError, match="with any missing labels"):
s.loc[[2, 3]]
- def test_loc_getitem_label_slice(self):
-
- # label slices (with ints)
-
- # real label slices
-
- # GH 14316
-
- self.check_result(
- "loc",
- slice(1, 3),
- "loc",
- slice(1, 3),
- typs=["labels", "mixed", "empty", "ts", "floats"],
- fails=TypeError,
- )
-
- self.check_result(
- "loc",
- slice("20130102", "20130104"),
- "loc",
- slice("20130102", "20130104"),
- typs=["ts"],
- axes=1,
- fails=TypeError,
- )
-
- self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
- )
- self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=1,
- fails=KeyError,
- )
-
- self.check_result(
- "loc",
- slice(2, 4, 2),
- "loc",
- slice(2, 4, 2),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
- )
-
def test_loc_index(self):
# gh-17131
# a boolean index should index like a boolean numpy array
@@ -559,7 +575,7 @@ def test_loc_modify_datetime(self):
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame(self):
- df = self.frame_labels
+ df = DataFrame(np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD"))
result = df.iloc[0, 0]
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index a567fb9b8ccc7..3622b12b853a4 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -65,6 +65,10 @@ def _check(f, func, values=False):
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
+
+class TestScalar2:
+    # TODO: Better name, just separating things that don't need Base class
+
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
@@ -125,38 +129,79 @@ def test_imethods_with_dups(self):
result = df.iat[2, 0]
assert result == 2
- def test_at_to_fail(self):
+ def test_series_at_raises_type_error(self):
# at should not fallback
# GH 7814
- s = Series([1, 2, 3], index=list("abc"))
- result = s.at["a"]
+ # GH#31724 .at should match .loc
+ ser = Series([1, 2, 3], index=list("abc"))
+ result = ser.at["a"]
assert result == 1
+ result = ser.loc["a"]
+ assert result == 1
+
msg = (
- "At based indexing on an non-integer index can only have "
- "non-integer indexers"
+ "cannot do label indexing on Index "
+ r"with these indexers \[0\] of type int"
)
- with pytest.raises(ValueError, match=msg):
- s.at[0]
+ with pytest.raises(TypeError, match=msg):
+ ser.at[0]
+ with pytest.raises(TypeError, match=msg):
+ ser.loc[0]
+ def test_frame_raises_type_error(self):
+ # GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = df.at["a", "A"]
assert result == 1
- with pytest.raises(ValueError, match=msg):
+ result = df.loc["a", "A"]
+ assert result == 1
+
+ msg = (
+ "cannot do label indexing on Index "
+ r"with these indexers \[0\] of type int"
+ )
+ with pytest.raises(TypeError, match=msg):
df.at["a", 0]
+ with pytest.raises(TypeError, match=msg):
+ df.loc["a", 0]
- s = Series([1, 2, 3], index=[3, 2, 1])
- result = s.at[1]
+ def test_series_at_raises_key_error(self):
+ # GH#31724 .at should match .loc
+
+ ser = Series([1, 2, 3], index=[3, 2, 1])
+ result = ser.at[1]
+ assert result == 3
+ result = ser.loc[1]
assert result == 3
- msg = "At based indexing on an integer index can only have integer indexers"
- with pytest.raises(ValueError, match=msg):
- s.at["a"]
+
+ with pytest.raises(KeyError, match="a"):
+ ser.at["a"]
+ with pytest.raises(KeyError, match="a"):
+ # .at should match .loc
+ ser.loc["a"]
+
+ def test_frame_at_raises_key_error(self):
+ # GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
+
result = df.at[1, 0]
assert result == 3
- with pytest.raises(ValueError, match=msg):
+ result = df.loc[1, 0]
+ assert result == 3
+
+ with pytest.raises(KeyError, match="a"):
df.at["a", 0]
+ with pytest.raises(KeyError, match="a"):
+ df.loc["a", 0]
+
+ with pytest.raises(KeyError, match="a"):
+ df.at[1, "a"]
+ with pytest.raises(KeyError, match="a"):
+ df.loc[1, "a"]
+ # TODO: belongs somewhere else?
+ def test_getitem_list_missing_key(self):
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index f7b49ccb1a72d..91665a24fc4c5 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1048,6 +1048,27 @@ def test_invalid_columns(self, path):
):
write_frame.to_excel(path, "test1", columns=["C", "D"])
+ @pytest.mark.parametrize(
+ "to_excel_index,read_excel_index_col",
+ [
+ (True, 0), # Include index in write to file
+            (False, None),  # Don't include index in write to file
+ ],
+ )
+ def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
+ # GH 31677
+ write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
+ write_frame.to_excel(
+ path, "col_subset_bug", columns=["A", "B"], index=to_excel_index
+ )
+
+ expected = write_frame[["A", "B"]]
+ read_frame = pd.read_excel(
+ path, "col_subset_bug", index_col=read_excel_index_col
+ )
+
+ tm.assert_frame_equal(expected, read_frame)
+
def test_comment_arg(self, path):
# see gh-18735
#
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 7650561d3072d..bf7b98eb78f11 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -239,6 +239,15 @@ def test_repr_truncation(self):
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
+ def test_repr_deprecation_negative_int(self):
+ # FIXME: remove in future version after deprecation cycle
+ # Non-regression test for:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ width = get_option("display.max_colwidth")
+ with tm.assert_produces_warning(FutureWarning):
+ set_option("display.max_colwidth", -1)
+ set_option("display.max_colwidth", width)
+
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 94d51589023c4..f2d35bfb3b5ae 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1662,3 +1662,22 @@ def test_json_multiindex(self, dataframe, expected):
series = dataframe.stack()
result = series.to_json(orient="index")
assert result == expected
+
+ def test_to_s3(self, s3_resource):
+ # GH 28375
+ mock_bucket_name, target_file = "pandas-test", "test.json"
+ df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
+ df.to_json(f"s3://{mock_bucket_name}/{target_file}")
+ assert target_file in (
+ obj.key for obj in s3_resource.Bucket("pandas-test").objects.all()
+ )
+
+ def test_json_pandas_na(self):
+ # GH 31615
+ result = pd.DataFrame([[pd.NA]]).to_json()
+ assert result == '{"0":{"0":null}}'
+
+ def test_json_pandas_nulls(self, nulls_fixture):
+ # GH 31615
+ result = pd.DataFrame([[nulls_fixture]]).to_json()
+ assert result == '{"0":{"0":null}}'
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 6c17f40b790ac..c19056d434ec3 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -2040,6 +2040,17 @@ def test_read_csv_memory_growth_chunksize(all_parsers):
pass
+def test_read_csv_raises_on_header_prefix(all_parsers):
+ # gh-27394
+ parser = all_parsers
+ msg = "Argument prefix must be None if argument header is not None"
+
+ s = StringIO("0,1\n2,3")
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(s, header=0, prefix="_X")
+
+
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 406e7bedfd298..13f72a0414bac 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -141,6 +141,7 @@ def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
)
def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding):
# gh-23779: Python csv engine shouldn't error on files opened in binary.
+ # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
parser = all_parsers
fpath = os.path.join(csv_dir_path, fname)
@@ -154,6 +155,10 @@ def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding):
result = parser.read_csv(fb, encoding=encoding)
tm.assert_frame_equal(expected, result)
+ with open(fpath, mode="rb", buffering=0) as fb:
+ result = parser.read_csv(fb, encoding=encoding)
+ tm.assert_frame_equal(expected, result)
+
@pytest.mark.parametrize("pass_encoding", [True, False])
def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index b01b22e811ee3..073af758f0b29 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1516,3 +1516,15 @@ def test_hypothesis_delimited_date(date_format, dayfirst, delimiter, test_dateti
assert except_out_dateutil == except_in_dateutil
assert result == expected
+
+
+@pytest.mark.parametrize("parse_dates", [["time", ], {"date": ["time", ]}])
+def test_missing_column(all_parsers, parse_dates):
+ """GH31251 column names provided in parse_dates could be missing."""
+ parser = all_parsers
+ content = StringIO("time,val\n2020-01-31,32\n")
+ msg = "Missing column provided to 'parse_dates': 'time'"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(
+ content, sep=",", usecols=["val", ], parse_dates=parse_dates,
+ )
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d7a21b27308e8..404f5a477187b 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -141,24 +141,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
- msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext)
- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
- msg3 = "Expected object or value"
- msg4 = "path_or_buf needs to be a string file path or file-like"
- msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
- fr"'.+does_not_exist\.{fn_ext}'"
- )
- msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
- msg7 = (
- fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
- )
- msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
-
- with pytest.raises(
- error_class,
- match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
- ):
+ with tm.external_error_raised(error_class):
reader(path)
@pytest.mark.parametrize(
@@ -184,24 +167,7 @@ def test_read_expands_user_home_dir(
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
- msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
- msg3 = "Unexpected character found when decoding 'false'"
- msg4 = "path_or_buf needs to be a string file path or file-like"
- msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
- fr"'.+does_not_exist\.{fn_ext}'"
- )
- msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
- msg7 = (
- fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
- )
- msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
-
- with pytest.raises(
- error_class,
- match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
- ):
+ with tm.external_error_raised(error_class):
reader(path)
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index d51c712ed5abd..7ed8d8f22764c 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -564,6 +564,13 @@ def test_additional_extension_types(self, pa):
)
check_round_trip(df, pa)
+ @td.skip_if_no("pyarrow", min_version="0.14")
+ def test_timestamp_nanoseconds(self, pa):
+ # with version 2.0, pyarrow defaults to writing the nanoseconds, so
+ # this should work without error
+ df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
+ check_round_trip(df, pa, write_kwargs={"version": "2.0"})
+
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.3.2")
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 04fd4835469a9..78b630bb5ada1 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -60,9 +60,7 @@ def compare_element(result, expected, typ, version=None):
assert result == expected
assert result.freq == expected.freq
else:
- comparator = getattr(
- tm, "assert_{typ}_equal".format(typ=typ), tm.assert_almost_equal
- )
+ comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal)
comparator(result, expected)
@@ -77,7 +75,7 @@ def compare(data, vf, version):
# use a specific comparator
# if available
- comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
+ comparator = f"compare_{typ}_{dt}"
comparator = m.get(comparator, m["compare_element"])
comparator(result, expected, typ, version)
@@ -234,7 +232,7 @@ def test_legacy_sparse_warning(datapath):
@pytest.fixture
def get_random_path():
- return "__{}__.pickle".format(tm.rands(10))
+ return f"__{tm.rands(10)}__.pickle"
class TestCompression:
@@ -262,7 +260,7 @@ def compress_file(self, src_path, dest_path, compression):
elif compression == "xz":
f = _get_lzma_file(lzma)(dest_path, "w")
else:
- msg = "Unrecognized compression type: {}".format(compression)
+ msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
if compression != "zip":
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 9cd3ccbf9214e..e54f4784e9c4f 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -8,6 +8,7 @@
import pandas._config.config as cf
from pandas.compat.numpy import np_datetime64_compat
+import pandas.util._test_decorators as td
from pandas import Index, Period, Series, Timestamp, date_range
import pandas._testing as tm
@@ -59,6 +60,7 @@ def test_register_by_default(self):
call = [sys.executable, "-c", code]
assert subprocess.check_call(call) == 0
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
@@ -66,9 +68,7 @@ def test_registering_no_warning(self):
# Set to the "warn" state, in case this isn't the first test run
register_matplotlib_converters()
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- # GH#30588 DeprecationWarning from 2D indexing
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
@@ -91,6 +91,7 @@ def test_matplotlib_formatters(self):
assert Timestamp not in units.registry
assert Timestamp in units.registry
+ @td.skip_if_no("matplotlib", min_version="3.1.3")
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters", False)
@@ -100,15 +101,12 @@ def test_option_no_warning(self):
# Test without registering first, no warning
with ctx:
- # GH#30588 DeprecationWarning from 2D indexing on Index
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
# Now test with registering
register_matplotlib_converters()
with ctx:
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- ax.plot(s.index, s.values)
+ ax.plot(s.index, s.values)
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 84d298cd7c6fe..979b89a87d843 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -43,19 +43,19 @@ def setup_method(self, method):
def teardown_method(self, method):
tm.close()
- # Ignore warning
- # ```
- # Converting to PeriodArray/Index representation will drop timezone information.
- # ```
- # which occurs for UTC-like timezones.
@pytest.mark.slow
- @pytest.mark.filterwarnings("ignore:msg:UserWarning")
def test_ts_plot_with_tz(self, tz_aware_fixture):
- # GH2877, GH17173
+ # GH2877, GH17173, GH31205, GH31580
tz = tz_aware_fixture
index = date_range("1/1/2011", periods=2, freq="H", tz=tz)
ts = Series([188.5, 328.25], index=index)
- _check_plot_works(ts.plot)
+ with tm.assert_produces_warning(None):
+ _check_plot_works(ts.plot)
+ ax = ts.plot()
+ xdata = list(ax.get_lines())[0].get_xdata()
+ # Check first and last points' labels are correct
+ assert (xdata[0].hour, xdata[0].minute) == (0, 0)
+ assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)
def test_fontsize_set_correctly(self):
# For issue #8765
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f9acf5b60a3cd..fd189c7435b29 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -371,10 +371,8 @@ def test_no_overlap_more_informative_error(self):
msg = (
"No common columns to perform merge on. "
- "Merge options: left_on={lon}, right_on={ron}, "
- "left_index={lidx}, right_index={ridx}".format(
- lon=None, ron=None, lidx=False, ridx=False
- )
+ f"Merge options: left_on={None}, right_on={None}, "
+ f"left_index={False}, right_index={False}"
)
with pytest.raises(MergeError, match=msg):
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 13b6f05ed304a..830e786fd1c6d 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -612,3 +612,16 @@ def test_cut_incorrect_labels(labels):
msg = "Bin labels must either be False, None or passed in as a list-like argument"
with pytest.raises(ValueError, match=msg):
cut(values, 4, labels=labels)
+
+
+@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
+@pytest.mark.parametrize("right", [True, False])
+@pytest.mark.parametrize("include_lowest", [True, False])
+def test_cut_nullable_integer(bins, right, include_lowest):
+ a = np.random.randint(0, 10, size=50).astype(float)
+ a[::2] = np.nan
+ result = cut(
+ pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
+ )
+ expected = cut(a, bins, right=right, include_lowest=include_lowest)
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py
index 95406a5ebf4f7..c436ab5d90578 100644
--- a/pandas/tests/reshape/test_qcut.py
+++ b/pandas/tests/reshape/test_qcut.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
@@ -286,3 +287,14 @@ def test_qcut_bool_coercion_to_int(bins, box, compare):
expected = qcut(data_expected, bins, duplicates="drop")
result = qcut(data_result, bins, duplicates="drop")
compare(result, expected)
+
+
+@pytest.mark.parametrize("q", [2, 5, 10])
+def test_qcut_nullable_integer(q, any_nullable_int_dtype):
+ arr = pd.array(np.arange(100), dtype=any_nullable_int_dtype)
+ arr[::2] = pd.NA
+
+ result = qcut(arr, q)
+ expected = qcut(arr.astype(float), q)
+
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index 357274e724c68..436810042186a 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -3,7 +3,7 @@
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
-from pandas import Period, offsets
+from pandas import Period, Timestamp, offsets
class TestFreqConversion:
@@ -656,6 +656,23 @@ def test_conv_secondly(self):
assert ival_S.asfreq("S") == ival_S
+ def test_conv_microsecond(self):
+ # GH#31475 Avoid floating point errors dropping the start_time to
+ # before the beginning of the Period
+ per = Period("2020-01-30 15:57:27.576166", freq="U")
+ assert per.ordinal == 1580399847576166
+
+ start = per.start_time
+ expected = Timestamp("2020-01-30 15:57:27.576166")
+ assert start == expected
+ assert start.value == per.ordinal * 1000
+
+ per2 = Period("2300-01-01", "us")
+ with pytest.raises(OutOfBoundsDatetime, match="2300-01-01"):
+ per2.start_time
+ with pytest.raises(OutOfBoundsDatetime, match="2300-01-01"):
+ per2.end_time
+
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq="A", year=2007)
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index bbc81e0dbb6e6..995d47c1473be 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -925,7 +925,7 @@ def test_properties_secondly(self):
class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
- msg = "Buffer dtype mismatch, expected 'int64_t' but got 'double'"
+ msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"
with pytest.raises(ValueError, match=msg):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index ae1e84576c092..25c9fc19981be 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -274,3 +274,10 @@ def test_td_constructor_on_nanoseconds(constructed_td, conversion):
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds="abc")
+
+
+def test_timedelta_constructor_identity():
+ # Test for #30543
+ expected = Timedelta(np.timedelta64(1, "s"))
+ result = Timedelta(expected)
+ assert result is expected
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index e1d965bbb14e9..9cdbeb6ab4845 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -821,3 +821,16 @@ def test_resolution_deprecated(self):
def test_truthiness(value, expected):
# https://github.com/pandas-dev/pandas/issues/21484
assert bool(value) is expected
+
+
+def test_timedelta_attribute_precision():
+ # GH 31354
+ td = Timedelta(1552211999999999872, unit="ns")
+ result = td.days * 86400
+ result += td.seconds
+ result *= 1000000
+ result += td.microseconds
+ result *= 1000
+ result += td.nanoseconds
+ expected = td.value
+ assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
new file mode 100644
index 0000000000000..737a85faa4c9b
--- /dev/null
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -0,0 +1,552 @@
+import calendar
+from datetime import datetime, timedelta
+
+import dateutil.tz
+from dateutil.tz import tzutc
+import numpy as np
+import pytest
+import pytz
+
+from pandas.errors import OutOfBoundsDatetime
+
+from pandas import Period, Timedelta, Timestamp, compat
+
+from pandas.tseries import offsets
+
+
+class TestTimestampConstructors:
+ def test_constructor(self):
+ base_str = "2014-07-01 09:00"
+ base_dt = datetime(2014, 7, 1, 9)
+ base_expected = 1_404_205_200_000_000_000
+
+ # confirm base representation is correct
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
+
+ tests = [
+ (base_str, base_dt, base_expected),
+ (
+ "2014-07-01 10:00",
+ datetime(2014, 7, 1, 10),
+ base_expected + 3600 * 1_000_000_000,
+ ),
+ (
+ "2014-07-01 09:00:00.000008000",
+ datetime(2014, 7, 1, 9, 0, 0, 8),
+ base_expected + 8000,
+ ),
+ (
+ "2014-07-01 09:00:00.000000005",
+ Timestamp("2014-07-01 09:00:00.000000005"),
+ base_expected + 5,
+ ),
+ ]
+
+ timezones = [
+ (None, 0),
+ ("UTC", 0),
+ (pytz.utc, 0),
+ ("Asia/Tokyo", 9),
+ ("US/Eastern", -4),
+ ("dateutil/US/Pacific", -7),
+ (pytz.FixedOffset(-180), -3),
+ (dateutil.tz.tzoffset(None, 18000), 5),
+ ]
+
+ for date_str, date, expected in tests:
+ for result in [Timestamp(date_str), Timestamp(date)]:
+ # only with timestring
+ assert result.value == expected
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ assert result.value == expected
+
+ # with timezone
+ for tz, offset in timezones:
+ for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
+ expected_tz = expected - offset * 3600 * 1_000_000_000
+ assert result.value == expected_tz
+
+ # should preserve tz
+ result = Timestamp(result)
+ assert result.value == expected_tz
+
+ # should convert to UTC
+ if tz is not None:
+ result = Timestamp(result).tz_convert("UTC")
+ else:
+ result = Timestamp(result, tz="UTC")
+ expected_utc = expected - offset * 3600 * 1_000_000_000
+ assert result.value == expected_utc
+
+ def test_constructor_with_stringoffset(self):
+ # GH 7833
+ base_str = "2014-07-01 11:00:00+02:00"
+ base_dt = datetime(2014, 7, 1, 9)
+ base_expected = 1_404_205_200_000_000_000
+
+ # confirm base representation is correct
+ assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
+
+ tests = [
+ (base_str, base_expected),
+ ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000),
+ ("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000),
+ ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5),
+ ]
+
+ timezones = [
+ (None, 0),
+ ("UTC", 0),
+ (pytz.utc, 0),
+ ("Asia/Tokyo", 9),
+ ("US/Eastern", -4),
+ ("dateutil/US/Pacific", -7),
+ (pytz.FixedOffset(-180), -3),
+ (dateutil.tz.tzoffset(None, 18000), 5),
+ ]
+
+ for date_str, expected in tests:
+ for result in [Timestamp(date_str)]:
+ # only with timestring
+ assert result.value == expected
+
+ # re-creation shouldn't affect to internal value
+ result = Timestamp(result)
+ assert result.value == expected
+
+ # with timezone
+ for tz, offset in timezones:
+ result = Timestamp(date_str, tz=tz)
+ expected_tz = expected
+ assert result.value == expected_tz
+
+ # should preserve tz
+ result = Timestamp(result)
+ assert result.value == expected_tz
+
+ # should convert to UTC
+ result = Timestamp(result).tz_convert("UTC")
+ expected_utc = expected
+ assert result.value == expected_utc
+
+ # This should be 2013-11-01 05:00 in UTC
+ # converted to Chicago tz
+ result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
+ assert result.value == Timestamp("2013-11-01 05:00").value
+ expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # This should be 2013-11-01 05:00 in UTC
+ # converted to Tokyo tz (+09:00)
+ result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo")
+ assert result.value == Timestamp("2013-11-01 05:00").value
+ expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # GH11708
+ # This should be 2015-11-18 10:00 in UTC
+ # converted to Asia/Katmandu
+ result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
+ assert result.value == Timestamp("2015-11-18 10:00").value
+ expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ # This should be 2015-11-18 10:00 in UTC
+ # converted to Asia/Kolkata
+ result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
+ assert result.value == Timestamp("2015-11-18 10:00").value
+ expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
+ assert repr(result) == expected
+ assert result == eval(repr(result))
+
+ def test_constructor_invalid(self):
+ with pytest.raises(TypeError, match="Cannot convert input"):
+ Timestamp(slice(2))
+ with pytest.raises(ValueError, match="Cannot convert Period"):
+ Timestamp(Period("1000-01-01"))
+
+ def test_constructor_invalid_tz(self):
+ # GH#17690
+ with pytest.raises(TypeError, match="must be a datetime.tzinfo"):
+ Timestamp("2017-10-22", tzinfo="US/Eastern")
+
+ with pytest.raises(ValueError, match="at most one of"):
+ Timestamp("2017-10-22", tzinfo=pytz.utc, tz="UTC")
+
+ with pytest.raises(ValueError, match="Invalid frequency:"):
+ # GH#5168
+ # case where user tries to pass tz as an arg, not kwarg, gets
+ # interpreted as a `freq`
+ Timestamp("2012-01-01", "US/Pacific")
+
+ def test_constructor_strptime(self):
+ # GH25016
+ # Test support for Timestamp.strptime
+ fmt = "%Y%m%d-%H%M%S-%f%z"
+ ts = "20190129-235348-000001+0000"
+ with pytest.raises(NotImplementedError):
+ Timestamp.strptime(ts, fmt)
+
+ def test_constructor_tz_or_tzinfo(self):
+ # GH#17943, GH#17690, GH#5168
+ stamps = [
+ Timestamp(year=2017, month=10, day=22, tz="UTC"),
+ Timestamp(year=2017, month=10, day=22, tzinfo=pytz.utc),
+ Timestamp(year=2017, month=10, day=22, tz=pytz.utc),
+ Timestamp(datetime(2017, 10, 22), tzinfo=pytz.utc),
+ Timestamp(datetime(2017, 10, 22), tz="UTC"),
+ Timestamp(datetime(2017, 10, 22), tz=pytz.utc),
+ ]
+ assert all(ts == stamps[0] for ts in stamps)
+
+ def test_constructor_positional(self):
+ # see gh-10758
+ with pytest.raises(TypeError):
+ Timestamp(2000, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 0, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 13, 1)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 1, 0)
+ with pytest.raises(ValueError):
+ Timestamp(2000, 1, 32)
+
+ # see gh-11630
+ assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112"))
+ assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(
+ Timestamp("2015-11-12 01:02:03.999999")
+ )
+
+ def test_constructor_keyword(self):
+ # GH 10758
+ with pytest.raises(TypeError):
+ Timestamp(year=2000, month=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=0, day=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=13, day=1)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=1, day=0)
+ with pytest.raises(ValueError):
+ Timestamp(year=2000, month=1, day=32)
+
+ assert repr(Timestamp(year=2015, month=11, day=12)) == repr(
+ Timestamp("20151112")
+ )
+
+ assert repr(
+ Timestamp(
+ year=2015,
+ month=11,
+ day=12,
+ hour=1,
+ minute=2,
+ second=3,
+ microsecond=999999,
+ )
+ ) == repr(Timestamp("2015-11-12 01:02:03.999999"))
+
+ def test_constructor_fromordinal(self):
+ base = datetime(2000, 1, 1)
+
+ ts = Timestamp.fromordinal(base.toordinal(), freq="D")
+ assert base == ts
+ assert ts.freq == "D"
+ assert base.toordinal() == ts.toordinal()
+
+ ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern")
+ assert Timestamp("2000-01-01", tz="US/Eastern") == ts
+ assert base.toordinal() == ts.toordinal()
+
+ # GH#3042
+ dt = datetime(2011, 4, 16, 0, 0)
+ ts = Timestamp.fromordinal(dt.toordinal())
+ assert ts.to_pydatetime() == dt
+
+ # with a tzinfo
+ stamp = Timestamp("2011-4-16", tz="US/Eastern")
+ dt_tz = stamp.to_pydatetime()
+ ts = Timestamp.fromordinal(dt_tz.toordinal(), tz="US/Eastern")
+ assert ts.to_pydatetime() == dt_tz
+
+ @pytest.mark.parametrize(
+ "result",
+ [
+ Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
+ Timestamp(
+ year=2000,
+ month=1,
+ day=2,
+ hour=3,
+ minute=4,
+ second=5,
+ microsecond=6,
+ nanosecond=1,
+ ),
+ Timestamp(
+ year=2000,
+ month=1,
+ day=2,
+ hour=3,
+ minute=4,
+ second=5,
+ microsecond=6,
+ nanosecond=1,
+ tz="UTC",
+ ),
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
+ ],
+ )
+ def test_constructor_nanosecond(self, result):
+ # GH 18898
+ expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
+ expected = expected + Timedelta(nanoseconds=1)
+ assert result == expected
+
+ @pytest.mark.parametrize("z", ["Z0", "Z00"])
+ def test_constructor_invalid_Z0_isostring(self, z):
+ # GH 8910
+ with pytest.raises(ValueError):
+ Timestamp("2014-11-02 01:00{}".format(z))
+
+ @pytest.mark.parametrize(
+ "arg",
+ [
+ "year",
+ "month",
+ "day",
+ "hour",
+ "minute",
+ "second",
+ "microsecond",
+ "nanosecond",
+ ],
+ )
+ def test_invalid_date_kwarg_with_string_input(self, arg):
+ kwarg = {arg: 1}
+ with pytest.raises(ValueError):
+ Timestamp("2010-10-10 12:59:59.999999999", **kwarg)
+
+ def test_out_of_bounds_integer_value(self):
+ # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp(Timestamp.max.value * 2)
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp(Timestamp.min.value * 2)
+
+ def test_out_of_bounds_value(self):
+ one_us = np.timedelta64(1).astype("timedelta64[us]")
+
+ # By definition we can't go out of bounds in [ns], so we
+ # convert the datetime64s to [us] so we can go out of bounds
+ min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]")
+ max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]")
+
+ # No error for the min/max datetimes
+ Timestamp(min_ts_us)
+ Timestamp(max_ts_us)
+
+ # One us less than the minimum is an error
+ with pytest.raises(ValueError):
+ Timestamp(min_ts_us - one_us)
+
+ # One us more than the maximum is an error
+ with pytest.raises(ValueError):
+ Timestamp(max_ts_us + one_us)
+
+ def test_out_of_bounds_string(self):
+ with pytest.raises(ValueError):
+ Timestamp("1676-01-01")
+ with pytest.raises(ValueError):
+ Timestamp("2263-01-01")
+
+ def test_barely_out_of_bounds(self):
+ # GH#19529
+ # GH#19382 close enough to bounds that dropping nanos would result
+ # in an in-bounds datetime
+ with pytest.raises(OutOfBoundsDatetime):
+ Timestamp("2262-04-11 23:47:16.854775808")
+
+ def test_bounds_with_different_units(self):
+ out_of_bounds_dates = ("1677-09-21", "2262-04-12")
+
+ time_units = ("D", "h", "m", "s", "ms", "us")
+
+ for date_string in out_of_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, unit)
+ with pytest.raises(ValueError):
+ Timestamp(dt64)
+
+ in_bounds_dates = ("1677-09-23", "2262-04-11")
+
+ for date_string in in_bounds_dates:
+ for unit in time_units:
+ dt64 = np.datetime64(date_string, unit)
+ Timestamp(dt64)
+
+ def test_min_valid(self):
+ # Ensure that Timestamp.min is a valid Timestamp
+ Timestamp(Timestamp.min)
+
+ def test_max_valid(self):
+ # Ensure that Timestamp.max is a valid Timestamp
+ Timestamp(Timestamp.max)
+
+ def test_now(self):
+ # GH#9000
+ ts_from_string = Timestamp("now")
+ ts_from_method = Timestamp.now()
+ ts_datetime = datetime.now()
+
+ ts_from_string_tz = Timestamp("now", tz="US/Eastern")
+ ts_from_method_tz = Timestamp.now(tz="US/Eastern")
+
+ # Check that the delta between the times is less than 1s (arbitrarily
+ # small)
+ delta = Timedelta(seconds=1)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (
+ abs(
+ ts_from_string_tz.tz_localize(None)
+ - ts_from_method_tz.tz_localize(None)
+ )
+ < delta
+ )
+
+ def test_today(self):
+ ts_from_string = Timestamp("today")
+ ts_from_method = Timestamp.today()
+ ts_datetime = datetime.today()
+
+ ts_from_string_tz = Timestamp("today", tz="US/Eastern")
+ ts_from_method_tz = Timestamp.today(tz="US/Eastern")
+
+ # Check that the delta between the times is less than 1s (arbitrarily
+ # small)
+ delta = Timedelta(seconds=1)
+ assert abs(ts_from_method - ts_from_string) < delta
+ assert abs(ts_datetime - ts_from_method) < delta
+ assert abs(ts_from_method_tz - ts_from_string_tz) < delta
+ assert (
+ abs(
+ ts_from_string_tz.tz_localize(None)
+ - ts_from_method_tz.tz_localize(None)
+ )
+ < delta
+ )
+
+ @pytest.mark.parametrize("tz", [None, pytz.timezone("US/Pacific")])
+ def test_disallow_setting_tz(self, tz):
+ # GH 3746
+ ts = Timestamp("2010")
+ with pytest.raises(AttributeError):
+ ts.tz = tz
+
+ @pytest.mark.parametrize("offset", ["+0300", "+0200"])
+ def test_construct_timestamp_near_dst(self, offset):
+ # GH 20854
+ expected = Timestamp(
+ "2016-10-30 03:00:00{}".format(offset), tz="Europe/Helsinki"
+ )
+ result = Timestamp(expected).tz_convert("Europe/Helsinki")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"]
+ )
+ def test_construct_with_different_string_format(self, arg):
+ # GH 12064
+ result = Timestamp(arg)
+ expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
+ assert result == expected
+
+ def test_construct_timestamp_preserve_original_frequency(self):
+ # GH 22311
+ result = Timestamp(Timestamp("2010-08-08", freq="D")).freq
+ expected = offsets.Day()
+ assert result == expected
+
+ def test_constructor_invalid_frequency(self):
+ # GH 22311
+ with pytest.raises(ValueError, match="Invalid frequency:"):
+ Timestamp("2012-01-01", freq=[])
+
+ @pytest.mark.parametrize("box", [datetime, Timestamp])
+ def test_raise_tz_and_tzinfo_in_datetime_input(self, box):
+ # GH 23579
+ kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": pytz.utc}
+ with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
+ Timestamp(box(**kwargs), tz="US/Pacific")
+ with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
+ Timestamp(box(**kwargs), tzinfo=pytz.timezone("US/Pacific"))
+
+ def test_dont_convert_dateutil_utc_to_pytz_utc(self):
+ result = Timestamp(datetime(2018, 1, 1), tz=tzutc())
+ expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())
+ assert result == expected
+
+ def test_constructor_subclassed_datetime(self):
+ # GH 25851
+ # ensure that subclassed datetime works for
+ # Timestamp creation
+ class SubDatetime(datetime):
+ pass
+
+ data = SubDatetime(2000, 1, 1)
+ result = Timestamp(data)
+ expected = Timestamp(2000, 1, 1)
+ assert result == expected
+
+ @pytest.mark.skipif(
+ not compat.PY38,
+ reason="datetime.fromisocalendar was added in Python version 3.8",
+ )
+ def test_constructor_fromisocalendar(self):
+ # GH 30395
+ expected_timestamp = Timestamp("2000-01-03 00:00:00")
+ expected_stdlib = datetime.fromisocalendar(2000, 1, 1)
+ result = Timestamp.fromisocalendar(2000, 1, 1)
+ assert result == expected_timestamp
+ assert result == expected_stdlib
+ assert isinstance(result, Timestamp)
+
+
+def test_constructor_ambigous_dst():
+ # GH 24329
+ # Make sure that calling Timestamp constructor
+ # on Timestamp created from ambiguous time
+ # doesn't change Timestamp.value
+ ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
+ expected = ts.value
+ result = Timestamp(ts).value
+ assert result == expected
+
+
+@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999])
+def test_constructor_before_dst_switch(epoch):
+ # GH 31043
+ # Make sure that calling Timestamp constructor
+ # on time just before DST switch doesn't lead to
+ # nonexistent time or value change
+ ts = Timestamp(epoch, tz="dateutil/America/Los_Angeles")
+ result = ts.tz.dst(ts)
+ expected = timedelta(seconds=0)
+ assert Timestamp(ts).value == epoch
+ assert result == expected
+
+
+def test_timestamp_constructor_identity():
+ # Test for #30543
+ expected = Timestamp("2017-01-01T12")
+ result = Timestamp(expected)
+ assert result is expected
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 692eb6cd8bc43..cee7ac450e411 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -2,11 +2,9 @@
import calendar
from datetime import datetime, timedelta
-from distutils.version import LooseVersion
import locale
import unicodedata
-import dateutil
from dateutil.tz import tzutc
import numpy as np
import pytest
@@ -14,12 +12,10 @@
from pytz import timezone, utc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone
-import pandas.compat as compat
from pandas.compat.numpy import np_datetime64_compat
-from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
-from pandas import NaT, Period, Timedelta, Timestamp
+from pandas import NaT, Timedelta, Timestamp
import pandas._testing as tm
from pandas.tseries import offsets
@@ -198,513 +194,6 @@ def test_resolution(self):
assert Timestamp.resolution == Timedelta(nanoseconds=1)
-class TestTimestampConstructors:
- def test_constructor(self):
- base_str = "2014-07-01 09:00"
- base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1_404_205_200_000_000_000
-
- # confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
-
- tests = [
- (base_str, base_dt, base_expected),
- (
- "2014-07-01 10:00",
- datetime(2014, 7, 1, 10),
- base_expected + 3600 * 1_000_000_000,
- ),
- (
- "2014-07-01 09:00:00.000008000",
- datetime(2014, 7, 1, 9, 0, 0, 8),
- base_expected + 8000,
- ),
- (
- "2014-07-01 09:00:00.000000005",
- Timestamp("2014-07-01 09:00:00.000000005"),
- base_expected + 5,
- ),
- ]
-
- timezones = [
- (None, 0),
- ("UTC", 0),
- (pytz.utc, 0),
- ("Asia/Tokyo", 9),
- ("US/Eastern", -4),
- ("dateutil/US/Pacific", -7),
- (pytz.FixedOffset(-180), -3),
- (dateutil.tz.tzoffset(None, 18000), 5),
- ]
-
- for date_str, date, expected in tests:
- for result in [Timestamp(date_str), Timestamp(date)]:
- # only with timestring
- assert result.value == expected
-
- # re-creation shouldn't affect to internal value
- result = Timestamp(result)
- assert result.value == expected
-
- # with timezone
- for tz, offset in timezones:
- for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
- expected_tz = expected - offset * 3600 * 1_000_000_000
- assert result.value == expected_tz
-
- # should preserve tz
- result = Timestamp(result)
- assert result.value == expected_tz
-
- # should convert to UTC
- if tz is not None:
- result = Timestamp(result).tz_convert("UTC")
- else:
- result = Timestamp(result, tz="UTC")
- expected_utc = expected - offset * 3600 * 1_000_000_000
- assert result.value == expected_utc
-
- def test_constructor_with_stringoffset(self):
- # GH 7833
- base_str = "2014-07-01 11:00:00+02:00"
- base_dt = datetime(2014, 7, 1, 9)
- base_expected = 1_404_205_200_000_000_000
-
- # confirm base representation is correct
- assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected
-
- tests = [
- (base_str, base_expected),
- ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000),
- ("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000),
- ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5),
- ]
-
- timezones = [
- (None, 0),
- ("UTC", 0),
- (pytz.utc, 0),
- ("Asia/Tokyo", 9),
- ("US/Eastern", -4),
- ("dateutil/US/Pacific", -7),
- (pytz.FixedOffset(-180), -3),
- (dateutil.tz.tzoffset(None, 18000), 5),
- ]
-
- for date_str, expected in tests:
- for result in [Timestamp(date_str)]:
- # only with timestring
- assert result.value == expected
-
- # re-creation shouldn't affect to internal value
- result = Timestamp(result)
- assert result.value == expected
-
- # with timezone
- for tz, offset in timezones:
- result = Timestamp(date_str, tz=tz)
- expected_tz = expected
- assert result.value == expected_tz
-
- # should preserve tz
- result = Timestamp(result)
- assert result.value == expected_tz
-
- # should convert to UTC
- result = Timestamp(result).tz_convert("UTC")
- expected_utc = expected
- assert result.value == expected_utc
-
- # This should be 2013-11-01 05:00 in UTC
- # converted to Chicago tz
- result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
- assert result.value == Timestamp("2013-11-01 05:00").value
- expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # This should be 2013-11-01 05:00 in UTC
- # converted to Tokyo tz (+09:00)
- result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo")
- assert result.value == Timestamp("2013-11-01 05:00").value
- expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # GH11708
- # This should be 2015-11-18 10:00 in UTC
- # converted to Asia/Katmandu
- result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
- assert result.value == Timestamp("2015-11-18 10:00").value
- expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- # This should be 2015-11-18 10:00 in UTC
- # converted to Asia/Kolkata
- result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
- assert result.value == Timestamp("2015-11-18 10:00").value
- expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
- assert repr(result) == expected
- assert result == eval(repr(result))
-
- def test_constructor_invalid(self):
- with pytest.raises(TypeError, match="Cannot convert input"):
- Timestamp(slice(2))
- with pytest.raises(ValueError, match="Cannot convert Period"):
- Timestamp(Period("1000-01-01"))
-
- def test_constructor_invalid_tz(self):
- # GH#17690
- with pytest.raises(TypeError, match="must be a datetime.tzinfo"):
- Timestamp("2017-10-22", tzinfo="US/Eastern")
-
- with pytest.raises(ValueError, match="at most one of"):
- Timestamp("2017-10-22", tzinfo=utc, tz="UTC")
-
- with pytest.raises(ValueError, match="Invalid frequency:"):
- # GH#5168
- # case where user tries to pass tz as an arg, not kwarg, gets
- # interpreted as a `freq`
- Timestamp("2012-01-01", "US/Pacific")
-
- def test_constructor_strptime(self):
- # GH25016
- # Test support for Timestamp.strptime
- fmt = "%Y%m%d-%H%M%S-%f%z"
- ts = "20190129-235348-000001+0000"
- with pytest.raises(NotImplementedError):
- Timestamp.strptime(ts, fmt)
-
- def test_constructor_tz_or_tzinfo(self):
- # GH#17943, GH#17690, GH#5168
- stamps = [
- Timestamp(year=2017, month=10, day=22, tz="UTC"),
- Timestamp(year=2017, month=10, day=22, tzinfo=utc),
- Timestamp(year=2017, month=10, day=22, tz=utc),
- Timestamp(datetime(2017, 10, 22), tzinfo=utc),
- Timestamp(datetime(2017, 10, 22), tz="UTC"),
- Timestamp(datetime(2017, 10, 22), tz=utc),
- ]
- assert all(ts == stamps[0] for ts in stamps)
-
- def test_constructor_positional(self):
- # see gh-10758
- with pytest.raises(TypeError):
- Timestamp(2000, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 0, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 13, 1)
- with pytest.raises(ValueError):
- Timestamp(2000, 1, 0)
- with pytest.raises(ValueError):
- Timestamp(2000, 1, 32)
-
- # see gh-11630
- assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112"))
- assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(
- Timestamp("2015-11-12 01:02:03.999999")
- )
-
- def test_constructor_keyword(self):
- # GH 10758
- with pytest.raises(TypeError):
- Timestamp(year=2000, month=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=0, day=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=13, day=1)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=1, day=0)
- with pytest.raises(ValueError):
- Timestamp(year=2000, month=1, day=32)
-
- assert repr(Timestamp(year=2015, month=11, day=12)) == repr(
- Timestamp("20151112")
- )
-
- assert repr(
- Timestamp(
- year=2015,
- month=11,
- day=12,
- hour=1,
- minute=2,
- second=3,
- microsecond=999999,
- )
- ) == repr(Timestamp("2015-11-12 01:02:03.999999"))
-
- def test_constructor_fromordinal(self):
- base = datetime(2000, 1, 1)
-
- ts = Timestamp.fromordinal(base.toordinal(), freq="D")
- assert base == ts
- assert ts.freq == "D"
- assert base.toordinal() == ts.toordinal()
-
- ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern")
- assert Timestamp("2000-01-01", tz="US/Eastern") == ts
- assert base.toordinal() == ts.toordinal()
-
- # GH#3042
- dt = datetime(2011, 4, 16, 0, 0)
- ts = Timestamp.fromordinal(dt.toordinal())
- assert ts.to_pydatetime() == dt
-
- # with a tzinfo
- stamp = Timestamp("2011-4-16", tz="US/Eastern")
- dt_tz = stamp.to_pydatetime()
- ts = Timestamp.fromordinal(dt_tz.toordinal(), tz="US/Eastern")
- assert ts.to_pydatetime() == dt_tz
-
- @pytest.mark.parametrize(
- "result",
- [
- Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
- Timestamp(
- year=2000,
- month=1,
- day=2,
- hour=3,
- minute=4,
- second=5,
- microsecond=6,
- nanosecond=1,
- ),
- Timestamp(
- year=2000,
- month=1,
- day=2,
- hour=3,
- minute=4,
- second=5,
- microsecond=6,
- nanosecond=1,
- tz="UTC",
- ),
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
- ],
- )
- def test_constructor_nanosecond(self, result):
- # GH 18898
- expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
- expected = expected + Timedelta(nanoseconds=1)
- assert result == expected
-
- @pytest.mark.parametrize("z", ["Z0", "Z00"])
- def test_constructor_invalid_Z0_isostring(self, z):
- # GH 8910
- with pytest.raises(ValueError):
- Timestamp("2014-11-02 01:00{}".format(z))
-
- @pytest.mark.parametrize(
- "arg",
- [
- "year",
- "month",
- "day",
- "hour",
- "minute",
- "second",
- "microsecond",
- "nanosecond",
- ],
- )
- def test_invalid_date_kwarg_with_string_input(self, arg):
- kwarg = {arg: 1}
- with pytest.raises(ValueError):
- Timestamp("2010-10-10 12:59:59.999999999", **kwarg)
-
- def test_out_of_bounds_integer_value(self):
- # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp(Timestamp.max.value * 2)
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp(Timestamp.min.value * 2)
-
- def test_out_of_bounds_value(self):
- one_us = np.timedelta64(1).astype("timedelta64[us]")
-
- # By definition we can't go out of bounds in [ns], so we
- # convert the datetime64s to [us] so we can go out of bounds
- min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]")
- max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]")
-
- # No error for the min/max datetimes
- Timestamp(min_ts_us)
- Timestamp(max_ts_us)
-
- # One us less than the minimum is an error
- with pytest.raises(ValueError):
- Timestamp(min_ts_us - one_us)
-
- # One us more than the maximum is an error
- with pytest.raises(ValueError):
- Timestamp(max_ts_us + one_us)
-
- def test_out_of_bounds_string(self):
- with pytest.raises(ValueError):
- Timestamp("1676-01-01")
- with pytest.raises(ValueError):
- Timestamp("2263-01-01")
-
- def test_barely_out_of_bounds(self):
- # GH#19529
- # GH#19382 close enough to bounds that dropping nanos would result
- # in an in-bounds datetime
- with pytest.raises(OutOfBoundsDatetime):
- Timestamp("2262-04-11 23:47:16.854775808")
-
- def test_bounds_with_different_units(self):
- out_of_bounds_dates = ("1677-09-21", "2262-04-12")
-
- time_units = ("D", "h", "m", "s", "ms", "us")
-
- for date_string in out_of_bounds_dates:
- for unit in time_units:
- dt64 = np.datetime64(date_string, unit)
- with pytest.raises(ValueError):
- Timestamp(dt64)
-
- in_bounds_dates = ("1677-09-23", "2262-04-11")
-
- for date_string in in_bounds_dates:
- for unit in time_units:
- dt64 = np.datetime64(date_string, unit)
- Timestamp(dt64)
-
- def test_min_valid(self):
- # Ensure that Timestamp.min is a valid Timestamp
- Timestamp(Timestamp.min)
-
- def test_max_valid(self):
- # Ensure that Timestamp.max is a valid Timestamp
- Timestamp(Timestamp.max)
-
- def test_now(self):
- # GH#9000
- ts_from_string = Timestamp("now")
- ts_from_method = Timestamp.now()
- ts_datetime = datetime.now()
-
- ts_from_string_tz = Timestamp("now", tz="US/Eastern")
- ts_from_method_tz = Timestamp.now(tz="US/Eastern")
-
- # Check that the delta between the times is less than 1s (arbitrarily
- # small)
- delta = Timedelta(seconds=1)
- assert abs(ts_from_method - ts_from_string) < delta
- assert abs(ts_datetime - ts_from_method) < delta
- assert abs(ts_from_method_tz - ts_from_string_tz) < delta
- assert (
- abs(
- ts_from_string_tz.tz_localize(None)
- - ts_from_method_tz.tz_localize(None)
- )
- < delta
- )
-
- def test_today(self):
- ts_from_string = Timestamp("today")
- ts_from_method = Timestamp.today()
- ts_datetime = datetime.today()
-
- ts_from_string_tz = Timestamp("today", tz="US/Eastern")
- ts_from_method_tz = Timestamp.today(tz="US/Eastern")
-
- # Check that the delta between the times is less than 1s (arbitrarily
- # small)
- delta = Timedelta(seconds=1)
- assert abs(ts_from_method - ts_from_string) < delta
- assert abs(ts_datetime - ts_from_method) < delta
- assert abs(ts_from_method_tz - ts_from_string_tz) < delta
- assert (
- abs(
- ts_from_string_tz.tz_localize(None)
- - ts_from_method_tz.tz_localize(None)
- )
- < delta
- )
-
- @pytest.mark.parametrize("tz", [None, pytz.timezone("US/Pacific")])
- def test_disallow_setting_tz(self, tz):
- # GH 3746
- ts = Timestamp("2010")
- with pytest.raises(AttributeError):
- ts.tz = tz
-
- @pytest.mark.parametrize("offset", ["+0300", "+0200"])
- def test_construct_timestamp_near_dst(self, offset):
- # GH 20854
- expected = Timestamp(
- "2016-10-30 03:00:00{}".format(offset), tz="Europe/Helsinki"
- )
- result = Timestamp(expected).tz_convert("Europe/Helsinki")
- assert result == expected
-
- @pytest.mark.parametrize(
- "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"]
- )
- def test_construct_with_different_string_format(self, arg):
- # GH 12064
- result = Timestamp(arg)
- expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
- assert result == expected
-
- def test_construct_timestamp_preserve_original_frequency(self):
- # GH 22311
- result = Timestamp(Timestamp("2010-08-08", freq="D")).freq
- expected = offsets.Day()
- assert result == expected
-
- def test_constructor_invalid_frequency(self):
- # GH 22311
- with pytest.raises(ValueError, match="Invalid frequency:"):
- Timestamp("2012-01-01", freq=[])
-
- @pytest.mark.parametrize("box", [datetime, Timestamp])
- def test_raise_tz_and_tzinfo_in_datetime_input(self, box):
- # GH 23579
- kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": utc}
- with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
- Timestamp(box(**kwargs), tz="US/Pacific")
- with pytest.raises(ValueError, match="Cannot pass a datetime or Timestamp"):
- Timestamp(box(**kwargs), tzinfo=pytz.timezone("US/Pacific"))
-
- def test_dont_convert_dateutil_utc_to_pytz_utc(self):
- result = Timestamp(datetime(2018, 1, 1), tz=tzutc())
- expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())
- assert result == expected
-
- def test_constructor_subclassed_datetime(self):
- # GH 25851
- # ensure that subclassed datetime works for
- # Timestamp creation
- class SubDatetime(datetime):
- pass
-
- data = SubDatetime(2000, 1, 1)
- result = Timestamp(data)
- expected = Timestamp(2000, 1, 1)
- assert result == expected
-
- @pytest.mark.skipif(
- not compat.PY38,
- reason="datetime.fromisocalendar was added in Python version 3.8",
- )
- def test_constructor_fromisocalendar(self):
- # GH 30395
- expected_timestamp = Timestamp("2000-01-03 00:00:00")
- expected_stdlib = datetime.fromisocalendar(2000, 1, 1)
- result = Timestamp.fromisocalendar(2000, 1, 1)
- assert result == expected_timestamp
- assert result == expected_stdlib
- assert isinstance(result, Timestamp)
-
-
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
@@ -1075,34 +564,3 @@ def test_dt_subclass_add_timedelta(lh, rh):
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
-
-
-def test_constructor_ambigous_dst():
- # GH 24329
- # Make sure that calling Timestamp constructor
- # on Timestamp created from ambiguous time
- # doesn't change Timestamp.value
- ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
- expected = ts.value
- result = Timestamp(ts).value
- assert result == expected
-
-
-@pytest.mark.xfail(
- LooseVersion(compat._optional._get_version(dateutil)) < LooseVersion("2.7.0"),
- reason="dateutil moved to Timedelta.total_seconds() in 2.7.0",
-)
-@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999])
-def test_constructor_before_dst_switch(epoch):
- # GH 31043
- # Make sure that calling Timestamp constructor
- # on time just before DST switch doesn't lead to
- # nonexistent time or value change
- # Works only with dateutil >= 2.7.0 as dateutil overrid
- # pandas.Timedelta.total_seconds with
- # datetime.timedelta.total_seconds before
- ts = Timestamp(epoch, tz="dateutil/US/Pacific")
- result = ts.tz.dst(ts)
- expected = timedelta(seconds=0)
- assert Timestamp(ts).value == epoch
- assert result == expected
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index 16a29d10eb414..28f3c0f7429f8 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -1,10 +1,7 @@
import numpy as np
import pytest
-from pandas.core.dtypes.common import is_integer
-
-import pandas as pd
-from pandas import Index, Series, Timestamp, date_range, isna
+from pandas import Index, Series
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -136,492 +133,3 @@ def test_get_set_boolean_different_order(string_series):
sel = string_series[ordered > 0]
exp = string_series[string_series > 0]
tm.assert_series_equal(sel, exp)
-
-
-def test_where_unsafe_int(sint_dtype):
- s = Series(np.arange(10), dtype=sint_dtype)
- mask = s < 5
-
- s[mask] = range(2, 7)
- expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)
-
- tm.assert_series_equal(s, expected)
-
-
-def test_where_unsafe_float(float_dtype):
- s = Series(np.arange(10), dtype=float_dtype)
- mask = s < 5
-
- s[mask] = range(2, 7)
- data = list(range(2, 7)) + list(range(5, 10))
- expected = Series(data, dtype=float_dtype)
-
- tm.assert_series_equal(s, expected)
-
-
-@pytest.mark.parametrize(
- "dtype,expected_dtype",
- [
- (np.int8, np.float64),
- (np.int16, np.float64),
- (np.int32, np.float64),
- (np.int64, np.float64),
- (np.float32, np.float32),
- (np.float64, np.float64),
- ],
-)
-def test_where_unsafe_upcast(dtype, expected_dtype):
- # see gh-9743
- s = Series(np.arange(10), dtype=dtype)
- values = [2.5, 3.5, 4.5, 5.5, 6.5]
- mask = s < 5
- expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
- s[mask] = values
- tm.assert_series_equal(s, expected)
-
-
-def test_where_unsafe():
- # see gh-9731
- s = Series(np.arange(10), dtype="int64")
- values = [2.5, 3.5, 4.5, 5.5]
-
- mask = s > 5
- expected = Series(list(range(6)) + values, dtype="float64")
-
- s[mask] = values
- tm.assert_series_equal(s, expected)
-
- # see gh-3235
- s = Series(np.arange(10), dtype="int64")
- mask = s < 5
- s[mask] = range(2, 7)
- expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
- tm.assert_series_equal(s, expected)
- assert s.dtype == expected.dtype
-
- s = Series(np.arange(10), dtype="int64")
- mask = s > 5
- s[mask] = [0] * 4
- expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
- tm.assert_series_equal(s, expected)
-
- s = Series(np.arange(10))
- mask = s > 5
-
- msg = "cannot assign mismatch length to masked array"
- with pytest.raises(ValueError, match=msg):
- s[mask] = [5, 4, 3, 2, 1]
-
- with pytest.raises(ValueError, match=msg):
- s[mask] = [0] * 5
-
- # dtype changes
- s = Series([1, 2, 3, 4])
- result = s.where(s > 2, np.nan)
- expected = Series([np.nan, np.nan, 3, 4])
- tm.assert_series_equal(result, expected)
-
- # GH 4667
- # setting with None changes dtype
- s = Series(range(10)).astype(float)
- s[8] = None
- result = s[8]
- assert isna(result)
-
- s = Series(range(10)).astype(float)
- s[s > 8] = None
- result = s[isna(s)]
- expected = Series(np.nan, index=[9])
- tm.assert_series_equal(result, expected)
-
-
-def test_where():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.where(cond).dropna()
- rs2 = s[cond]
- tm.assert_series_equal(rs, rs2)
-
- rs = s.where(cond, -s)
- tm.assert_series_equal(rs, s.abs())
-
- rs = s.where(cond)
- assert s.shape == rs.shape
- assert rs is not s
-
- # test alignment
- cond = Series([True, False, False, True, False], index=s.index)
- s2 = -(s.abs())
-
- expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
- rs = s2.where(cond[:3])
- tm.assert_series_equal(rs, expected)
-
- expected = s2.abs()
- expected.iloc[0] = s2[0]
- rs = s2.where(cond[:3], -s2)
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_error():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.where(1)
- with pytest.raises(ValueError, match=msg):
- s.where(cond[:3].values, -s)
-
- # GH 2745
- s = Series([1, 2])
- s[[True, False]] = [0, 1]
- expected = Series([0, 2])
- tm.assert_series_equal(s, expected)
-
- # failures
- msg = "cannot assign mismatch length to masked array"
- with pytest.raises(ValueError, match=msg):
- s[[True, False]] = [0, 2, 3]
- msg = (
- "NumPy boolean array indexing assignment cannot assign 0 input "
- "values to the 1 output values where the mask is true"
- )
- with pytest.raises(ValueError, match=msg):
- s[[True, False]] = []
-
-
-@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
-def test_where_array_like(klass):
- # see gh-15414
- s = Series([1, 2, 3])
- cond = [False, True, True]
- expected = Series([np.nan, 2, 3])
-
- result = s.where(klass(cond))
- tm.assert_series_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "cond",
- [
- [1, 0, 1],
- Series([2, 5, 7]),
- ["True", "False", "True"],
- [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
- ],
-)
-def test_where_invalid_input(cond):
- # see gh-15414: only boolean arrays accepted
- s = Series([1, 2, 3])
- msg = "Boolean array expected for the condition"
-
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.where([True])
-
-
-def test_where_ndframe_align():
- msg = "Array conditional must be same shape as self"
- s = Series([1, 2, 3])
-
- cond = [True]
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- expected = Series([1, np.nan, np.nan])
-
- out = s.where(Series(cond))
- tm.assert_series_equal(out, expected)
-
- cond = np.array([False, True, False, True])
- with pytest.raises(ValueError, match=msg):
- s.where(cond)
-
- expected = Series([np.nan, 2, np.nan])
-
- out = s.where(Series(cond))
- tm.assert_series_equal(out, expected)
-
-
-def test_where_setitem_invalid():
- # GH 2702
- # make sure correct exceptions are raised on invalid list assignment
-
- msg = "cannot set using a {} indexer with a different length than the value"
-
- # slice
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[0:3] = list(range(27))
-
- s[0:3] = list(range(3))
- expected = Series([0, 1, 2])
- tm.assert_series_equal(s.astype(np.int64), expected)
-
- # slice with step
- s = Series(list("abcdef"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[0:4:2] = list(range(27))
-
- s = Series(list("abcdef"))
- s[0:4:2] = list(range(2))
- expected = Series([0, "b", 1, "d", "e", "f"])
- tm.assert_series_equal(s, expected)
-
- # neg slices
- s = Series(list("abcdef"))
-
- with pytest.raises(ValueError, match=msg.format("slice")):
- s[:-1] = list(range(27))
-
- s[-3:-1] = list(range(2))
- expected = Series(["a", "b", "c", 0, 1, "f"])
- tm.assert_series_equal(s, expected)
-
- # list
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("list-like")):
- s[[0, 1, 2]] = list(range(27))
-
- s = Series(list("abc"))
-
- with pytest.raises(ValueError, match=msg.format("list-like")):
- s[[0, 1, 2]] = list(range(2))
-
- # scalar
- s = Series(list("abc"))
- s[0] = list(range(10))
- expected = Series([list(range(10)), "b", "c"])
- tm.assert_series_equal(s, expected)
-
-
-@pytest.mark.parametrize("size", range(2, 6))
-@pytest.mark.parametrize(
- "mask", [[True, False, False, False, False], [True, False], [False]]
-)
-@pytest.mark.parametrize(
- "item", [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]
-)
-# Test numpy arrays, lists and tuples as the input to be
-# broadcast
-@pytest.mark.parametrize(
- "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
-)
-def test_broadcast(size, mask, item, box):
- selection = np.resize(mask, size)
-
- data = np.arange(size, dtype=float)
-
- # Construct the expected series by taking the source
- # data or item based on the selection
- expected = Series(
- [item if use_item else data[i] for i, use_item in enumerate(selection)]
- )
-
- s = Series(data)
- s[selection] = box(item)
- tm.assert_series_equal(s, expected)
-
- s = Series(data)
- result = s.where(~selection, box(item))
- tm.assert_series_equal(result, expected)
-
- s = Series(data)
- result = s.mask(selection, box(item))
- tm.assert_series_equal(result, expected)
-
-
-def test_where_inplace():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.copy()
-
- rs.where(cond, inplace=True)
- tm.assert_series_equal(rs.dropna(), s[cond])
- tm.assert_series_equal(rs, s.where(cond))
-
- rs = s.copy()
- rs.where(cond, -s, inplace=True)
- tm.assert_series_equal(rs, s.where(cond, -s))
-
-
-def test_where_dups():
- # GH 4550
- # where crashes with dups in index
- s1 = Series(list(range(3)))
- s2 = Series(list(range(3)))
- comb = pd.concat([s1, s2])
- result = comb.where(comb < 2)
- expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(result, expected)
-
- # GH 4548
- # inplace updating not working with dups
- comb[comb < 1] = 5
- expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(comb, expected)
-
- comb[comb < 2] += 10
- expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
- tm.assert_series_equal(comb, expected)
-
-
-def test_where_numeric_with_string():
- # GH 9280
- s = pd.Series([1, 2, 3])
- w = s.where(s > 1, "X")
-
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
- w = s.where(s > 1, ["X", "Y", "Z"])
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
- w = s.where(s > 1, np.array(["X", "Y", "Z"]))
- assert not is_integer(w[0])
- assert is_integer(w[1])
- assert is_integer(w[2])
- assert isinstance(w[0], str)
- assert w.dtype == "object"
-
-
-def test_where_timedelta_coerce():
- s = Series([1, 2], dtype="timedelta64[ns]")
- expected = Series([10, 10])
- mask = np.array([False, False])
-
- rs = s.where(mask, [10, 10])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10.0)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, 10.0])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, np.nan])
- expected = Series([10, None], dtype="object")
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_datetime_conversion():
- s = Series(date_range("20130102", periods=2))
- expected = Series([10, 10])
- mask = np.array([False, False])
-
- rs = s.where(mask, [10, 10])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10.0)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, 10.0])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, np.nan])
- expected = Series([10, None], dtype="object")
- tm.assert_series_equal(rs, expected)
-
- # GH 15701
- timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
- s = Series([pd.Timestamp(t) for t in timestamps])
- rs = s.where(Series([False, True]))
- expected = Series([pd.NaT, s[1]])
- tm.assert_series_equal(rs, expected)
-
-
-def test_where_dt_tz_values(tz_naive_fixture):
- ser1 = pd.Series(
- pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
- )
- ser2 = pd.Series(
- pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
- )
- mask = pd.Series([True, True, False])
- result = ser1.where(mask, ser2)
- exp = pd.Series(
- pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
- )
- tm.assert_series_equal(exp, result)
-
-
-def test_mask():
- # compare with tested results in test_where
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.where(~cond, np.nan)
- tm.assert_series_equal(rs, s.mask(cond))
-
- rs = s.where(~cond)
- rs2 = s.mask(cond)
- tm.assert_series_equal(rs, rs2)
-
- rs = s.where(~cond, -s)
- rs2 = s.mask(cond, -s)
- tm.assert_series_equal(rs, rs2)
-
- cond = Series([True, False, False, True, False], index=s.index)
- s2 = -(s.abs())
- rs = s2.where(~cond[:3])
- rs2 = s2.mask(cond[:3])
- tm.assert_series_equal(rs, rs2)
-
- rs = s2.where(~cond[:3], -s2)
- rs2 = s2.mask(cond[:3], -s2)
- tm.assert_series_equal(rs, rs2)
-
- msg = "Array conditional must be same shape as self"
- with pytest.raises(ValueError, match=msg):
- s.mask(1)
- with pytest.raises(ValueError, match=msg):
- s.mask(cond[:3].values, -s)
-
- # dtype changes
- s = Series([1, 2, 3, 4])
- result = s.mask(s > 2, np.nan)
- expected = Series([1, 2, np.nan, np.nan])
- tm.assert_series_equal(result, expected)
-
- # see gh-21891
- s = Series([1, 2])
- res = s.mask([True, False])
-
- exp = Series([np.nan, 2])
- tm.assert_series_equal(res, exp)
-
-
-def test_mask_inplace():
- s = Series(np.random.randn(5))
- cond = s > 0
-
- rs = s.copy()
- rs.mask(cond, inplace=True)
- tm.assert_series_equal(rs.dropna(), s[~cond])
- tm.assert_series_equal(rs, s.mask(cond))
-
- rs = s.copy()
- rs.mask(cond, -s, inplace=True)
- tm.assert_series_equal(rs, s.mask(cond, -s))
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 77085ef547690..acaa9de88a836 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -1,4 +1,5 @@
from datetime import datetime, timedelta
+import re
import numpy as np
import pytest
@@ -147,7 +148,6 @@ def test_frame_datetime64_duplicated():
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
- from pandas import date_range
N = 50
# testing with timezone, GH #2785
@@ -188,8 +188,6 @@ def test_getitem_setitem_datetime_tz_dateutil():
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
- from pandas import date_range
-
N = 50
# testing with timezone, GH #2785
@@ -372,7 +370,6 @@ def test_getitem_median_slice_bug():
def test_datetime_indexing():
- from pandas import date_range
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
@@ -380,7 +377,7 @@ def test_datetime_indexing():
s = Series(len(index), index=index)
stamp = Timestamp("1/8/2000")
- with pytest.raises(KeyError, match=r"^947289600000000000$"):
+ with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
@@ -389,7 +386,7 @@ def test_datetime_indexing():
s = Series(len(index), index=index)
s = s[::-1]
- with pytest.raises(KeyError, match=r"^947289600000000000$"):
+ with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
@@ -495,8 +492,9 @@ def test_duplicate_dates_indexing(dups):
expected = Series(np.where(mask, 0, ts), index=ts.index)
tm.assert_series_equal(cp, expected)
- with pytest.raises(KeyError, match=r"^947116800000000000$"):
- ts[datetime(2000, 1, 6)]
+ key = datetime(2000, 1, 6)
+ with pytest.raises(KeyError, match=re.escape(repr(key))):
+ ts[key]
# new index
ts[datetime(2000, 1, 6)] = 0
diff --git a/pandas/tests/series/indexing/test_get.py b/pandas/tests/series/indexing/test_get.py
new file mode 100644
index 0000000000000..438b61ed203a3
--- /dev/null
+++ b/pandas/tests/series/indexing/test_get.py
@@ -0,0 +1,134 @@
+import numpy as np
+
+import pandas as pd
+from pandas import Series
+
+
+def test_get():
+ # GH 6383
+ s = Series(
+ np.array(
+ [
+ 43,
+ 48,
+ 60,
+ 48,
+ 50,
+ 51,
+ 50,
+ 45,
+ 57,
+ 48,
+ 56,
+ 45,
+ 51,
+ 39,
+ 55,
+ 43,
+ 54,
+ 52,
+ 51,
+ 54,
+ ]
+ )
+ )
+
+ result = s.get(25, 0)
+ expected = 0
+ assert result == expected
+
+ s = Series(
+ np.array(
+ [
+ 43,
+ 48,
+ 60,
+ 48,
+ 50,
+ 51,
+ 50,
+ 45,
+ 57,
+ 48,
+ 56,
+ 45,
+ 51,
+ 39,
+ 55,
+ 43,
+ 54,
+ 52,
+ 51,
+ 54,
+ ]
+ ),
+ index=pd.Float64Index(
+ [
+ 25.0,
+ 36.0,
+ 49.0,
+ 64.0,
+ 81.0,
+ 100.0,
+ 121.0,
+ 144.0,
+ 169.0,
+ 196.0,
+ 1225.0,
+ 1296.0,
+ 1369.0,
+ 1444.0,
+ 1521.0,
+ 1600.0,
+ 1681.0,
+ 1764.0,
+ 1849.0,
+ 1936.0,
+ ]
+ ),
+ )
+
+ result = s.get(25, 0)
+ expected = 43
+ assert result == expected
+
+ # GH 7407
+ # with a boolean accessor
+ df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
+ vc = df.i.value_counts()
+ result = vc.get(99, default="Missing")
+ assert result == "Missing"
+
+ vc = df.b.value_counts()
+ result = vc.get(False, default="Missing")
+ assert result == 3
+
+ result = vc.get(True, default="Missing")
+ assert result == "Missing"
+
+
+def test_get_nan():
+ # GH 8569
+ s = pd.Float64Index(range(10)).to_series()
+ assert s.get(np.nan) is None
+ assert s.get(np.nan, default="Missing") == "Missing"
+
+
+def test_get_nan_multiple():
+ # GH 8569
+ # ensure that fixing "test_get_nan" above hasn't broken get
+ # with multiple elements
+ s = pd.Float64Index(range(10)).to_series()
+
+ idx = [2, 30]
+ assert s.get(idx) is None
+
+ idx = [2, np.nan]
+ assert s.get(idx) is None
+
+ # GH 17295 - all missing keys
+ idx = [20, 30]
+ assert s.get(idx) is None
+
+ idx = [np.nan, np.nan]
+ assert s.get(idx) is None
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 18dbd22b73b35..fa5c75d5e4ad9 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -17,10 +17,9 @@
def test_basic_indexing():
s = Series(np.random.randn(5), index=["a", "b", "a", "a", "b"])
- msg = "index out of bounds"
+ msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s[5]
- msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s[5] = 0
@@ -29,7 +28,6 @@ def test_basic_indexing():
s = s.sort_index()
- msg = r"index out of bounds|^5$"
with pytest.raises(IndexError, match=msg):
s[5]
msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"
@@ -165,11 +163,12 @@ def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
def test_getitem_out_of_bounds(datetime_series):
# don't segfault, GH #495
- msg = "index out of bounds"
+ msg = r"index \d+ is out of bounds for axis 0 with size \d+"
with pytest.raises(IndexError, match=msg):
datetime_series[len(datetime_series)]
# GH #917
+ msg = r"index -\d+ is out of bounds for axis 0 with size \d+"
s = Series([], dtype=object)
with pytest.raises(IndexError, match=msg):
s[-1]
@@ -430,7 +429,7 @@ def test_basic_getitem_setitem_corner(datetime_series):
@pytest.mark.parametrize("tz", ["US/Eastern", "UTC", "Asia/Tokyo"])
def test_setitem_with_tz(tz):
orig = pd.Series(pd.date_range("2016-01-01", freq="H", periods=3, tz=tz))
- assert orig.dtype == "datetime64[ns, {0}]".format(tz)
+ assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
@@ -457,7 +456,7 @@ def test_setitem_with_tz(tz):
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
- assert vals.dtype == "datetime64[ns, {0}]".format(tz)
+ assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
exp = pd.Series(
@@ -482,7 +481,7 @@ def test_setitem_with_tz_dst():
# GH XXX
tz = "US/Eastern"
orig = pd.Series(pd.date_range("2016-11-06", freq="H", periods=3, tz=tz))
- assert orig.dtype == "datetime64[ns, {0}]".format(tz)
+ assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
@@ -509,7 +508,7 @@ def test_setitem_with_tz_dst():
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
- assert vals.dtype == "datetime64[ns, {0}]".format(tz)
+ assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
exp = pd.Series(
@@ -883,41 +882,6 @@ def test_pop():
tm.assert_series_equal(k, expected)
-def test_take():
- s = Series([-1, 5, 6, 2, 4])
-
- actual = s.take([1, 3, 4])
- expected = Series([5, 2, 4], index=[1, 3, 4])
- tm.assert_series_equal(actual, expected)
-
- actual = s.take([-1, 3, 4])
- expected = Series([4, 2, 4], index=[4, 3, 4])
- tm.assert_series_equal(actual, expected)
-
- msg = "index {} is out of bounds for( axis 0 with)? size 5"
- with pytest.raises(IndexError, match=msg.format(10)):
- s.take([1, 10])
- with pytest.raises(IndexError, match=msg.format(5)):
- s.take([2, 5])
-
-
-def test_take_categorical():
- # https://github.com/pandas-dev/pandas/issues/20664
- s = Series(pd.Categorical(["a", "b", "c"]))
- result = s.take([-2, -2, 0])
- expected = Series(
- pd.Categorical(["b", "b", "a"], categories=["a", "b", "c"]), index=[1, 1, 0]
- )
- tm.assert_series_equal(result, expected)
-
-
-def test_head_tail(string_series):
- tm.assert_series_equal(string_series.head(), string_series[:5])
- tm.assert_series_equal(string_series.head(0), string_series[0:0])
- tm.assert_series_equal(string_series.tail(), string_series[-5:])
- tm.assert_series_equal(string_series.tail(0), string_series[0:0])
-
-
def test_uint_drop(any_int_dtype):
# see GH18311
# assigning series.loc[0] = 4 changed series.dtype to int
diff --git a/pandas/tests/series/indexing/test_mask.py b/pandas/tests/series/indexing/test_mask.py
new file mode 100644
index 0000000000000..dc4fb530dbb52
--- /dev/null
+++ b/pandas/tests/series/indexing/test_mask.py
@@ -0,0 +1,65 @@
+import numpy as np
+import pytest
+
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_mask():
+ # compare with tested results in test_where
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.where(~cond, np.nan)
+ tm.assert_series_equal(rs, s.mask(cond))
+
+ rs = s.where(~cond)
+ rs2 = s.mask(cond)
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s.where(~cond, -s)
+ rs2 = s.mask(cond, -s)
+ tm.assert_series_equal(rs, rs2)
+
+ cond = Series([True, False, False, True, False], index=s.index)
+ s2 = -(s.abs())
+ rs = s2.where(~cond[:3])
+ rs2 = s2.mask(cond[:3])
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s2.where(~cond[:3], -s2)
+ rs2 = s2.mask(cond[:3], -s2)
+ tm.assert_series_equal(rs, rs2)
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.mask(1)
+ with pytest.raises(ValueError, match=msg):
+ s.mask(cond[:3].values, -s)
+
+ # dtype changes
+ s = Series([1, 2, 3, 4])
+ result = s.mask(s > 2, np.nan)
+ expected = Series([1, 2, np.nan, np.nan])
+ tm.assert_series_equal(result, expected)
+
+ # see gh-21891
+ s = Series([1, 2])
+ res = s.mask([True, False])
+
+ exp = Series([np.nan, 2])
+ tm.assert_series_equal(res, exp)
+
+
+def test_mask_inplace():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.copy()
+ rs.mask(cond, inplace=True)
+ tm.assert_series_equal(rs.dropna(), s[~cond])
+ tm.assert_series_equal(rs, s.mask(cond))
+
+ rs = s.copy()
+ rs.mask(cond, -s, inplace=True)
+ tm.assert_series_equal(rs, s.mask(cond, -s))
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 3684ca00c2f17..7e73e6366438b 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -1,141 +1,10 @@
import numpy as np
import pytest
-import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
-def test_get():
- # GH 6383
- s = Series(
- np.array(
- [
- 43,
- 48,
- 60,
- 48,
- 50,
- 51,
- 50,
- 45,
- 57,
- 48,
- 56,
- 45,
- 51,
- 39,
- 55,
- 43,
- 54,
- 52,
- 51,
- 54,
- ]
- )
- )
-
- result = s.get(25, 0)
- expected = 0
- assert result == expected
-
- s = Series(
- np.array(
- [
- 43,
- 48,
- 60,
- 48,
- 50,
- 51,
- 50,
- 45,
- 57,
- 48,
- 56,
- 45,
- 51,
- 39,
- 55,
- 43,
- 54,
- 52,
- 51,
- 54,
- ]
- ),
- index=pd.Float64Index(
- [
- 25.0,
- 36.0,
- 49.0,
- 64.0,
- 81.0,
- 100.0,
- 121.0,
- 144.0,
- 169.0,
- 196.0,
- 1225.0,
- 1296.0,
- 1369.0,
- 1444.0,
- 1521.0,
- 1600.0,
- 1681.0,
- 1764.0,
- 1849.0,
- 1936.0,
- ]
- ),
- )
-
- result = s.get(25, 0)
- expected = 43
- assert result == expected
-
- # GH 7407
- # with a boolean accessor
- df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
- vc = df.i.value_counts()
- result = vc.get(99, default="Missing")
- assert result == "Missing"
-
- vc = df.b.value_counts()
- result = vc.get(False, default="Missing")
- assert result == 3
-
- result = vc.get(True, default="Missing")
- assert result == "Missing"
-
-
-def test_get_nan():
- # GH 8569
- s = pd.Float64Index(range(10)).to_series()
- assert s.get(np.nan) is None
- assert s.get(np.nan, default="Missing") == "Missing"
-
-
-def test_get_nan_multiple():
- # GH 8569
- # ensure that fixing "test_get_nan" above hasn't broken get
- # with multiple elements
- s = pd.Float64Index(range(10)).to_series()
-
- idx = [2, 30]
- assert s.get(idx) is None
-
- idx = [2, np.nan]
- assert s.get(idx) is None
-
- # GH 17295 - all missing keys
- idx = [20, 30]
- assert s.get(idx) is None
-
- idx = [np.nan, np.nan]
- assert s.get(idx) is None
-
-
def test_delitem():
# GH 5542
# should delete the item inplace
@@ -202,10 +71,9 @@ def test_slice_float64():
def test_getitem_negative_out_of_bounds():
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
- msg = "index out of bounds"
+ msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
s[-11]
- msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
s[-11] = "foo"
@@ -260,9 +128,8 @@ def test_setitem_float_labels():
def test_slice_float_get_set(datetime_series):
msg = (
- r"cannot do slice indexing on <class 'pandas\.core\.indexes"
- r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
- r"of <class 'float'>"
+ "cannot do slice indexing on DatetimeIndex with these indexers "
+ r"\[{key}\] of type float"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
diff --git a/pandas/tests/series/indexing/test_take.py b/pandas/tests/series/indexing/test_take.py
new file mode 100644
index 0000000000000..9368d49e5ff2b
--- /dev/null
+++ b/pandas/tests/series/indexing/test_take.py
@@ -0,0 +1,33 @@
+import pytest
+
+import pandas as pd
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_take():
+ ser = Series([-1, 5, 6, 2, 4])
+
+ actual = ser.take([1, 3, 4])
+ expected = Series([5, 2, 4], index=[1, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ actual = ser.take([-1, 3, 4])
+ expected = Series([4, 2, 4], index=[4, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ msg = "index {} is out of bounds for( axis 0 with)? size 5"
+ with pytest.raises(IndexError, match=msg.format(10)):
+ ser.take([1, 10])
+ with pytest.raises(IndexError, match=msg.format(5)):
+ ser.take([2, 5])
+
+
+def test_take_categorical():
+ # https://github.com/pandas-dev/pandas/issues/20664
+ ser = Series(pd.Categorical(["a", "b", "c"]))
+ result = ser.take([-2, -2, 0])
+ expected = Series(
+ pd.Categorical(["b", "b", "a"], categories=["a", "b", "c"]), index=[1, 1, 0]
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
new file mode 100644
index 0000000000000..9703f5afaf689
--- /dev/null
+++ b/pandas/tests/series/indexing/test_where.py
@@ -0,0 +1,437 @@
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.common import is_integer
+
+import pandas as pd
+from pandas import Series, Timestamp, date_range, isna
+import pandas._testing as tm
+
+
+def test_where_unsafe_int(sint_dtype):
+ s = Series(np.arange(10), dtype=sint_dtype)
+ mask = s < 5
+
+ s[mask] = range(2, 7)
+ expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)
+
+ tm.assert_series_equal(s, expected)
+
+
+def test_where_unsafe_float(float_dtype):
+ s = Series(np.arange(10), dtype=float_dtype)
+ mask = s < 5
+
+ s[mask] = range(2, 7)
+ data = list(range(2, 7)) + list(range(5, 10))
+ expected = Series(data, dtype=float_dtype)
+
+ tm.assert_series_equal(s, expected)
+
+
+@pytest.mark.parametrize(
+ "dtype,expected_dtype",
+ [
+ (np.int8, np.float64),
+ (np.int16, np.float64),
+ (np.int32, np.float64),
+ (np.int64, np.float64),
+ (np.float32, np.float32),
+ (np.float64, np.float64),
+ ],
+)
+def test_where_unsafe_upcast(dtype, expected_dtype):
+ # see gh-9743
+ s = Series(np.arange(10), dtype=dtype)
+ values = [2.5, 3.5, 4.5, 5.5, 6.5]
+ mask = s < 5
+ expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
+ s[mask] = values
+ tm.assert_series_equal(s, expected)
+
+
+def test_where_unsafe():
+ # see gh-9731
+ s = Series(np.arange(10), dtype="int64")
+ values = [2.5, 3.5, 4.5, 5.5]
+
+ mask = s > 5
+ expected = Series(list(range(6)) + values, dtype="float64")
+
+ s[mask] = values
+ tm.assert_series_equal(s, expected)
+
+ # see gh-3235
+ s = Series(np.arange(10), dtype="int64")
+ mask = s < 5
+ s[mask] = range(2, 7)
+ expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
+ tm.assert_series_equal(s, expected)
+ assert s.dtype == expected.dtype
+
+ s = Series(np.arange(10), dtype="int64")
+ mask = s > 5
+ s[mask] = [0] * 4
+ expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
+ tm.assert_series_equal(s, expected)
+
+ s = Series(np.arange(10))
+ mask = s > 5
+
+ msg = "cannot assign mismatch length to masked array"
+ with pytest.raises(ValueError, match=msg):
+ s[mask] = [5, 4, 3, 2, 1]
+
+ with pytest.raises(ValueError, match=msg):
+ s[mask] = [0] * 5
+
+ # dtype changes
+ s = Series([1, 2, 3, 4])
+ result = s.where(s > 2, np.nan)
+ expected = Series([np.nan, np.nan, 3, 4])
+ tm.assert_series_equal(result, expected)
+
+ # GH 4667
+ # setting with None changes dtype
+ s = Series(range(10)).astype(float)
+ s[8] = None
+ result = s[8]
+ assert isna(result)
+
+ s = Series(range(10)).astype(float)
+ s[s > 8] = None
+ result = s[isna(s)]
+ expected = Series(np.nan, index=[9])
+ tm.assert_series_equal(result, expected)
+
+
+def test_where():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.where(cond).dropna()
+ rs2 = s[cond]
+ tm.assert_series_equal(rs, rs2)
+
+ rs = s.where(cond, -s)
+ tm.assert_series_equal(rs, s.abs())
+
+ rs = s.where(cond)
+ assert s.shape == rs.shape
+ assert rs is not s
+
+ # test alignment
+ cond = Series([True, False, False, True, False], index=s.index)
+ s2 = -(s.abs())
+
+ expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
+ rs = s2.where(cond[:3])
+ tm.assert_series_equal(rs, expected)
+
+ expected = s2.abs()
+ expected.iloc[0] = s2[0]
+ rs = s2.where(cond[:3], -s2)
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_error():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.where(1)
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond[:3].values, -s)
+
+ # GH 2745
+ s = Series([1, 2])
+ s[[True, False]] = [0, 1]
+ expected = Series([0, 2])
+ tm.assert_series_equal(s, expected)
+
+ # failures
+ msg = "cannot assign mismatch length to masked array"
+ with pytest.raises(ValueError, match=msg):
+ s[[True, False]] = [0, 2, 3]
+ msg = (
+ "NumPy boolean array indexing assignment cannot assign 0 input "
+ "values to the 1 output values where the mask is true"
+ )
+ with pytest.raises(ValueError, match=msg):
+ s[[True, False]] = []
+
+
+@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+def test_where_array_like(klass):
+ # see gh-15414
+ s = Series([1, 2, 3])
+ cond = [False, True, True]
+ expected = Series([np.nan, 2, 3])
+
+ result = s.where(klass(cond))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "cond",
+ [
+ [1, 0, 1],
+ Series([2, 5, 7]),
+ ["True", "False", "True"],
+ [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
+ ],
+)
+def test_where_invalid_input(cond):
+ # see gh-15414: only boolean arrays accepted
+ s = Series([1, 2, 3])
+ msg = "Boolean array expected for the condition"
+
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ msg = "Array conditional must be same shape as self"
+ with pytest.raises(ValueError, match=msg):
+ s.where([True])
+
+
+def test_where_ndframe_align():
+ msg = "Array conditional must be same shape as self"
+ s = Series([1, 2, 3])
+
+ cond = [True]
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ expected = Series([1, np.nan, np.nan])
+
+ out = s.where(Series(cond))
+ tm.assert_series_equal(out, expected)
+
+ cond = np.array([False, True, False, True])
+ with pytest.raises(ValueError, match=msg):
+ s.where(cond)
+
+ expected = Series([np.nan, 2, np.nan])
+
+ out = s.where(Series(cond))
+ tm.assert_series_equal(out, expected)
+
+
+def test_where_setitem_invalid():
+ # GH 2702
+ # make sure correct exceptions are raised on invalid list assignment
+
+ msg = "cannot set using a {} indexer with a different length than the value"
+
+ # slice
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[0:3] = list(range(27))
+
+ s[0:3] = list(range(3))
+ expected = Series([0, 1, 2])
+ tm.assert_series_equal(s.astype(np.int64), expected)
+
+ # slice with step
+ s = Series(list("abcdef"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[0:4:2] = list(range(27))
+
+ s = Series(list("abcdef"))
+ s[0:4:2] = list(range(2))
+ expected = Series([0, "b", 1, "d", "e", "f"])
+ tm.assert_series_equal(s, expected)
+
+ # neg slices
+ s = Series(list("abcdef"))
+
+ with pytest.raises(ValueError, match=msg.format("slice")):
+ s[:-1] = list(range(27))
+
+ s[-3:-1] = list(range(2))
+ expected = Series(["a", "b", "c", 0, 1, "f"])
+ tm.assert_series_equal(s, expected)
+
+ # list
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("list-like")):
+ s[[0, 1, 2]] = list(range(27))
+
+ s = Series(list("abc"))
+
+ with pytest.raises(ValueError, match=msg.format("list-like")):
+ s[[0, 1, 2]] = list(range(2))
+
+ # scalar
+ s = Series(list("abc"))
+ s[0] = list(range(10))
+ expected = Series([list(range(10)), "b", "c"])
+ tm.assert_series_equal(s, expected)
+
+
+@pytest.mark.parametrize("size", range(2, 6))
+@pytest.mark.parametrize(
+ "mask", [[True, False, False, False, False], [True, False], [False]]
+)
+@pytest.mark.parametrize(
+ "item", [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]
+)
+# Test numpy arrays, lists and tuples as the input to be
+# broadcast
+@pytest.mark.parametrize(
+ "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
+)
+def test_broadcast(size, mask, item, box):
+ selection = np.resize(mask, size)
+
+ data = np.arange(size, dtype=float)
+
+ # Construct the expected series by taking the source
+ # data or item based on the selection
+ expected = Series(
+ [item if use_item else data[i] for i, use_item in enumerate(selection)]
+ )
+
+ s = Series(data)
+ s[selection] = box(item)
+ tm.assert_series_equal(s, expected)
+
+ s = Series(data)
+ result = s.where(~selection, box(item))
+ tm.assert_series_equal(result, expected)
+
+ s = Series(data)
+ result = s.mask(selection, box(item))
+ tm.assert_series_equal(result, expected)
+
+
+def test_where_inplace():
+ s = Series(np.random.randn(5))
+ cond = s > 0
+
+ rs = s.copy()
+
+ rs.where(cond, inplace=True)
+ tm.assert_series_equal(rs.dropna(), s[cond])
+ tm.assert_series_equal(rs, s.where(cond))
+
+ rs = s.copy()
+ rs.where(cond, -s, inplace=True)
+ tm.assert_series_equal(rs, s.where(cond, -s))
+
+
+def test_where_dups():
+ # GH 4550
+ # where crashes with dups in index
+ s1 = Series(list(range(3)))
+ s2 = Series(list(range(3)))
+ comb = pd.concat([s1, s2])
+ result = comb.where(comb < 2)
+ expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(result, expected)
+
+ # GH 4548
+ # inplace updating not working with dups
+ comb[comb < 1] = 5
+ expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(comb, expected)
+
+ comb[comb < 2] += 10
+ expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
+ tm.assert_series_equal(comb, expected)
+
+
+def test_where_numeric_with_string():
+ # GH 9280
+ s = pd.Series([1, 2, 3])
+ w = s.where(s > 1, "X")
+
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+ w = s.where(s > 1, ["X", "Y", "Z"])
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+ w = s.where(s > 1, np.array(["X", "Y", "Z"]))
+ assert not is_integer(w[0])
+ assert is_integer(w[1])
+ assert is_integer(w[2])
+ assert isinstance(w[0], str)
+ assert w.dtype == "object"
+
+
+def test_where_timedelta_coerce():
+ s = Series([1, 2], dtype="timedelta64[ns]")
+ expected = Series([10, 10])
+ mask = np.array([False, False])
+
+ rs = s.where(mask, [10, 10])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10.0)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, 10.0])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, np.nan])
+ expected = Series([10, None], dtype="object")
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_datetime_conversion():
+ s = Series(date_range("20130102", periods=2))
+ expected = Series([10, 10])
+ mask = np.array([False, False])
+
+ rs = s.where(mask, [10, 10])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, 10.0)
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, 10.0])
+ tm.assert_series_equal(rs, expected)
+
+ rs = s.where(mask, [10.0, np.nan])
+ expected = Series([10, None], dtype="object")
+ tm.assert_series_equal(rs, expected)
+
+ # GH 15701
+ timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
+ s = Series([pd.Timestamp(t) for t in timestamps])
+ rs = s.where(Series([False, True]))
+ expected = Series([pd.NaT, s[1]])
+ tm.assert_series_equal(rs, expected)
+
+
+def test_where_dt_tz_values(tz_naive_fixture):
+ ser1 = pd.Series(
+ pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
+ )
+ ser2 = pd.Series(
+ pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
+ )
+ mask = pd.Series([True, True, False])
+ result = ser1.where(mask, ser2)
+ exp = pd.Series(
+ pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
+ )
+ tm.assert_series_equal(exp, result)
diff --git a/pandas/tests/series/indexing/test_xs.py b/pandas/tests/series/indexing/test_xs.py
new file mode 100644
index 0000000000000..43458ca2ebeb2
--- /dev/null
+++ b/pandas/tests/series/indexing/test_xs.py
@@ -0,0 +1,17 @@
+import numpy as np
+
+import pandas as pd
+
+
+def test_xs_datetimelike_wrapping():
+ # GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
+ arr = pd.date_range("2016-01-01", periods=3)._data._data
+
+ ser = pd.Series(arr, dtype=object)
+ for i in range(len(ser)):
+ ser.iloc[i] = arr[i]
+ assert ser.dtype == object
+ assert isinstance(ser[0], np.datetime64)
+
+ result = ser.xs(0)
+ assert isinstance(result, np.datetime64)
diff --git a/pandas/tests/series/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
similarity index 100%
rename from pandas/tests/series/test_convert_dtypes.py
rename to pandas/tests/series/methods/test_convert_dtypes.py
diff --git a/pandas/tests/series/methods/test_head_tail.py b/pandas/tests/series/methods/test_head_tail.py
new file mode 100644
index 0000000000000..d9f8d85eda350
--- /dev/null
+++ b/pandas/tests/series/methods/test_head_tail.py
@@ -0,0 +1,8 @@
+import pandas._testing as tm
+
+
+def test_head_tail(string_series):
+ tm.assert_series_equal(string_series.head(), string_series[:5])
+ tm.assert_series_equal(string_series.head(0), string_series[0:0])
+ tm.assert_series_equal(string_series.tail(), string_series[-5:])
+ tm.assert_series_equal(string_series.tail(0), string_series[0:0])
diff --git a/pandas/tests/series/test_reshaping.py b/pandas/tests/series/methods/test_unstack.py
similarity index 100%
rename from pandas/tests/series/test_reshaping.py
rename to pandas/tests/series/methods/test_unstack.py
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 239353d3955b4..4cb471597b67a 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -4,7 +4,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, to_datetime
import pandas._testing as tm
@@ -252,7 +252,6 @@ def test_concat_empty_series_dtypes(self):
assert result.dtype == expected
def test_combine_first_dt64(self):
- from pandas.core.tools.datetimes import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 640cd8faf6811..b377ca2869bd3 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -2534,3 +2534,29 @@ def test_sort_ascending_list(self):
result = s.sort_index(level=["third", "first"], ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "keys, expected",
+ [
+ (["b", "a"], [["b", "b", "a", "a"], [1, 2, 1, 2]]),
+ (["a", "b"], [["a", "a", "b", "b"], [1, 2, 1, 2]]),
+ ((["a", "b"], [1, 2]), [["a", "a", "b", "b"], [1, 2, 1, 2]]),
+ ((["a", "b"], [2, 1]), [["a", "a", "b", "b"], [2, 1, 2, 1]]),
+ ((["b", "a"], [2, 1]), [["b", "b", "a", "a"], [2, 1, 2, 1]]),
+ ((["b", "a"], [1, 2]), [["b", "b", "a", "a"], [1, 2, 1, 2]]),
+ ((["c", "a"], [2, 1]), [["c", "a", "a"], [1, 2, 1]]),
+ ],
+ )
+ @pytest.mark.parametrize("dim", ["index", "columns"])
+ def test_multilevel_index_loc_order(self, dim, keys, expected):
+ # GH 22797
+ # Try to respect order of keys given for MultiIndex.loc
+ kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]}
+ df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,)
+ exp_index = MultiIndex.from_arrays(expected)
+ if dim == "index":
+ res = df.loc[keys, :]
+ tm.assert_index_equal(res.index, exp_index)
+ elif dim == "columns":
+ res = df.loc[:, keys]
+ tm.assert_index_equal(res.columns, exp_index)
diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py
index b6069c446160d..beaefe9109e91 100644
--- a/pandas/tests/tseries/frequencies/test_to_offset.py
+++ b/pandas/tests/tseries/frequencies/test_to_offset.py
@@ -86,7 +86,7 @@ def test_to_offset_invalid(freqstr):
# We escape string because some of our
# inputs contain regex special characters.
- msg = re.escape("Invalid frequency: {freqstr}".format(freqstr=freqstr))
+ msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
frequencies.to_offset(freqstr)
diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index 6a19adef728e4..8860e6fe272ce 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -76,3 +76,8 @@ def test_rng_context():
with tm.RNGContext(1):
assert np.random.randn() == expected1
assert np.random.randn() == expected0
+
+
+def test_external_error_raised():
+ with tm.external_error_raised(TypeError):
+ raise TypeError("Should not check this error message, so it will pass")
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 2801a2bf9c371..fdfa436ce6536 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -43,7 +43,8 @@ def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
("python-bits", struct.calcsize("P") * 8),
("OS", f"{sysname}"),
("OS-release", f"{release}"),
- # ("Version", "{version}".format(version=version)),
+ # FIXME: dont leave commented-out
+ # ("Version", f"{version}"),
("machine", f"{machine}"),
("processor", f"{processor}"),
("byteorder", f"{sys.byteorder}"),
@@ -114,14 +115,13 @@ def show_versions(as_json=False):
else:
maxlen = max(len(x) for x in deps)
- tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
print("")
for k, stat in deps_blob:
- print(tpl.format(k=k, stat=stat))
+ print(f"{{k:<{maxlen}}}: {{stat}}")
def main() -> int:
diff --git a/setup.cfg b/setup.cfg
index cf931f52489a8..c298aa652824c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -138,9 +138,6 @@ ignore_errors=True
[mypy-pandas.tests.extension.decimal.test_decimal]
ignore_errors=True
-[mypy-pandas.tests.extension.json.array]
-ignore_errors=True
-
[mypy-pandas.tests.extension.json.test_json]
ignore_errors=True
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index ef0b2a0270a0b..83eb152c9d944 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -35,15 +35,7 @@ navbar:
- name: "Getting started"
target: /getting_started.html
- name: "Documentation"
- target:
- - name: "User guide"
- target: /docs/user_guide/index.html
- - name: "API reference"
- target: /docs/reference/index.html
- - name: "Release notes"
- target: /docs/whatsnew/index.html
- - name: "Older versions"
- target: https://pandas.pydata.org/pandas-docs/version/
+ target: /docs/
- name: "Community"
target:
- name: "Blog"
diff --git a/web/pandas/index.html b/web/pandas/index.html
index fedb0b0c5f712..83d0f48197033 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -63,7 +63,7 @@ <h5>With the support of:</h5>
{% if releases %}
<h4>Latest version: {{ releases[0].name }}</h4>
<ul>
- <li><a href="docs/whatsnew/v0.25.0.html">What's new in {{ releases[0].name }}</a></li>
+ <li><a href="docs/whatsnew/v1.0.0.html">What's new in {{ releases[0].name }}</a></li>
<li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li>
<li><a href="{{ base_url}}/docs/">Documentation (web)</a></li>
<li><a href="{{ base_url }}/docs/pandas.pdf">Documentation (pdf)</a></li>
| - [ ] closes #31251
- [ ] add test_missing_column in pandas/tests/io/parser/test_parse_dates.py
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31550 | 2020-02-01T19:59:54Z | 2020-02-08T15:20:47Z | null | 2020-02-28T09:38:57Z |
replace NotImplementedError with AttributeError | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0dea8235e9d3f..1bcd1c5fcf4eb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -132,6 +132,12 @@
from pandas.io.formats.printing import pprint_thing
import pandas.plotting
+
+# Customized "cover all bases" local Exception
+class DataFrameError(NotImplementedError, TypeError, AttributeError):
+ pass
+
+
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
@@ -411,7 +417,9 @@ def _constructor(self) -> Type["DataFrame"]:
@property
def _constructor_expanddim(self):
- raise NotImplementedError("Not supported for DataFrames!")
+ raise DataFrameError(
+ "Property 'constructor_expanddim' is not supported for DataFrames!"
+ )
# ----------------------------------------------------------------------
# Constructors
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index a2e7dc527c4b8..9eda5f4504a75 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -5,8 +5,14 @@
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
+import inspect
+
class TestDataFrameSubclassing:
+
+ def test_get_members():
+ inspect.getmembers(pd.DataFrame())
+
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
| Fixes Python inspection of members - bug reported in https://github.com/pandas-dev/pandas/issues/31474
- [ ] closes #31474
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31549 | 2020-02-01T19:50:25Z | 2020-04-21T12:53:13Z | null | 2020-04-22T22:52:33Z |
BUG: GH31142 Fix for combine.Series | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 17a830788be3f..42dda5854396e 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -918,6 +918,7 @@ Reshaping
- Bug in :func:`crosstab` when inputs are two Series and have tuple names, the output will keep dummy MultiIndex as columns. (:issue:`18321`)
- :meth:`DataFrame.pivot` can now take lists for ``index`` and ``columns`` arguments (:issue:`21425`)
- Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`)
+- Fix bug in :meth:`Series.combine` where integer key values are interpreted as index values (:issue:`31142`)
- Bug where :meth:`Index.astype` would lose the name attribute when converting from ``Float64Index`` to ``Int64Index``, or when casting to an ``ExtensionArray`` dtype (:issue:`32013`)
- :meth:`Series.append` will now raise a ``TypeError`` when passed a DataFrame or a sequence containing Dataframe (:issue:`31413`)
- :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 79805bec85af0..ac833c61535b8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3761,6 +3761,26 @@ def get(self, key, default=None):
except (KeyError, ValueError, IndexError):
return default
+ def get_strict(self, key, default=None):
+ """
+ Get item from object for given key (ex: DataFrame column).
+
+ Returns default value if key not found, or if it is passed an
+ index value instead of a key.
+
+ Parameters
+ ----------
+ key : object
+
+ Returns
+ -------
+ value : same type as items contained in object
+ """
+ try:
+ return self.loc[key]
+ except (KeyError, ValueError, IndexError, TypeError):
+ return default
+
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
diff --git a/pandas/core/series.py b/pandas/core/series.py
index bc13d5376ec96..1159bdaf8955e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2753,8 +2753,8 @@ def combine(self, other, func, fill_value=None) -> "Series":
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
- lv = self.get(idx, fill_value)
- rv = other.get(idx, fill_value)
+ lv = self.get_strict(idx, fill_value)
+ rv = other.get_strict(idx, fill_value)
with np.errstate(all="ignore"):
new_values.append(func(lv, rv))
else:
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 0766bfc37d7ca..14a1ce6b94fb8 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -121,3 +121,26 @@ def test_concat_empty_series_dtypes_sparse(self):
# TODO: release-note: concat sparse dtype
expected = pd.SparseDtype("object")
assert result.dtype == expected
+
+ def test_combine_first_dt64(self):
+ from pandas.core.tools.datetimes import to_datetime
+
+ s0 = to_datetime(Series(["2010", np.NaN]))
+ s1 = to_datetime(Series([np.NaN, "2011"]))
+ rs = s0.combine_first(s1)
+ xp = to_datetime(Series(["2010", "2011"]))
+ tm.assert_series_equal(rs, xp)
+
+ s0 = to_datetime(Series(["2010", np.NaN]))
+ s1 = Series([np.NaN, "2011"])
+ rs = s0.combine_first(s1)
+ xp = Series([datetime(2010, 1, 1), "2011"])
+ tm.assert_series_equal(rs, xp)
+
+ def test_series_combine(self):
+ # GH 31142
+ s0 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ s1 = pd.Series([10, 20, 30], index=[0, "e", "f"])
+ rs = s0.combine(s1, lambda x, y: x + y, fill_value=0)
+ expected = pd.Series([10, 1, 2, 3, 20, 30], index=[0, "a", "b", "c", "e", "f"])
+ tm.assert_series_equal(rs, expected)
| - [ ] closes #31142
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31547 | 2020-02-01T18:41:35Z | 2020-06-26T15:43:06Z | null | 2020-06-26T15:43:06Z |
Manual backport 31446 | diff --git a/.devcontainer.json b/.devcontainer.json
new file mode 100644
index 0000000000000..315a1ff647012
--- /dev/null
+++ b/.devcontainer.json
@@ -0,0 +1,28 @@
+// For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at
+// https://github.com/microsoft/vscode-dev-containers/tree/master/containers/python-3-miniconda
+{
+ "name": "pandas",
+ "context": ".",
+ "dockerFile": "Dockerfile",
+
+ // Use 'settings' to set *default* container specific settings.json values on container create.
+ // You can edit these settings after create using File > Preferences > Settings > Remote.
+ "settings": {
+ "terminal.integrated.shell.linux": "/bin/bash",
+ "python.condaPath": "/opt/conda/bin/conda",
+ "python.pythonPath": "/opt/conda/bin/python",
+ "python.formatting.provider": "black",
+ "python.linting.enabled": true,
+ "python.linting.flake8Enabled": true,
+ "python.linting.pylintEnabled": false,
+ "python.linting.mypyEnabled": true,
+ "python.testing.pytestEnabled": true,
+ "python.testing.cwd": "pandas/tests"
+ },
+
+ // Add the IDs of extensions you want installed when the container is created in the array below.
+ "extensions": [
+ "ms-python.python",
+ "ms-vscode.cpptools"
+ ]
+}
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
index a1fbece3284ec..7dd2e04249492 100644
--- a/.github/CODE_OF_CONDUCT.md
+++ b/.github/CODE_OF_CONDUCT.md
@@ -54,10 +54,10 @@ incident.
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
-[http://contributor-covenant.org/version/1/3/0/][version],
+[https://www.contributor-covenant.org/version/1/3/0/][version],
and the [Swift Code of Conduct][swift].
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/3/0/
+[homepage]: https://www.contributor-covenant.org
+[version]: https://www.contributor-covenant.org/version/1/3/0/
[swift]: https://swift.org/community/#code-of-conduct
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 2e6e980242197..bc31d362118b5 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,7 +16,7 @@ If you notice a bug in the code or documentation, or have suggestions for how we
## Contributing to the Codebase
-The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](http://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#working-with-the-code)" section.
+The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](https://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#working-with-the-code)" section.
Before submitting your changes for review, make sure to check that your changes do not break any tests. You can find more information about our test suites in the "[Test-driven development/code writing](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#test-driven-development-code-writing)" section. We also have guidelines regarding coding style that will be enforced during testing, which can be found in the "[Code standards](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#code-standards)" section.
diff --git a/.github/workflows/assign.yml b/.github/workflows/assign.yml
index 019ecfc484ca5..a6d3f1f383751 100644
--- a/.github/workflows/assign.yml
+++ b/.github/workflows/assign.yml
@@ -7,9 +7,8 @@ jobs:
one:
runs-on: ubuntu-latest
steps:
- - name:
- run: |
- if [[ "${{ github.event.comment.body }}" == "take" ]]; then
- echo "Assigning issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}"
- curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"assignees": ["${{ github.event.comment.user.login }}"]}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees
- fi
+ - if: github.event.comment.body == 'take'
+ name:
+ run: |
+ echo "Assigning issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}"
+ curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"assignees": ["${{ github.event.comment.user.login }}"]}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 809764a20a713..139b9e31df46c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,11 +20,11 @@ repos:
rev: v0.730
hooks:
- id: mypy
- # We run mypy over all files because of:
- # * changes in type definitions may affect non-touched files.
- # * Running it with `mypy pandas` and the filenames will lead to
- # spurious duplicate module errors,
- # see also https://github.com/pre-commit/mirrors-mypy/issues/5
- pass_filenames: false
args:
- - pandas
+ # As long as a some files are excluded from check-untyped-defs
+ # we have to exclude it from the pre-commit hook as the configuration
+ # is based on modules but the hook runs on files.
+ - --no-check-untyped-defs
+ - --follow-imports
+ - skip
+ files: pandas/
diff --git a/.travis.yml b/.travis.yml
index a11cd469e9b9c..2c8533d02ddc1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,10 +7,10 @@ python: 3.7
# travis cache --delete inside the project directory from the travis command line client
# The cache directories will be deleted if anything in ci/ changes in a commit
cache:
- ccache: true
- directories:
- - $HOME/.cache # cython cache
- - $HOME/.ccache # compiler cache
+ ccache: true
+ directories:
+ - $HOME/.cache # cython cache
+ - $HOME/.ccache # compiler cache
env:
global:
@@ -20,30 +20,30 @@ env:
- secure: "EkWLZhbrp/mXJOx38CHjs7BnjXafsqHtwxPQrqWy457VDFWhIY1DMnIR/lOWG+a20Qv52sCsFtiZEmMfUjf0pLGXOqurdxbYBGJ7/ikFLk9yV2rDwiArUlVM9bWFnFxHvdz9zewBH55WurrY4ShZWyV+x2dWjjceWG5VpWeI6sA="
git:
- # for cloning
- depth: false
+ # for cloning
+ depth: false
matrix:
- fast_finish: true
- exclude:
- # Exclude the default Python 3.5 build
- - python: 3.5
+ fast_finish: true
- include:
+ include:
- env:
- - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network)"
+ - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)"
- env:
- - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network)"
+ - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)"
- env:
- - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1"
+ - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1"
services:
- mysql
- postgresql
- env:
- - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1"
+ # Enabling Deprecations when running tests
+ # PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs
+ # See pandas/_testing.py for more details.
+ - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1"
services:
- mysql
- postgresql
@@ -73,7 +73,6 @@ before_install:
# This overrides travis and tells it to look nowhere.
- export BOTO_CONFIG=/dev/null
-
install:
- echo "install start"
- ci/prep_cython_cache.sh
@@ -90,5 +89,5 @@ script:
after_script:
- echo "after_script start"
- source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
- - ci/print_skipped.py
+ - ci/print_skipped.py
- echo "after_script done"
diff --git a/AUTHORS.md b/AUTHORS.md
index dcaaea101f4c8..f576e333f9448 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -14,7 +14,7 @@ About the Copyright Holders
The PyData Development Team is the collection of developers of the PyData
project. This includes all of the PyData sub-projects, including pandas. The
core team that coordinates development on GitHub can be found here:
- http://github.com/pydata.
+ https://github.com/pydata.
Full credits for pandas contributors can be found in the documentation.
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000..b8aff5d671dcf
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,47 @@
+FROM continuumio/miniconda3
+
+# if you forked pandas, you can pass in your own GitHub username to use your fork
+# i.e. gh_username=myname
+ARG gh_username=pandas-dev
+ARG pandas_home="/home/pandas"
+
+# Avoid warnings by switching to noninteractive
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Configure apt and install packages
+RUN apt-get update \
+ && apt-get -y install --no-install-recommends apt-utils dialog 2>&1 \
+ #
+ # Verify git, process tools, lsb-release (common in install instructions for CLIs) installed
+ && apt-get -y install git iproute2 procps iproute2 lsb-release \
+ #
+ # Install C compilers (gcc not enough, so just went with build-essential which admittedly might be overkill),
+ # needed to build pandas C extensions
+ && apt-get -y install build-essential \
+ #
+ # cleanup
+ && apt-get autoremove -y \
+ && apt-get clean -y \
+ && rm -rf /var/lib/apt/lists/*
+
+# Switch back to dialog for any ad-hoc use of apt-get
+ENV DEBIAN_FRONTEND=dialog
+
+# Clone pandas repo
+RUN mkdir "$pandas_home" \
+ && git clone "https://github.com/$gh_username/pandas.git" "$pandas_home" \
+ && cd "$pandas_home" \
+ && git remote add upstream "https://github.com/pandas-dev/pandas.git" \
+ && git pull upstream master
+
+# Because it is surprisingly difficult to activate a conda environment inside a DockerFile
+# (from personal experience and per https://github.com/ContinuumIO/docker-images/issues/89),
+# we just update the base/root one from the 'environment.yml' file instead of creating a new one.
+#
+# Set up environment
+RUN conda env update -n base -f "$pandas_home/environment.yml"
+
+# Build C extensions and pandas
+RUN cd "$pandas_home" \
+ && python setup.py build_ext --inplace -j 4 \
+ && python -m pip install -e .
diff --git a/LICENSE b/LICENSE
index 924de26253bf4..76954a5a339ab 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,8 +1,10 @@
BSD 3-Clause License
-Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
+Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
All rights reserved.
+Copyright (c) 2011-2020, Open source contributors.
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/RELEASE.md b/RELEASE.md
index 7924ffaff561f..42cb82dfcf020 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -3,4 +3,4 @@ Release Notes
The list of changes to Pandas between each release can be found
[here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html). For full
-details, see the commit logs at http://github.com/pandas-dev/pandas.
+details, see the commit logs at https://github.com/pandas-dev/pandas.
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index cd1a31d4eaf34..7886b63e9983e 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -43,6 +43,7 @@
"matplotlib": [],
"sqlalchemy": [],
"scipy": [],
+ "numba": [],
"numexpr": [],
"pytables": [null, ""], // platform dependent, see excludes below
"tables": [null, ""],
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index 501e27b9078ec..9c7b107b478d4 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -1,5 +1,6 @@
import numpy as np
+import pandas as pd
from pandas import DataFrame
try:
@@ -7,6 +8,11 @@
except ImportError:
from pandas.util.decorators import cache_readonly
+try:
+ from pandas.core.construction import extract_array
+except ImportError:
+ extract_array = None
+
class DataFrameAttributes:
def setup(self):
@@ -20,6 +26,33 @@ def time_set_index(self):
self.df.index = self.cur_index
+class SeriesArrayAttribute:
+
+ params = [["numeric", "object", "category", "datetime64", "datetime64tz"]]
+ param_names = ["dtype"]
+
+ def setup(self, dtype):
+ if dtype == "numeric":
+ self.series = pd.Series([1, 2, 3])
+ elif dtype == "object":
+ self.series = pd.Series(["a", "b", "c"], dtype=object)
+ elif dtype == "category":
+ self.series = pd.Series(["a", "b", "c"], dtype="category")
+ elif dtype == "datetime64":
+ self.series = pd.Series(pd.date_range("2013", periods=3))
+ elif dtype == "datetime64tz":
+ self.series = pd.Series(pd.date_range("2013", periods=3, tz="UTC"))
+
+ def time_array(self, dtype):
+ self.series.array
+
+ def time_extract_array(self, dtype):
+ extract_array(self.series)
+
+ def time_extract_array_numpy(self, dtype):
+ extract_array(self.series, extract_numpy=True)
+
+
class CacheReadonly:
def setup(self):
class Foo:
diff --git a/asv_bench/benchmarks/io/sas.py b/asv_bench/benchmarks/io/sas.py
index 5eaeb231b031b..369b79641dbc4 100644
--- a/asv_bench/benchmarks/io/sas.py
+++ b/asv_bench/benchmarks/io/sas.py
@@ -9,7 +9,7 @@ class SAS:
param_names = ["format"]
def setup(self, format):
- # Read files that are located in 'pandas/io/tests/sas/data'
+ # Read files that are located in 'pandas/tests/io/sas/data'
files = {"sas7bdat": "test1.sas7bdat", "xport": "paxraw_d_short.xpt"}
file = files[format]
paths = [
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index 6da2b2270c04a..fd1770df8e5d3 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -56,7 +56,7 @@
def setup(*args, **kwargs):
# This function just needs to be imported into each benchmark file to
# set up the random seed before each function.
- # http://asv.readthedocs.io/en/latest/writing_benchmarks.html
+ # https://asv.readthedocs.io/en/latest/writing_benchmarks.html
np.random.seed(1234)
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 441f4b380656e..21081ee23a773 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -161,6 +161,9 @@ def time_pivot_table_categorical_observed(self):
observed=True,
)
+ def time_pivot_table_margins_only_column(self):
+ self.df.pivot_table(columns=["key2", "key3"], margins=True)
+
class Crosstab:
def setup(self):
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 7a72622fd5fe3..f7e1e395a76bc 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -44,6 +44,27 @@ def time_rolling(self, constructor, window, dtype, function, raw):
self.roll.apply(function, raw=raw)
+class Engine:
+ params = (
+ ["DataFrame", "Series"],
+ ["int", "float"],
+ [np.sum, lambda x: np.sum(x) + 5],
+ ["cython", "numba"],
+ )
+ param_names = ["constructor", "dtype", "function", "engine"]
+
+ def setup(self, constructor, dtype, function, engine):
+ N = 10 ** 3
+ arr = (100 * np.random.random(N)).astype(dtype)
+ self.data = getattr(pd, constructor)(arr)
+
+ def time_rolling_apply(self, constructor, dtype, function, engine):
+ self.data.rolling(10).apply(function, raw=True, engine=engine)
+
+ def time_expanding_apply(self, constructor, dtype, function, engine):
+ self.data.expanding().apply(function, raw=True, engine=engine)
+
+
class ExpandingMethods:
params = (
diff --git a/asv_bench/benchmarks/tslibs/timedelta.py b/asv_bench/benchmarks/tslibs/timedelta.py
index 8a16ddc189483..6ed273281569b 100644
--- a/asv_bench/benchmarks/tslibs/timedelta.py
+++ b/asv_bench/benchmarks/tslibs/timedelta.py
@@ -10,6 +10,11 @@
class TimedeltaConstructor:
+ def setup(self):
+ self.nptimedelta64 = np.timedelta64(3600)
+ self.dttimedelta = datetime.timedelta(seconds=3600)
+ self.td = Timedelta(3600, unit="s")
+
def time_from_int(self):
Timedelta(123456789)
@@ -28,10 +33,10 @@ def time_from_components(self):
)
def time_from_datetime_timedelta(self):
- Timedelta(datetime.timedelta(days=1, seconds=1))
+ Timedelta(self.dttimedelta)
def time_from_np_timedelta(self):
- Timedelta(np.timedelta64(1, "ms"))
+ Timedelta(self.nptimedelta64)
def time_from_string(self):
Timedelta("1 days")
@@ -42,6 +47,9 @@ def time_from_iso_format(self):
def time_from_missing(self):
Timedelta("nat")
+ def time_from_pd_timedelta(self):
+ Timedelta(self.td)
+
class TimedeltaProperties:
def setup_cache(self):
diff --git a/asv_bench/benchmarks/tslibs/timestamp.py b/asv_bench/benchmarks/tslibs/timestamp.py
index 8ebb2d8d2f35d..3ef9b814dd79e 100644
--- a/asv_bench/benchmarks/tslibs/timestamp.py
+++ b/asv_bench/benchmarks/tslibs/timestamp.py
@@ -1,12 +1,19 @@
import datetime
import dateutil
+import numpy as np
import pytz
from pandas import Timestamp
class TimestampConstruction:
+ def setup(self):
+ self.npdatetime64 = np.datetime64("2020-01-01 00:00:00")
+ self.dttime_unaware = datetime.datetime(2020, 1, 1, 0, 0, 0)
+ self.dttime_aware = datetime.datetime(2020, 1, 1, 0, 0, 0, 0, pytz.UTC)
+ self.ts = Timestamp("2020-01-01 00:00:00")
+
def time_parse_iso8601_no_tz(self):
Timestamp("2017-08-25 08:16:14")
@@ -28,6 +35,18 @@ def time_fromordinal(self):
def time_fromtimestamp(self):
Timestamp.fromtimestamp(1515448538)
+ def time_from_npdatetime64(self):
+ Timestamp(self.npdatetime64)
+
+ def time_from_datetime_unaware(self):
+ Timestamp(self.dttime_unaware)
+
+ def time_from_datetime_aware(self):
+ Timestamp(self.dttime_aware)
+
+ def time_from_pd_timestamp(self):
+ Timestamp(self.ts)
+
class TimestampProperties:
_tzs = [None, pytz.timezone("Europe/Amsterdam"), pytz.UTC, dateutil.tz.tzutc()]
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 57032932b878c..d992c64073476 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -4,7 +4,7 @@ jobs:
- template: ci/azure/posix.yml
parameters:
name: macOS
- vmImage: xcode9-macos10.13
+ vmImage: macOS-10.14
- template: ci/azure/posix.yml
parameters:
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index 55e8e839f4fae..c9a2e4eefd19d 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -18,7 +18,7 @@ jobs:
py36_minimum_versions:
ENV_FILE: ci/deps/azure-36-minimum_versions.yaml
CONDA_PY: "36"
- PATTERN: "not slow and not network"
+ PATTERN: "not slow and not network and not clipboard"
py36_locale_slow_old_np:
ENV_FILE: ci/deps/azure-36-locale_slow.yaml
@@ -36,12 +36,12 @@ jobs:
PATTERN: "not slow and not network"
LANG: "it_IT.utf8"
LC_ALL: "it_IT.utf8"
- EXTRA_APT: "language-pack-it"
+ EXTRA_APT: "language-pack-it xsel"
py36_32bit:
ENV_FILE: ci/deps/azure-36-32bit.yaml
CONDA_PY: "36"
- PATTERN: "not slow and not network"
+ PATTERN: "not slow and not network and not clipboard"
BITS32: "yes"
py37_locale:
@@ -50,7 +50,7 @@ jobs:
PATTERN: "not slow and not network"
LANG: "zh_CN.utf8"
LC_ALL: "zh_CN.utf8"
- EXTRA_APT: "language-pack-zh-hans"
+ EXTRA_APT: "language-pack-zh-hans xsel"
py37_np_dev:
ENV_FILE: ci/deps/azure-37-numpydev.yaml
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 83ceb11dfcbf4..0cc42be42d61e 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -314,8 +314,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA01, SA02, SA03, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA01,SA02,SA03,SA05
+ MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA02, SA03, SA05)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA02,SA03,SA05
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 111ba6b020bc7..dc51597a33209 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -34,3 +34,6 @@ dependencies:
- xlsxwriter
- xlwt
- pyarrow>=0.15
+ - pip
+ - pip:
+ - pyxlsb
diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml
index 3bbbdb4cf32ad..90980133b31c1 100644
--- a/ci/deps/azure-macos-36.yaml
+++ b/ci/deps/azure-macos-36.yaml
@@ -33,3 +33,4 @@ dependencies:
- pip
- pip:
- pyreadstat
+ - pyxlsb
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml
index 62be1075b3337..6b3ad6f560292 100644
--- a/ci/deps/azure-windows-37.yaml
+++ b/ci/deps/azure-windows-37.yaml
@@ -35,3 +35,6 @@ dependencies:
- xlsxwriter
- xlwt
- pyreadstat
+ - pip
+ - pip:
+ - pyxlsb
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index a46001c58d165..869d2ab683f0c 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -51,3 +51,4 @@ dependencies:
- coverage
- pandas-datareader
- python-dateutil
+ - pyxlsb
diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index 72822fa2d3c7f..60e2f047235e6 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import xml.etree.ElementTree as et
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 8020680d617d7..0cb1f4aabf352 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -14,14 +14,14 @@ if [ "$COVERAGE" ]; then
COVERAGE="-s --cov=pandas --cov-report=xml:$COVERAGE_FNAME"
fi
-PYTEST_CMD="pytest -m \"$PATTERN\" -n auto --dist=loadfile -s --strict --durations=10 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
-
-# Travis does not have have an X server
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
- DISPLAY=DISPLAY=:99.0
- PYTEST_CMD="xvfb-run -e /dev/stdout $PYTEST_CMD"
+# If no X server is found, we use xvfb to emulate it
+if [[ $(uname) == "Linux" && -z $DISPLAY ]]; then
+ export DISPLAY=":0"
+ XVFB="xvfb-run "
fi
+PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n auto --dist=loadfile -s --strict --durations=10 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
+
echo $PYTEST_CMD
sh -c "$PYTEST_CMD"
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index db28eaea8956e..e5bee09fe2f79 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -114,6 +114,11 @@ echo "remove postgres if has been installed with conda"
echo "we use the one from the CI"
conda remove postgresql -y --force || true
+echo
+echo "remove qt"
+echo "causes problems with the clipboard, we use xsel for that"
+conda remove qt -y --force || true
+
echo
echo "conda list pandas"
conda list pandas
diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt
index 0eae39f318d23..c57da38b31777 100644
--- a/doc/cheatsheet/README.txt
+++ b/doc/cheatsheet/README.txt
@@ -5,4 +5,4 @@ and pick "PDF" as the format.
This cheat sheet was inspired by the RStudio Data Wrangling Cheatsheet[1], written by Irv Lustig, Princeton Consultants[2].
[1]: https://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf
-[2]: http://www.princetonoptimization.com/
+[2]: https://www.princetonoptimization.com/
diff --git a/doc/make.py b/doc/make.py
index cf73f44b5dd02..024a748cd28ca 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Python script for building documentation.
diff --git a/doc/source/_static/css/pandas.css b/doc/source/_static/css/pandas.css
new file mode 100644
index 0000000000000..43cd631890330
--- /dev/null
+++ b/doc/source/_static/css/pandas.css
@@ -0,0 +1,36 @@
+/* Getting started index page */
+
+.intro-card {
+ background: #fff;
+ border-radius: 0;
+ padding: 30px 10px 10px 10px;
+ margin: 10px 0px;
+}
+
+.intro-card .card-text {
+ margin: 20px 0px;
+ /*min-height: 150px; */
+}
+
+.custom-button {
+ background-color: #dcdcdc;
+ border: none;
+ color: #484848;
+ text-align: center;
+ text-decoration: none;
+ display: inline-block;
+ font-size: 0.9rem;
+ border-radius: 0.5rem;
+ max-width: 220px;
+ padding: 0.5rem 0rem;
+}
+
+.custom-button a {
+ color: #484848;
+}
+
+.custom-button p {
+ margin-top: 0;
+ margin-bottom: 0rem;
+ color: #484848;
+}
diff --git a/doc/source/_static/index_api.svg b/doc/source/_static/index_api.svg
new file mode 100644
index 0000000000000..70bf0d3504b1a
--- /dev/null
+++ b/doc/source/_static/index_api.svg
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="99.058548mm"
+ height="89.967583mm"
+ viewBox="0 0 99.058554 89.967582"
+ version="1.1"
+ id="svg1040"
+ inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+ sodipodi:docname="index_api.svg">
+ <defs
+ id="defs1034" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.35"
+ inkscape:cx="533.74914"
+ inkscape:cy="10.90433"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="930"
+ inkscape:window-height="472"
+ inkscape:window-x="2349"
+ inkscape:window-y="267"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata1037">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(195.19933,-1.0492759)">
+ <g
+ id="g1008"
+ transform="matrix(1.094977,0,0,1.094977,-521.5523,-198.34055)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path899"
+ d="M 324.96812,187.09499 H 303.0455 v 72.1639 h 22.67969"
+ style="fill:none;stroke:#150458;stroke-width:10;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path899-3"
+ d="m 361.58921,187.09499 h 21.92262 v 72.1639 h -22.67969"
+ style="fill:none;stroke:#150458;stroke-width:10;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <g
+ transform="translate(415.87139,46.162126)"
+ id="g944">
+ <circle
+ style="fill:#150458;fill-opacity:1;stroke:#150458;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path918"
+ cx="-84.40152"
+ cy="189.84375"
+ r="2.2293637" />
+ <circle
+ style="fill:#150458;fill-opacity:1;stroke:#150458;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path918-5"
+ cx="-72.949402"
+ cy="189.84375"
+ r="2.2293637" />
+ <circle
+ style="fill:#150458;fill-opacity:1;stroke:#150458;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="path918-6"
+ cx="-61.497284"
+ cy="189.84375"
+ r="2.2293637" />
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/source/_static/index_contribute.svg b/doc/source/_static/index_contribute.svg
new file mode 100644
index 0000000000000..e86c3e9fd0b3e
--- /dev/null
+++ b/doc/source/_static/index_contribute.svg
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="89.624855mm"
+ height="89.96759mm"
+ viewBox="0 0 89.62486 89.96759"
+ version="1.1"
+ id="svg1040"
+ inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+ sodipodi:docname="index_contribute.svg">
+ <defs
+ id="defs1034" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.35"
+ inkscape:cx="683.11893"
+ inkscape:cy="-59.078181"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="930"
+ inkscape:window-height="472"
+ inkscape:window-x="2349"
+ inkscape:window-y="267"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata1037">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(234.72009,17.466935)">
+ <g
+ id="g875"
+ transform="matrix(0.99300176,0,0,0.99300176,-133.24106,-172.58804)">
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path869"
+ d="m -97.139881,161.26069 47.247024,40.25446 -47.247024,40.25446"
+ style="fill:none;stroke:#150458;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path871"
+ d="m -49.514879,241.81547 h 32.505951"
+ style="fill:none;stroke:#150458;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ </g>
+</svg>
diff --git a/doc/source/_static/index_getting_started.svg b/doc/source/_static/index_getting_started.svg
new file mode 100644
index 0000000000000..d00e462427193
--- /dev/null
+++ b/doc/source/_static/index_getting_started.svg
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="101.09389mm"
+ height="89.96759mm"
+ viewBox="0 0 101.09389 89.96759"
+ version="1.1"
+ id="svg1040"
+ inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+ sodipodi:docname="index_getting_started.svg">
+ <defs
+ id="defs1034" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.35"
+ inkscape:cx="-93.242129"
+ inkscape:cy="-189.9825"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1875"
+ inkscape:window-height="1056"
+ inkscape:window-x="1965"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1" />
+ <metadata
+ id="metadata1037">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(2.9219487,-8.5995374)">
+ <path
+ style="fill:#150458;fill-opacity:1;stroke-width:0.20233451"
+ d="M 37.270955,98.335591 C 33.358064,97.07991 31.237736,92.52319 32.964256,89.08022 c 0.18139,-0.361738 4.757999,-5.096629 10.17021,-10.521968 l 9.84041,-9.864254 -4.03738,-4.041175 -4.037391,-4.041172 -4.96415,4.916665 c -3.61569,3.581096 -5.238959,5.04997 -5.975818,5.407377 l -1.011682,0.490718 H 17.267525 1.5866055 L 0.65034544,70.96512 C -2.2506745,69.535833 -3.5952145,66.18561 -2.5925745,62.884631 c 0.53525,-1.762217 1.61699004,-3.050074 3.22528014,-3.839847 l 1.15623996,-0.56778 13.2591094,-0.05613 13.259111,-0.05613 11.5262,-11.527539 11.526199,-11.527528 H 40.622647 c -12.145542,0 -12.189222,-0.0046 -13.752801,-1.445851 -2.229871,-2.055423 -2.162799,-5.970551 0.135998,-7.938238 1.475193,-1.262712 1.111351,-1.238469 18.588522,-1.238469 12.899229,0 16.035311,0.05193 16.692589,0.276494 0.641832,0.219264 2.590731,2.051402 9.416301,8.852134 l 8.606941,8.575638 h 6.848168 c 4.837422,0 7.092281,0.07311 7.679571,0.249094 0.48064,0.144008 1.22985,0.634863 1.77578,1.163429 2.383085,2.307333 1.968685,6.539886 -0.804989,8.221882 -0.571871,0.346781 -1.38284,0.687226 -1.80217,0.756523 -0.41933,0.06928 -4.2741,0.127016 -8.56615,0.128238 -6.56998,0.0016 -7.977492,-0.04901 -8.902732,-0.321921 -0.975569,-0.287742 -1.400468,-0.622236 -3.783999,-2.978832 l -2.685021,-2.654679 -5.05411,5.051071 -5.0541,5.051081 3.926292,3.947202 c 2.365399,2.378001 4.114289,4.309171 4.399158,4.857713 0.39266,0.75606 0.47311,1.219412 0.474321,2.731516 0.003,3.083647 0.620779,2.331942 -13.598011,16.531349 -10.273768,10.259761 -12.679778,12.563171 -13.500979,12.92519 -1.267042,0.55857 -3.156169,0.681342 -4.390271,0.285321 z m 40.130741,-65.45839 c -2.212909,-0.579748 -3.782711,-1.498393 -5.51275,-3.226063 -2.522111,-2.518633 -3.633121,-5.181304 -3.633121,-8.707194 0,-3.530699 1.11238,-6.197124 3.631161,-8.704043 4.866751,-4.8438383 12.324781,-4.8550953 17.211791,-0.026 3.908758,3.862461 4.818578,9.377999 2.372188,14.380771 -0.846209,1.730481 -3.39493,4.326384 -5.143839,5.239072 -2.69708,1.407492 
-6.042829,1.798628 -8.92543,1.043434 z"
+ id="path1000"
+ inkscape:connector-curvature="0" />
+ </g>
+</svg>
diff --git a/doc/source/_static/index_user_guide.svg b/doc/source/_static/index_user_guide.svg
new file mode 100644
index 0000000000000..a567103af5918
--- /dev/null
+++ b/doc/source/_static/index_user_guide.svg
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="123.72241mm"
+ height="89.96759mm"
+ viewBox="0 0 123.72242 89.96759"
+ version="1.1"
+ id="svg1040"
+ inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+ sodipodi:docname="index_userguide.svg">
+ <defs
+ id="defs1034" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.35"
+ inkscape:cx="332.26618"
+ inkscape:cy="83.744004"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="930"
+ inkscape:window-height="472"
+ inkscape:window-x="2349"
+ inkscape:window-y="267"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata1037">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(141.8903,-20.32143)">
+ <path
+ style="fill:#150458;fill-opacity:1;stroke-width:0.20483544"
+ d="m -139.53374,110.1657 c -0.80428,-0.24884 -1.71513,-1.11296 -2.07107,-1.96486 -0.23905,-0.57214 -0.28453,-6.28104 -0.28453,-35.720988 0,-38.274546 -0.079,-35.840728 1.19849,-36.91568 0.58869,-0.495345 4.63766,-2.187548 8.47998,-3.544073 l 1.58749,-0.560453 v -3.309822 c 0,-3.025538 0.0396,-3.388179 0.46086,-4.222122 0.68808,-1.362003 1.38671,-1.714455 4.60319,-2.322195 4.12797,-0.779966 5.13304,-0.912766 8.81544,-1.16476 11.80964,-0.808168 22.80911,2.509277 30.965439,9.3392 1.750401,1.465747 3.840861,3.5635 5.0903,5.108065 l 0.659122,0.814805 0.659109,-0.814805 c 1.249431,-1.544565 3.33988,-3.642318 5.09029,-5.108065 8.156331,-6.829923 19.155791,-10.147368 30.965441,-9.3392 3.682389,0.251994 4.68748,0.384794 8.81544,1.16476 3.21647,0.60774 3.91511,0.960192 4.60318,2.322195 0.4213,0.833943 0.46087,1.196584 0.46087,4.222122 v 3.309822 l 1.58748,0.560453 c 4.10165,1.448077 7.98852,3.072753 8.5259,3.563743 1.22643,1.120567 1.15258,-1.245868 1.15258,36.927177 0,34.567591 -0.005,35.083151 -0.40663,35.903991 -0.22365,0.45804 -0.73729,1.05665 -1.14143,1.33024 -1.22281,0.82783 -2.17721,0.70485 -5.86813,-0.7561 -9.19595,-3.63998 -18.956011,-6.38443 -26.791332,-7.53353 -3.02827,-0.44412 -9.26189,-0.61543 -11.77821,-0.3237 -5.19357,0.60212 -8.736108,2.05527 -11.700039,4.79936 -0.684501,0.63371 -1.466141,1.23646 -1.736979,1.33942 -0.63859,0.2428 -4.236521,0.2428 -4.875112,0 -0.27083,-0.10296 -1.05247,-0.70571 -1.73696,-1.33942 -2.96395,-2.74409 -6.50648,-4.19724 -11.700058,-4.79936 -2.516312,-0.29173 -8.749941,-0.12042 -11.778201,0.3237 -7.78194,1.14127 -17.39965,3.83907 -26.73341,7.49883 -3.38325,1.32658 -4.15525,1.50926 -5.11851,1.21125 z m 4.2107,-5.34052 c 5.86759,-2.29858 14.40398,-4.922695 20.2018,-6.210065 6.31584,-1.402418 8.5236,-1.646248 14.91592,-1.647338 4.68699,-7.94e-4 6.013661,0.0632 7.257809,0.3497 0.837332,0.19286 1.561052,0.312028 1.60828,0.264819 0.147111,-0.147119 -1.803289,-1.307431 -4.154879,-2.471801 -8.12511,-4.023029 -18.27311,-4.986568 
-29.0861,-2.761718 -1.09536,0.22538 -2.32708,0.40827 -2.73715,0.406418 -1.12787,-0.005 -2.3054,-0.76382 -2.84516,-1.8332 l -0.46086,-0.913098 V 62.99179 35.97471 l -0.56331,0.138329 c -0.30981,0.07608 -1.89985,0.665075 -3.5334,1.308881 -2.27551,0.896801 -2.96414,1.252878 -2.94452,1.522563 0.014,0.193604 0.0372,15.284513 0.0512,33.535345 0.014,18.250839 0.0538,33.183322 0.0884,33.183322 0.0346,0 1.02543,-0.3771 2.20198,-0.83801 z m 113.006991,-32.697216 -0.0518,-33.535203 -3.17495,-1.272156 c -1.74623,-0.699685 -3.33627,-1.278755 -3.53341,-1.286819 -0.33966,-0.01389 -0.35847,1.401778 -0.35847,26.980216 v 26.994863 l -0.46087,0.913112 c -0.53976,1.06939 -1.71729,1.828088 -2.84515,1.833189 -0.41008,0.0021 -1.6418,-0.181031 -2.73716,-0.406421 -11.888201,-2.446089 -22.84337,-1.046438 -31.491022,4.02332 -1.68175,0.985941 -2.216748,1.467501 -1.36534,1.228942 1.575181,-0.441362 4.990592,-0.73864 8.524862,-0.742011 5.954408,-0.005 11.43046,0.791951 19.10874,2.78333 3.9516,1.024874 12.1555,3.687454 15.6699,5.085704 1.23926,0.49306 2.36869,0.90517 2.50985,0.9158 0.20489,0.0155 0.2462,-6.745894 0.20483,-33.515866 z m -59.76135,-2.233777 V 40.065438 l -0.95972,-1.357442 c -1.380522,-1.952627 -5.376262,-5.847994 -7.64336,-7.45136 -3.778692,-2.672401 -9.063392,-4.943324 -13.672511,-5.875304 -3.19731,-0.646503 -5.23069,-0.833103 -9.05886,-0.831312 -4.37716,0.0021 -7.70223,0.349169 -11.83461,1.235469 l -1.07538,0.230645 v 31.242342 c 0,26.565778 0.0426,31.226011 0.28429,31.133261 0.15637,-0.06 1.42379,-0.297169 2.81648,-0.527026 12.37657,-2.042634 23.21658,-0.346861 32.521639,5.087596 2.10018,1.226558 5.20202,3.618878 6.880942,5.30692 0.788609,0.792909 1.502978,1.446609 1.587468,1.452679 0.0845,0.006 0.153622,-13.411893 0.153622,-29.817719 z m 5.80221,28.3766 c 6.21476,-6.141601 15.08488,-10.061509 25.025529,-11.05933 4.262419,-0.427849 11.579921,-0.0054 16.017661,0.924912 0.75932,0.15916 1.45259,0.244888 1.54058,0.190498 0.088,-0.05434 0.16003,-14.060382 0.16003,-31.124436 V 
26.176883 l -0.52136,-0.198219 c -0.66893,-0.254325 -4.77649,-0.95482 -7.159981,-1.221048 -2.41372,-0.269605 -8.559851,-0.266589 -10.759229,0.0052 -6.458111,0.798299 -12.584091,3.083792 -17.405651,6.49374 -2.267091,1.603366 -6.262831,5.498733 -7.64336,7.45136 l -0.959721,1.357438 v 29.828747 c 0,16.405812 0.0532,29.828746 0.11802,29.828746 0.065,0 0.77928,-0.65347 1.587482,-1.452149 z"
+ id="path845"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="csscccscsssscsssssscscsccsccsccscsscccccccscccccccccsccscscscccscccsccssccsscccscccccsccccsccscsccsscc" />
+ </g>
+</svg>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 481c03ab8f388..28df08a8607b9 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -10,6 +10,7 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
+from datetime import datetime
import importlib
import inspect
import logging
@@ -137,7 +138,7 @@
# General information about the project.
project = "pandas"
-copyright = "2008-2014, the pandas development team"
+copyright = f"2008-{datetime.now().year}, the pandas development team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -229,6 +230,10 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
+html_css_files = [
+ "css/pandas.css",
+]
+
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
@@ -407,7 +412,7 @@
"py": ("https://pylib.readthedocs.io/en/latest/", None),
"python": ("https://docs.python.org/3/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
- "statsmodels": ("http://www.statsmodels.org/devel/", None),
+ "statsmodels": ("https://www.statsmodels.org/devel/", None),
}
# extlinks alias
@@ -624,10 +629,10 @@ def linkcode_resolve(domain, info):
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if "+" in pandas.__version__:
- return f"http://github.com/pandas-dev/pandas/blob/master/pandas/{fn}{linespec}"
+ return f"https://github.com/pandas-dev/pandas/blob/master/pandas/{fn}{linespec}"
else:
return (
- f"http://github.com/pandas-dev/pandas/blob/"
+ f"https://github.com/pandas-dev/pandas/blob/"
f"v{pandas.__version__}/pandas/{fn}{linespec}"
)
@@ -694,7 +699,7 @@ def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
- # http://ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
+ # https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
# Make sure we're outputting HTML
if app.builder.format != "html":
return
diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 2fc2f1fb6ee8d..a295038b5a0bd 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -127,3 +127,29 @@ For example:
value = str
f"Unknown recived type, got: '{type(value).__name__}'"
+
+
+Imports (aim for absolute)
+==========================
+
+In Python 3, absolute imports are recommended. With an absolute import, a
+statement like ``import string`` will import the standard library ``string``
+module rather than a local ``string.py`` in the same directory. As much as
+possible, you should try to write out absolute imports that show the whole
+import chain from the top-level pandas package.
+
+Explicit relative imports are also supported in Python 3, but their use is not
+recommended. Implicit relative imports should never be used; they were removed in Python 3.
+
+For example:
+
+::
+
+ # preferred
+ import pandas.core.common as com
+
+ # not preferred
+ from .common import test_base
+
+ # wrong
+ from common import test_base
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 93c65ba7358c9..f904781178656 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -32,7 +32,7 @@ check each issue individually, and it's not possible to find the unassigned ones
For this reason, we implemented a workaround consisting of adding a comment with the exact
text `take`. When you do it, a GitHub action will automatically assign you the issue
-(this will take seconds, and may require refreshint the page to see it).
+(this will take seconds, and may require refreshing the page to see it).
By doing this, it's possible to filter the list of issues and find only the unassigned ones.
So, a good way to find an issue to start contributing to pandas is to check the list of
@@ -56,7 +56,7 @@ Bug reports and enhancement requests
Bug reports are an important part of making *pandas* more stable. Having a complete bug report
will allow others to reproduce the bug and provide insight into fixing. See
`this stackoverflow article <https://stackoverflow.com/help/mcve>`_ and
-`this blogpost <http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports>`_
+`this blogpost <https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports>`_
for tips on writing a good bug report.
Trying the bug-producing code out on the *master* branch is often a worthwhile exercise
@@ -67,7 +67,7 @@ Bug reports must:
#. Include a short, self-contained Python snippet reproducing the problem.
You can format the code nicely by using `GitHub Flavored Markdown
- <http://github.github.com/github-flavored-markdown/>`_::
+ <https://github.github.com/github-flavored-markdown/>`_::
```python
>>> from pandas import DataFrame
@@ -104,19 +104,19 @@ feel free to ask for help.
The code is hosted on `GitHub <https://www.github.com/pandas-dev/pandas>`_. To
contribute you will need to sign up for a `free GitHub account
-<https://github.com/signup/free>`_. We use `Git <http://git-scm.com/>`_ for
+<https://github.com/signup/free>`_. We use `Git <https://git-scm.com/>`_ for
version control to allow many people to work together on the project.
Some great resources for learning Git:
-* the `GitHub help pages <http://help.github.com/>`_.
-* the `NumPy's documentation <http://docs.scipy.org/doc/numpy/dev/index.html>`_.
-* Matthew Brett's `Pydagogue <http://matthew-brett.github.com/pydagogue/>`_.
+* the `GitHub help pages <https://help.github.com/>`_.
+* the `NumPy's documentation <https://docs.scipy.org/doc/numpy/dev/index.html>`_.
+* Matthew Brett's `Pydagogue <https://matthew-brett.github.com/pydagogue/>`_.
Getting started with Git
------------------------
-`GitHub has instructions <http://help.github.com/set-up-git-redirect>`__ for installing git,
+`GitHub has instructions <https://help.github.com/set-up-git-redirect>`__ for installing git,
setting up your SSH key, and configuring git. All these steps need to be completed before
you can work seamlessly between your local repository and GitHub.
@@ -146,6 +146,17 @@ requires a C compiler and Python environment. If you're making documentation
changes, you can skip to :ref:`contributing.documentation` but you won't be able
to build the documentation locally before pushing your changes.
+Using a Docker Container
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Instead of manually setting up a development environment, you can use Docker to
+automatically create the environment with just a few commands. Pandas provides a
+``Dockerfile`` in the root directory to build a Docker image with a full pandas development environment.
+
+Even easier, you can use the ``Dockerfile`` to launch a remote session with Visual Studio Code,
+a popular free IDE, using the ``.devcontainer.json`` file.
+See https://code.visualstudio.com/docs/remote/containers for details.
+
.. _contributing.dev_c:
Installing a C compiler
@@ -249,7 +260,7 @@ To return to your root environment::
conda deactivate
-See the full conda docs `here <http://conda.pydata.org/docs>`__.
+See the full conda docs `here <https://conda.pydata.org/docs>`__.
.. _contributing.pip:
@@ -354,9 +365,9 @@ About the *pandas* documentation
--------------------------------
The documentation is written in **reStructuredText**, which is almost like writing
-in plain English, and built using `Sphinx <http://sphinx.pocoo.org/>`__. The
+in plain English, and built using `Sphinx <https://www.sphinx-doc.org/en/master/>`__. The
Sphinx Documentation has an excellent `introduction to reST
-<http://sphinx.pocoo.org/rest.html>`__. Review the Sphinx docs to perform more
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__. Review the Sphinx docs to perform more
complex changes to the documentation as well.
Some other important things to know about the docs:
@@ -379,7 +390,7 @@ Some other important things to know about the docs:
contributing_docstring.rst
* The tutorials make heavy use of the `ipython directive
- <http://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
+ <https://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
This directive lets you put code in the documentation which will be run
during the doc build. For example::
@@ -399,7 +410,7 @@ Some other important things to know about the docs:
doc build. This approach means that code examples will always be up to date,
but it does make the doc building a bit more complex.
-* Our API documentation in ``doc/source/api.rst`` houses the auto-generated
+* Our API documentation files in ``doc/source/reference`` house the auto-generated
documentation from the docstrings. For classes, there are a few subtleties
around controlling which methods and attributes have pages auto-generated.
@@ -417,7 +428,8 @@ Some other important things to know about the docs:
``Methods`` section in the class docstring. See ``CategoricalIndex`` for an
example.
- Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx
+ Every method should be included in a ``toctree`` in one of the documentation files in
+ ``doc/source/reference``, else Sphinx
will emit a warning.
.. note::
@@ -425,7 +437,7 @@ Some other important things to know about the docs:
The ``.rst`` files are used to automatically generate Markdown and HTML versions
of the docs. For this reason, please do not edit ``CONTRIBUTING.md`` directly,
but instead make any changes to ``doc/source/development/contributing.rst``. Then, to
- generate ``CONTRIBUTING.md``, use `pandoc <http://johnmacfarlane.net/pandoc/>`_
+ generate ``CONTRIBUTING.md``, use `pandoc <https://johnmacfarlane.net/pandoc/>`_
with the following command::
pandoc doc/source/development/contributing.rst -t markdown_github > CONTRIBUTING.md
@@ -433,11 +445,11 @@ Some other important things to know about the docs:
The utility script ``scripts/validate_docstrings.py`` can be used to get a csv
summary of the API documentation. And also validate common errors in the docstring
of a specific class, function or method. The summary also compares the list of
-methods documented in ``doc/source/api.rst`` (which is used to generate
+methods documented in the files in ``doc/source/reference`` (which is used to generate
the `API Reference <https://pandas.pydata.org/pandas-docs/stable/api.html>`_ page)
and the actual public methods.
-This will identify methods documented in ``doc/source/api.rst`` that are not actually
-class methods, and existing methods that are not documented in ``doc/source/api.rst``.
+This will identify methods documented in ``doc/source/reference`` that are not actually
+class methods, and existing methods that are not documented in ``doc/source/reference``.
Updating a *pandas* docstring
@@ -609,8 +621,8 @@ You can also run this command on an entire directory if necessary::
cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive modified-c-directory
To make your commits compliant with this standard, you can install the
-`ClangFormat <http://clang.llvm.org/docs/ClangFormat.html>`_ tool, which can be
-downloaded `here <http://llvm.org/builds/>`__. To configure, in your home directory,
+`ClangFormat <https://clang.llvm.org/docs/ClangFormat.html>`_ tool, which can be
+downloaded `here <https://llvm.org/builds/>`__. To configure, in your home directory,
run the following command::
clang-format style=google -dump-config > .clang-format
@@ -635,10 +647,12 @@ many errors as possible, but it may not correct *all* of them. Thus, it is
recommended that you run ``cpplint`` to double check and make any other style
fixes manually.
+.. _contributing.code-formatting:
+
Python (PEP8 / black)
~~~~~~~~~~~~~~~~~~~~~
-*pandas* follows the `PEP8 <http://www.python.org/dev/peps/pep-0008/>`_ standard
+*pandas* follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard
and uses `Black <https://black.readthedocs.io/en/stable/>`_ and
`Flake8 <http://flake8.pycqa.org/en/latest/>`_ to ensure a consistent code
format throughout the project.
@@ -656,19 +670,8 @@ apply ``black`` as you edit files.
You should use a ``black`` version >= 19.10b0 as previous versions are not compatible
with the pandas codebase.
-Optionally, you may wish to setup `pre-commit hooks <https://pre-commit.com/>`_
-to automatically run ``black`` and ``flake8`` when you make a git commit. This
-can be done by installing ``pre-commit``::
-
- pip install pre-commit
-
-and then running::
-
- pre-commit install
-
-from the root of the pandas repository. Now ``black`` and ``flake8`` will be run
-each time you commit changes. You can skip these checks with
-``git commit --no-verify``.
+If you wish to run these checks automatically, we encourage you to use
+:ref:`pre-commits <contributing.pre-commit>` instead.
One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this
command will catch any stylistic errors in your changes specifically, but
@@ -676,7 +679,7 @@ be beware it may not catch all of them. For example, if you delete the only
usage of an imported function, it is stylistically incorrect to import an
unused function. However, style-checking the diff will not catch this because
the actual import is not part of the diff. Thus, for completeness, you should
-run this command, though it will take longer::
+run this command, though it may take longer::
git diff upstream/master --name-only -- "*.py" | xargs -r flake8
@@ -694,6 +697,8 @@ behaviour as follows::
This will get all the files being changed by the PR (and ending with ``.py``),
and run ``flake8`` on them, one after the other.
+Note that these commands can be run analogously with ``black``.
+
.. _contributing.import-formatting:
Import formatting
@@ -716,7 +721,6 @@ A summary of our current import sections ( in order ):
Imports are alphabetically sorted within these sections.
-
As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
isort --recursive --check-only pandas
@@ -740,8 +744,37 @@ to automatically format imports correctly. This will modify your local copy of t
The `--recursive` flag can be passed to sort all files in a directory.
+Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`::
+
+ git diff upstream/master --name-only -- "*.py" | xargs -r isort
+
+The same caveats as above apply if you are on OSX or Windows.
+
You can then verify the changes look ok, then git :ref:`commit <contributing.commit-code>` and :ref:`push <contributing.push-code>`.
+.. _contributing.pre-commit:
+
+Pre-Commit
+~~~~~~~~~~
+
+You can run many of these styling checks manually as we have described above. However,
+we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead
+to automatically run ``black``, ``flake8``, ``isort`` when you make a git commit. This
+can be done by installing ``pre-commit``::
+
+ pip install pre-commit
+
+and then running::
+
+ pre-commit install
+
+from the root of the pandas repository. Now all of the styling checks will be
+run each time you commit changes without your needing to run each one manually.
+In addition, using this pre-commit hook will also allow you to more easily
+remain up-to-date with our code checks as they change.
+
+Note that if needed, you can skip these checks with ``git commit --no-verify``.
+
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
@@ -939,9 +972,9 @@ Adding tests is one of the most common requests after code is pushed to *pandas*
it is worth getting in the habit of writing tests ahead of time so this is never an issue.
Like many packages, *pandas* uses `pytest
-<http://docs.pytest.org/en/latest/>`_ and the convenient
+<https://docs.pytest.org/en/latest/>`_ and the convenient
extensions in `numpy.testing
-<http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
+<https://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
.. note::
@@ -992,7 +1025,7 @@ Transitioning to ``pytest``
class TestReallyCoolFeature:
pass
-Going forward, we are moving to a more *functional* style using the `pytest <http://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing
+Going forward, we are moving to a more *functional* style using the `pytest <https://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing
framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this:
.. code-block:: python
@@ -1225,7 +1258,7 @@ On Windows, one can type::
This can significantly reduce the time it takes to locally run tests before
submitting a pull request.
-For more, see the `pytest <http://docs.pytest.org/en/latest/>`_ documentation.
+For more, see the `pytest <https://docs.pytest.org/en/latest/>`_ documentation.
Furthermore one can run
@@ -1504,3 +1537,19 @@ The branch will still exist on GitHub, so to delete it there do::
git push origin --delete shiny-new-feature
.. _Gitter: https://gitter.im/pydata/pandas
+
+
+Tips for a successful Pull Request
+==================================
+
+If you have made it to the `Review your code`_ phase, one of the core contributors may
+take a look. Please note however that a handful of people are responsible for reviewing
+all of the contributions, which can often lead to bottlenecks.
+
+To improve the chances of your pull request being reviewed, you should:
+
+- **Reference an open issue** for non-trivial changes to clarify the PR's purpose
+- **Ensure you have appropriate tests**. These should be the first part of any PR
+- **Keep your pull requests as simple as possible**. Larger PRs take longer to review
+- **Ensure that CI is in a green state**. Reviewers may not even look otherwise
+- **Keep** `Updating your pull request`_, either by request or every few days
diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst
index cb32f0e1ee475..649dd37b497b2 100644
--- a/doc/source/development/contributing_docstring.rst
+++ b/doc/source/development/contributing_docstring.rst
@@ -77,8 +77,8 @@ language that allows encoding styles in plain text files. Documentation
about reStructuredText can be found in:
* `Sphinx reStructuredText primer <https://www.sphinx-doc.org/en/stable/rest.html>`_
-* `Quick reStructuredText reference <http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_
-* `Full reStructuredText specification <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html>`_
+* `Quick reStructuredText reference <https://docutils.sourceforge.io/docs/user/rst/quickref.html>`_
+* `Full reStructuredText specification <https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html>`_
Pandas has some helpers for sharing docstrings between related classes, see
:ref:`docstring.sharing`.
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index 89d43e8a43825..270f20e8118bc 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -306,7 +306,7 @@ Subclassing pandas data structures
1. Extensible method chains with :ref:`pipe <basics.pipe>`
- 2. Use *composition*. See `here <http://en.wikipedia.org/wiki/Composition_over_inheritance>`_.
+ 2. Use *composition*. See `here <https://en.wikipedia.org/wiki/Composition_over_inheritance>`_.
3. Extending by :ref:`registering an accessor <extending.register-accessors>`
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 0d1088cc8a6ca..e65b66fc243c5 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -36,7 +36,7 @@ of what it means to be a maintainer.
* Provide experience / wisdom on API design questions to ensure consistency and maintainability
* Project organization (run / attend developer meetings, represent pandas)
-http://matthewrocklin.com/blog/2019/05/18/maintainer may be interesting background
+https://matthewrocklin.com/blog/2019/05/18/maintainer may be interesting background
reading.
.. _maintaining.triage:
@@ -78,7 +78,7 @@ Here's a typical workflow for triaging a newly opened issue.
4. **Is the issue minimal and reproducible**?
For bug reports, we ask that the reporter provide a minimal reproducible
- example. See http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
+ example. See https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
for a good explanation. If the example is not reproducible, or if it's
*clearly* not minimal, feel free to ask the reporter if they can provide
and example or simplify the provided one. Do acknowledge that writing
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst
index 00598830e2fe9..fafe63d80249c 100644
--- a/doc/source/development/roadmap.rst
+++ b/doc/source/development/roadmap.rst
@@ -129,20 +129,6 @@ Some specific goals include
* Improve the overall organization of the documentation and specific subsections
of the documentation to make navigation and finding content easier.
-Package docstring validation
-----------------------------
-
-To improve the quality and consistency of pandas docstrings, we've developed
-tooling to check docstrings in a variety of ways.
-https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py
-contains the checks.
-
-Like many other projects, pandas uses the
-`numpydoc <https://numpydoc.readthedocs.io/en/latest/>`__ style for writing
-docstrings. With the collaboration of the numpydoc maintainers, we'd like to
-move the checks to a package other than pandas so that other projects can easily
-use them as well.
-
Performance monitoring
----------------------
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 7bd5ba7ecdf0b..fb06ee122ae88 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -41,6 +41,16 @@ Pyjanitor provides a clean API for cleaning data, using method chaining.
Engarde is a lightweight library used to explicitly state assumptions about your datasets
and check that they're *actually* true.
+`pandas-path <https://github.com/drivendataorg/pandas-path/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since Python 3.4, `pathlib <https://docs.python.org/3/library/pathlib.html>`_ has been
+included in the Python standard library. Path objects provide a simple
+and delightful way to interact with the file system. The pandas-path package enables the
+Path API for pandas through a custom accessor ``.path``. Getting just the filenames from
+a series of full file paths is as simple as ``my_files.path.name``. Other convenient operations like
+joining paths, replacing file extensions, and checking if files exist are also available.
+
.. _ecosystem.stats:
Statistics and machine learning
@@ -112,16 +122,14 @@ also goes beyond matplotlib and pandas with the option to perform statistical
estimation while plotting, aggregating across observations and visualizing the
fit of statistical models to emphasize patterns in a dataset.
-`yhat/ggpy <https://github.com/yhat/ggpy>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`plotnine <https://github.com/has2k1/plotnine/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hadley Wickham's `ggplot2 <https://ggplot2.tidyverse.org/>`__ is a foundational exploratory visualization package for the R language.
Based on `"The Grammar of Graphics" <https://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it
provides a powerful, declarative and extremely general way to generate bespoke plots of any kind of data.
-It's really quite incredible. Various implementations to other languages are available,
-but a faithful implementation for Python users has long been missing. Although still young
-(as of Jan-2014), the `yhat/ggpy <https://github.com/yhat/ggpy>`__ project has been
-progressing quickly in that direction.
+Various implementations to other languages are available.
+A good implementation for Python users is `has2k1/plotnine <https://github.com/has2k1/plotnine/>`__.
`IPython Vega <https://github.com/vega/ipyvega>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -295,8 +303,8 @@ dimensional arrays, rather than the tabular data for which pandas excels.
Out-of-core
-------------
-`Blaze <http://blaze.pydata.org/>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`Blaze <https://blaze.pydata.org/>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Blaze provides a standard API for doing computations with various
in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, PyTables,
@@ -386,12 +394,16 @@ A directory of projects providing
:ref:`extension accessors <extending.register-accessors>`. This is for users to
discover new accessors and for library authors to coordinate on the namespace.
-============== ========== =========================
-Library Accessor Classes
-============== ========== =========================
-`cyberpandas`_ ``ip`` ``Series``
-`pdvega`_ ``vgplot`` ``Series``, ``DataFrame``
-============== ========== =========================
+=============== ========== ========================= ===============================================================
+Library Accessor Classes Description
+=============== ========== ========================= ===============================================================
+`cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses.
+`pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library.
+`pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series.
+=============== ========== ========================= ===============================================================
.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest
.. _pdvega: https://altair-viz.github.io/pdvega/
+.. _Altair: https://altair-viz.github.io/
+.. _pandas_path: https://github.com/drivendataorg/pandas-path/
+.. _pathlib.Path: https://docs.python.org/3/library/pathlib.html
\ No newline at end of file
diff --git a/doc/source/getting_started/comparison/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst
index f67f46fc2b29b..e1a4cfe49b7d1 100644
--- a/doc/source/getting_started/comparison/comparison_with_r.rst
+++ b/doc/source/getting_started/comparison/comparison_with_r.rst
@@ -6,9 +6,9 @@ Comparison with R / R libraries
*******************************
Since ``pandas`` aims to provide a lot of the data manipulation and analysis
-functionality that people use `R <http://www.r-project.org/>`__ for, this page
+functionality that people use `R <https://www.r-project.org/>`__ for, this page
was started to provide a more detailed look at the `R language
-<http://en.wikipedia.org/wiki/R_(programming_language)>`__ and its many third
+<https://en.wikipedia.org/wiki/R_(programming_language)>`__ and its many third
party libraries as they relate to ``pandas``. In comparisons with R and CRAN
libraries, we care about the following things:
@@ -517,37 +517,37 @@ For more details and examples see :ref:`categorical introduction <categorical>`
.. |c| replace:: ``c``
-.. _c: http://stat.ethz.ch/R-manual/R-patched/library/base/html/c.html
+.. _c: https://stat.ethz.ch/R-manual/R-patched/library/base/html/c.html
.. |aggregate| replace:: ``aggregate``
-.. _aggregate: http://finzi.psych.upenn.edu/R/library/stats/html/aggregate.html
+.. _aggregate: https://stat.ethz.ch/R-manual/R-patched/library/stats/html/aggregate.html
.. |match| replace:: ``match`` / ``%in%``
-.. _match: http://finzi.psych.upenn.edu/R/library/base/html/match.html
+.. _match: https://stat.ethz.ch/R-manual/R-patched/library/base/html/match.html
.. |tapply| replace:: ``tapply``
-.. _tapply: http://finzi.psych.upenn.edu/R/library/base/html/tapply.html
+.. _tapply: https://stat.ethz.ch/R-manual/R-patched/library/base/html/tapply.html
.. |with| replace:: ``with``
-.. _with: http://finzi.psych.upenn.edu/R/library/base/html/with.html
+.. _with: https://stat.ethz.ch/R-manual/R-patched/library/base/html/with.html
.. |subset| replace:: ``subset``
-.. _subset: http://finzi.psych.upenn.edu/R/library/base/html/subset.html
+.. _subset: https://stat.ethz.ch/R-manual/R-patched/library/base/html/subset.html
.. |ddply| replace:: ``ddply``
-.. _ddply: http://www.inside-r.org/packages/cran/plyr/docs/ddply
+.. _ddply: https://cran.r-project.org/web/packages/plyr/plyr.pdf#Rfn.ddply.1
.. |meltarray| replace:: ``melt.array``
-.. _meltarray: http://www.inside-r.org/packages/cran/reshape2/docs/melt.array
+.. _meltarray: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.array.1
.. |meltlist| replace:: ``melt.list``
-.. meltlist: http://www.inside-r.org/packages/cran/reshape2/docs/melt.list
+.. _meltlist: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.list.1
.. |meltdf| replace:: ``melt.data.frame``
-.. meltdf: http://www.inside-r.org/packages/cran/reshape2/docs/melt.data.frame
+.. _meltdf: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.melt.data.frame.1
.. |cast| replace:: ``cast``
-.. cast: http://www.inside-r.org/packages/cran/reshape2/docs/cast
+.. _cast: https://cran.r-project.org/web/packages/reshape2/reshape2.pdf#Rfn.cast.1
.. |factor| replace:: ``factor``
.. _factor: https://stat.ethz.ch/R-manual/R-devel/library/base/html/factor.html
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index fec6bae1e0330..decf12db77af2 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -673,6 +673,6 @@ Disk vs memory
Pandas and Stata both operate exclusively in memory. This means that the size of
data able to be loaded in pandas is limited by your machine's memory.
If out of core processing is needed, one possibility is the
-`dask.dataframe <http://dask.pydata.org/en/latest/dataframe.html>`_
+`dask.dataframe <https://dask.pydata.org/en/latest/dataframe.html>`_
library, which provides a subset of pandas functionality for an
on-disk ``DataFrame``.
diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index 8bd271815549d..5d7c9e405cfc2 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -136,7 +136,7 @@ Like a NumPy array, a pandas Series has a :attr:`~Series.dtype`.
This is often a NumPy dtype. However, pandas and 3rd-party libraries
extend NumPy's type system in a few places, in which case the dtype would
-be a :class:`~pandas.api.extensions.ExtensionDtype`. Some examples within
+be an :class:`~pandas.api.extensions.ExtensionDtype`. Some examples within
pandas are :ref:`categorical` and :ref:`integer_na`. See :ref:`basics.dtypes`
for more.
@@ -609,7 +609,7 @@ union of the column and row labels.
When doing an operation between DataFrame and Series, the default behavior is
to align the Series **index** on the DataFrame **columns**, thus `broadcasting
-<http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`__
+<https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`__
row-wise. For example:
.. ipython:: python
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index b3fd443e662a9..ca285243b5f50 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -7,13 +7,13 @@ Installation
============
The easiest way to install pandas is to install it
-as part of the `Anaconda <http://docs.continuum.io/anaconda/>`__ distribution, a
+as part of the `Anaconda <https://docs.continuum.io/anaconda/>`__ distribution, a
cross platform distribution for data analysis and scientific computing.
This is the recommended installation method for most users.
Instructions for installing from source,
`PyPI <https://pypi.org/project/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a
-`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
+`development version <https://github.com/pandas-dev/pandas>`__ are also provided.
Python version support
----------------------
@@ -28,28 +28,28 @@ Installing pandas
Installing with Anaconda
~~~~~~~~~~~~~~~~~~~~~~~~
-Installing pandas and the rest of the `NumPy <http://www.numpy.org/>`__ and
-`SciPy <http://www.scipy.org/>`__ stack can be a little
+Installing pandas and the rest of the `NumPy <https://www.numpy.org/>`__ and
+`SciPy <https://www.scipy.org/>`__ stack can be a little
difficult for inexperienced users.
The simplest way to install not only pandas, but Python and the most popular
-packages that make up the `SciPy <http://www.scipy.org/>`__ stack
-(`IPython <http://ipython.org/>`__, `NumPy <http://www.numpy.org/>`__,
-`Matplotlib <http://matplotlib.org/>`__, ...) is with
-`Anaconda <http://docs.continuum.io/anaconda/>`__, a cross-platform
+packages that make up the `SciPy <https://www.scipy.org/>`__ stack
+(`IPython <https://ipython.org/>`__, `NumPy <https://www.numpy.org/>`__,
+`Matplotlib <https://matplotlib.org/>`__, ...) is with
+`Anaconda <https://docs.continuum.io/anaconda/>`__, a cross-platform
(Linux, Mac OS X, Windows) Python distribution for data analytics and
scientific computing.
After running the installer, the user will have access to pandas and the
-rest of the `SciPy <http://www.scipy.org/>`__ stack without needing to install
+rest of the `SciPy <https://www.scipy.org/>`__ stack without needing to install
anything else, and without needing to wait for any software to be compiled.
-Installation instructions for `Anaconda <http://docs.continuum.io/anaconda/>`__
-`can be found here <http://docs.continuum.io/anaconda/install.html>`__.
+Installation instructions for `Anaconda <https://docs.continuum.io/anaconda/>`__
+`can be found here <https://docs.continuum.io/anaconda/install.html>`__.
A full list of the packages available as part of the
-`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution
-`can be found here <http://docs.continuum.io/anaconda/pkg-docs.html>`__.
+`Anaconda <https://docs.continuum.io/anaconda/>`__ distribution
+`can be found here <https://docs.continuum.io/anaconda/packages/pkg-docs/>`__.
Another advantage to installing Anaconda is that you don't need
admin rights to install it. Anaconda can install in the user's home directory,
@@ -62,28 +62,28 @@ Installing with Miniconda
~~~~~~~~~~~~~~~~~~~~~~~~~
The previous section outlined how to get pandas installed as part of the
-`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution.
+`Anaconda <https://docs.continuum.io/anaconda/>`__ distribution.
However this approach means you will install well over one hundred packages
and involves downloading the installer which is a few hundred megabytes in size.
If you want to have more control on which packages, or have a limited internet
bandwidth, then installing pandas with
-`Miniconda <http://conda.pydata.org/miniconda.html>`__ may be a better solution.
+`Miniconda <https://conda.pydata.org/miniconda.html>`__ may be a better solution.
-`Conda <http://conda.pydata.org/docs/>`__ is the package manager that the
-`Anaconda <http://docs.continuum.io/anaconda/>`__ distribution is built upon.
+`Conda <https://conda.pydata.org/docs/>`__ is the package manager that the
+`Anaconda <https://docs.continuum.io/anaconda/>`__ distribution is built upon.
It is a package manager that is both cross-platform and language agnostic
(it can play a similar role to a pip and virtualenv combination).
-`Miniconda <http://conda.pydata.org/miniconda.html>`__ allows you to create a
+`Miniconda <https://conda.pydata.org/miniconda.html>`__ allows you to create a
minimal self contained Python installation, and then use the
-`Conda <http://conda.pydata.org/docs/>`__ command to install additional packages.
+`Conda <https://conda.pydata.org/docs/>`__ command to install additional packages.
-First you will need `Conda <http://conda.pydata.org/docs/>`__ to be installed and
+First you will need `Conda <https://conda.pydata.org/docs/>`__ to be installed and
downloading and running the `Miniconda
-<http://conda.pydata.org/miniconda.html>`__
+<https://conda.pydata.org/miniconda.html>`__
will do this for you. The installer
-`can be found here <http://conda.pydata.org/miniconda.html>`__
+`can be found here <https://conda.pydata.org/miniconda.html>`__.
The next step is to create a new conda environment. A conda environment is like a
virtualenv that allows you to specify a specific version of Python and set of libraries.
@@ -113,7 +113,7 @@ To install other packages, IPython for example::
conda install ipython
-To install the full `Anaconda <http://docs.continuum.io/anaconda/>`__
+To install the full `Anaconda <https://docs.continuum.io/anaconda/>`__
distribution::
conda install anaconda
@@ -146,17 +146,16 @@ Installing using your Linux distribution's package manager.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The commands in this table will install pandas for Python 3 from your distribution.
-To install pandas for Python 2, you may need to use the ``python-pandas`` package.
.. csv-table::
:header: "Distribution", "Status", "Download / Repository Link", "Install method"
:widths: 10, 10, 20, 50
- Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
+ Debian, stable, `official Debian repository <https://packages.debian.org/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas``
- Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
- OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python3-pandas``
+ Ubuntu, stable, `official Ubuntu repository <https://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
+ OpenSuse, stable, `OpenSuse Repository <https://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python3-pandas``
Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas``
Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas``
@@ -177,7 +176,7 @@ pandas is equipped with an exhaustive set of unit tests, covering about 97% of
the code base as of this writing. To run it on your machine to verify that
everything is working (and that you have all of the dependencies, soft and hard,
installed), make sure you have `pytest
-<http://docs.pytest.org/en/latest/>`__ >= 5.0.1 and `Hypothesis
+<https://docs.pytest.org/en/latest/>`__ >= 5.0.1 and `Hypothesis
<https://hypothesis.readthedocs.io/>`__ >= 3.58, then run:
::
@@ -204,9 +203,9 @@ Dependencies
Package Minimum supported version
================================================================ ==========================
`setuptools <https://setuptools.readthedocs.io/en/latest/>`__ 24.2.0
-`NumPy <http://www.numpy.org>`__ 1.13.3
+`NumPy <https://www.numpy.org>`__ 1.13.3
`python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.6.1
-`pytz <http://pytz.sourceforge.net/>`__ 2017.2
+`pytz <https://pypi.org/project/pytz/>`__ 2017.2
================================================================ ==========================
.. _install.recommended_dependencies:
@@ -264,6 +263,7 @@ pyarrow 0.12.0 Parquet, ORC (requires 0.13.0), and
pymysql 0.7.11 MySQL engine for sqlalchemy
pyreadstat SPSS files (.sav) reading
pytables 3.4.2 HDF5 reading / writing
+pyxlsb 1.0.6 Reading for xlsb files
qtpy Clipboard I/O
s3fs 0.3.0 Amazon S3 access
tabulate 0.8.3 Printing in Markdown-friendly format (see `tabulate`_)
@@ -302,6 +302,6 @@ top-level :func:`~pandas.read_html` function:
usage of the above three libraries.
.. _html5lib: https://github.com/html5lib/html5lib-python
-.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
-.. _lxml: http://lxml.de
+.. _BeautifulSoup4: https://www.crummy.com/software/BeautifulSoup
+.. _lxml: https://lxml.de
.. _tabulate: https://github.com/astanin/python-tabulate
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst
index 1ed0e8f635b58..434d791474807 100644
--- a/doc/source/getting_started/tutorials.rst
+++ b/doc/source/getting_started/tutorials.rst
@@ -23,12 +23,12 @@ Community guides
pandas Cookbook by Julia Evans
------------------------------
-The goal of this 2015 cookbook (by `Julia Evans <http://jvns.ca>`_) is to
+The goal of this 2015 cookbook (by `Julia Evans <https://jvns.ca>`_) is to
give you some concrete examples for getting started with pandas. These
are examples with real-world data, and all the bugs and weirdness that
entails.
For the table of contents, see the `pandas-cookbook GitHub
-repository <http://github.com/jvns/pandas-cookbook>`_.
+repository <https://github.com/jvns/pandas-cookbook>`_.
Learn Pandas by Hernan Rojas
----------------------------
@@ -38,10 +38,10 @@ A set of lesson for new pandas users: https://bitbucket.org/hrojas/learn-pandas
Practical data analysis with Python
-----------------------------------
-This `guide <http://wavedatalab.github.io/datawithpython>`_ is an introduction to the data analysis process using the Python data ecosystem and an interesting open dataset.
-There are four sections covering selected topics as `munging data <http://wavedatalab.github.io/datawithpython/munge.html>`__,
-`aggregating data <http://wavedatalab.github.io/datawithpython/aggregate.html>`_, `visualizing data <http://wavedatalab.github.io/datawithpython/visualize.html>`_
-and `time series <http://wavedatalab.github.io/datawithpython/timeseries.html>`_.
+This `guide <https://wavedatalab.github.io/datawithpython>`_ is an introduction to the data analysis process using the Python data ecosystem and an interesting open dataset.
+There are four sections covering selected topics as `munging data <https://wavedatalab.github.io/datawithpython/munge.html>`__,
+`aggregating data <https://wavedatalab.github.io/datawithpython/aggregate.html>`_, `visualizing data <https://wavedatalab.github.io/datawithpython/visualize.html>`_
+and `time series <https://wavedatalab.github.io/datawithpython/timeseries.html>`_.
.. _tutorial-exercises-new-users:
@@ -61,13 +61,13 @@ Tutorial series written in 2016 by
The source may be found in the GitHub repository
`TomAugspurger/effective-pandas <https://github.com/TomAugspurger/effective-pandas>`_.
-* `Modern Pandas <http://tomaugspurger.github.io/modern-1-intro.html>`_
-* `Method Chaining <http://tomaugspurger.github.io/method-chaining.html>`_
-* `Indexes <http://tomaugspurger.github.io/modern-3-indexes.html>`_
-* `Performance <http://tomaugspurger.github.io/modern-4-performance.html>`_
-* `Tidy Data <http://tomaugspurger.github.io/modern-5-tidy.html>`_
-* `Visualization <http://tomaugspurger.github.io/modern-6-visualization.html>`_
-* `Timeseries <http://tomaugspurger.github.io/modern-7-timeseries.html>`_
+* `Modern Pandas <https://tomaugspurger.github.io/modern-1-intro.html>`_
+* `Method Chaining <https://tomaugspurger.github.io/method-chaining.html>`_
+* `Indexes <https://tomaugspurger.github.io/modern-3-indexes.html>`_
+* `Performance <https://tomaugspurger.github.io/modern-4-performance.html>`_
+* `Tidy Data <https://tomaugspurger.github.io/modern-5-tidy.html>`_
+* `Visualization <https://tomaugspurger.github.io/modern-6-visualization.html>`_
+* `Timeseries <https://tomaugspurger.github.io/modern-7-timeseries.html>`_
Excel charts with pandas, vincent and xlsxwriter
------------------------------------------------
@@ -89,21 +89,21 @@ Video tutorials
* `Data analysis in Python with pandas <https://www.youtube.com/playlist?list=PL5-da3qGB5ICCsgW1MxlZ0Hq8LL5U3u9y>`_
(2016-2018)
`GitHub repo <https://github.com/justmarkham/pandas-videos>`__ and
- `Jupyter Notebook <http://nbviewer.jupyter.org/github/justmarkham/pandas-videos/blob/master/pandas.ipynb>`__
+ `Jupyter Notebook <https://nbviewer.jupyter.org/github/justmarkham/pandas-videos/blob/master/pandas.ipynb>`__
* `Best practices with pandas <https://www.youtube.com/playlist?list=PL5-da3qGB5IBITZj_dYSFqnd_15JgqwA6>`_
(2018)
`GitHub repo <https://github.com/justmarkham/pycon-2018-tutorial>`__ and
- `Jupyter Notebook <http://nbviewer.jupyter.org/github/justmarkham/pycon-2018-tutorial/blob/master/tutorial.ipynb>`__
+ `Jupyter Notebook <https://nbviewer.jupyter.org/github/justmarkham/pycon-2018-tutorial/blob/master/tutorial.ipynb>`__
Various tutorials
-----------------
-* `Wes McKinney's (pandas BDFL) blog <http://blog.wesmckinney.com/>`_
+* `Wes McKinney's (pandas BDFL) blog <https://wesmckinney.com/archives.html>`_
* `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson <http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/>`_
-* `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <http://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_
-* `Financial analysis in Python, by Thomas Wiecki <http://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_
+* `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <https://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_
+* `Financial analysis in Python, by Thomas Wiecki <https://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_
* `Intro to pandas data structures, by Greg Reda <http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/>`_
-* `Pandas and Python: Top 10, by Manish Amde <http://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_
-* `Pandas DataFrames Tutorial, by Karlijn Willems <http://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_
+* `Pandas and Python: Top 10, by Manish Amde <https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_
+* `Pandas DataFrames Tutorial, by Karlijn Willems <https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_
* `A concise tutorial with real life examples <https://tutswiki.com/pandas-cookbook/chapter1>`_
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 10705787dfedf..5690bb2e4a875 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -1,10 +1,12 @@
+:notoc:
+
.. pandas documentation master file, created by
.. module:: pandas
-*********************************************
-pandas: powerful Python data analysis toolkit
-*********************************************
+********************
+pandas documentation
+********************
**Date**: |today| **Version**: |version|
@@ -21,7 +23,83 @@ pandas: powerful Python data analysis toolkit
easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__
programming language.
-See the :ref:`overview` for more detail about what's in the library.
+.. raw:: html
+
+ <div class="container">
+ <div class="row">
+ <div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
+ <div class="card text-center intro-card shadow">
+ <img src="_static/index_getting_started.svg" class="card-img-top" alt="getting started with pandas action icon" height="52">
+ <div class="card-body flex-fill">
+ <h5 class="card-title">Getting started</h5>
+ <p class="card-text">New to <em>pandas</em>? Check out the getting started guides. They
+ contain an introduction to <em>pandas'</em> main concepts and links to additional tutorials.</p>
+
+.. container:: custom-button
+
+ :ref:`To the getting started guides<getting_started>`
+
+.. raw:: html
+
+ </div>
+ </div>
+ </div>
+ <div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
+ <div class="card text-center intro-card shadow">
+ <img src="_static/index_user_guide.svg" class="card-img-top" alt="pandas user guide action icon" height="52">
+ <div class="card-body flex-fill">
+ <h5 class="card-title">User guide</h5>
+ <p class="card-text">The user guide provides in-depth information on the
+ key concepts of pandas with useful background information and explanation.</p>
+
+.. container:: custom-button
+
+ :ref:`To the user guide<user_guide>`
+
+.. raw:: html
+
+ </div>
+ </div>
+ </div>
+ <div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
+ <div class="card text-center intro-card shadow">
+ <img src="_static/index_api.svg" class="card-img-top" alt="api of pandas action icon" height="52">
+ <div class="card-body flex-fill">
+ <h5 class="card-title">API reference</h5>
+ <p class="card-text">The reference guide contains a detailed description of
+ the pandas API. The reference describes how the methods work and which parameters can
+ be used. It assumes that you have an understanding of the key concepts.</p>
+
+.. container:: custom-button
+
+ :ref:`To the reference guide<api>`
+
+.. raw:: html
+
+ </div>
+ </div>
+ </div>
+ <div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
+ <div class="card text-center intro-card shadow">
+ <img src="_static/index_contribute.svg" class="card-img-top" alt="contribute to pandas action icon" height="52">
+ <div class="card-body flex-fill">
+ <h5 class="card-title">Developer guide</h5>
+ <p class="card-text">Saw a typo in the documentation? Want to improve
+ existing functionalities? The contributing guidelines will guide
+ you through the process of improving pandas.</p>
+
+.. container:: custom-button
+
+ :ref:`To the development guide<development>`
+
+.. raw:: html
+
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
{% if single_doc and single_doc.endswith('.rst') -%}
.. toctree::
@@ -39,7 +117,7 @@ See the :ref:`overview` for more detail about what's in the library.
:hidden:
{% endif %}
{% if not single_doc %}
- What's New in 1.0.0 <whatsnew/v1.0.0>
+ What's New in 1.1.0 <whatsnew/v1.1.0>
getting_started/index
user_guide/index
{% endif -%}
@@ -50,68 +128,3 @@ See the :ref:`overview` for more detail about what's in the library.
development/index
whatsnew/index
{% endif %}
-
-* :doc:`whatsnew/v1.0.0`
-* :doc:`getting_started/index`
-
- * :doc:`getting_started/install`
- * :doc:`getting_started/overview`
- * :doc:`getting_started/10min`
- * :doc:`getting_started/basics`
- * :doc:`getting_started/dsintro`
- * :doc:`getting_started/comparison/index`
- * :doc:`getting_started/tutorials`
-
-* :doc:`user_guide/index`
-
- * :doc:`user_guide/io`
- * :doc:`user_guide/indexing`
- * :doc:`user_guide/advanced`
- * :doc:`user_guide/merging`
- * :doc:`user_guide/reshaping`
- * :doc:`user_guide/text`
- * :doc:`user_guide/missing_data`
- * :doc:`user_guide/categorical`
- * :doc:`user_guide/integer_na`
- * :doc:`user_guide/boolean`
- * :doc:`user_guide/visualization`
- * :doc:`user_guide/computation`
- * :doc:`user_guide/groupby`
- * :doc:`user_guide/timeseries`
- * :doc:`user_guide/timedeltas`
- * :doc:`user_guide/style`
- * :doc:`user_guide/options`
- * :doc:`user_guide/enhancingperf`
- * :doc:`user_guide/scale`
- * :doc:`user_guide/sparse`
- * :doc:`user_guide/gotchas`
- * :doc:`user_guide/cookbook`
-
-* :doc:`ecosystem`
-* :doc:`reference/index`
-
- * :doc:`reference/io`
- * :doc:`reference/general_functions`
- * :doc:`reference/series`
- * :doc:`reference/frame`
- * :doc:`reference/arrays`
- * :doc:`reference/panel`
- * :doc:`reference/indexing`
- * :doc:`reference/offset_frequency`
- * :doc:`reference/window`
- * :doc:`reference/groupby`
- * :doc:`reference/resampling`
- * :doc:`reference/style`
- * :doc:`reference/plotting`
- * :doc:`reference/general_utility_functions`
- * :doc:`reference/extensions`
-
-* :doc:`development/index`
-
- * :doc:`development/contributing`
- * :doc:`development/code_style`
- * :doc:`development/internals`
- * :doc:`development/extending`
- * :doc:`development/developer`
-
-* :doc:`whatsnew/index`
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst
index c072237850d82..78fdfbfd28144 100644
--- a/doc/source/reference/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -66,7 +66,7 @@ behaves correctly.
.. autosummary::
:toctree: api/
- api.indexers.check_bool_array_indexer
+ api.indexers.check_array_indexer
The sentinel ``pandas.api.extensions.no_default`` is used as the default
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 01aa6c60e3b2f..c7b1cc1c832be 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -28,6 +28,7 @@ Attributes and underlying data
:toctree: api/
DataFrame.dtypes
+ DataFrame.info
DataFrame.select_dtypes
DataFrame.values
DataFrame.axes
@@ -43,6 +44,7 @@ Conversion
:toctree: api/
DataFrame.astype
+ DataFrame.convert_dtypes
DataFrame.infer_objects
DataFrame.copy
DataFrame.isna
@@ -346,7 +348,6 @@ Serialization / IO / conversion
DataFrame.from_dict
DataFrame.from_records
- DataFrame.info
DataFrame.to_parquet
DataFrame.to_pickle
DataFrame.to_csv
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 4ad6a7b014532..1a69fa076dbf0 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -46,6 +46,7 @@ Conversion
:toctree: api/
Series.astype
+ Series.convert_dtypes
Series.infer_objects
Series.copy
Series.bool
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index a2150c207c0b0..9951642ca98a4 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -58,7 +58,7 @@ series in the DataFrame, also excluding NA/null values.
is not guaranteed to be positive semi-definite. This could lead to
estimated correlations having absolute values which are greater than one,
and/or a non-invertible covariance matrix. See `Estimation of covariance
- matrices <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`_
+ matrices <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`_
for more details.
.. ipython:: python
@@ -346,8 +346,9 @@ if installed as an optional dependency. The apply aggregation can be executed us
``engine='numba'`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
Numba will be applied in potentially two routines:
-1. If ``func`` is a standard Python function, the engine will `JIT <http://numba.pydata.org/numba-doc/latest/user/overview.html>`__
+1. If ``func`` is a standard Python function, the engine will `JIT <https://numba.pydata.org/numba-doc/latest/user/overview.html>`__
the passed function. ``func`` can also be a JITed function in which case the engine will not JIT the function again.
+
2. The engine will JIT the for loop where the apply function is applied to each window.
The ``engine_kwargs`` argument is a dictionary of keyword arguments that will be passed into the
@@ -1063,5 +1064,5 @@ are scaled by debiasing factors
(For :math:`w_i = 1`, this reduces to the usual :math:`N / (N - 1)` factor,
with :math:`N = t + 1`.)
-See `Weighted Sample Variance <http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`__
+See `Weighted Sample Variance <https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`__
on Wikipedia for further details.
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index f581d183b9413..4afdb14e5c39e 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -927,7 +927,7 @@ CSV
The :ref:`CSV <io.read_csv_table>` docs
-`read_csv in action <http://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/>`__
+`read_csv in action <https://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/>`__
`appending to a csv
<https://stackoverflow.com/questions/17134942/pandas-dataframe-output-end-of-csv>`__
@@ -951,7 +951,7 @@ using that handle to read.
<https://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize>`__
`Dealing with bad lines
-<http://github.com/pandas-dev/pandas/issues/2886>`__
+<https://github.com/pandas-dev/pandas/issues/2886>`__
`Dealing with bad lines II
<http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/>`__
@@ -1082,7 +1082,7 @@ The :ref:`Excel <io.excel>` docs
<https://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe>`__
`Modifying formatting in XlsxWriter output
-<http://pbpython.com/improve-pandas-excel-output.html>`__
+<https://pbpython.com/improve-pandas-excel-output.html>`__
.. _cookbook.html:
@@ -1103,7 +1103,7 @@ The :ref:`HDFStores <io.hdf5>` docs
<https://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table>`__
`Managing heterogeneous data using a linked multiple table hierarchy
-<http://github.com/pandas-dev/pandas/issues/3032>`__
+<https://github.com/pandas-dev/pandas/issues/3032>`__
`Merging on-disk tables with millions of rows
<https://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__
@@ -1236,7 +1236,7 @@ Computation
-----------
`Numerical integration (sample-based) of a time series
-<http://nbviewer.ipython.org/5720498>`__
+<https://nbviewer.ipython.org/5720498>`__
Correlation
***********
@@ -1284,7 +1284,7 @@ Timedeltas
The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
`Using timedeltas
-<http://github.com/pandas-dev/pandas/pull/2899>`__
+<https://github.com/pandas-dev/pandas/pull/2899>`__
.. ipython:: python
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index 2df5b9d82dcc3..1d84d05fda079 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -20,7 +20,7 @@ Cython (writing C extensions for pandas)
For many use cases writing pandas in pure Python and NumPy is sufficient. In some
computationally heavy applications however, it can be possible to achieve sizable
-speed-ups by offloading work to `cython <http://cython.org/>`__.
+speed-ups by offloading work to `cython <https://cython.org/>`__.
This tutorial assumes you have refactored as much as possible in Python, for example
by trying to remove for-loops and making use of NumPy vectorization. It's always worth
@@ -69,7 +69,7 @@ We achieve our result by using ``apply`` (row-wise):
But clearly this isn't fast enough for us. Let's take a look and see where the
time is spent during this operation (limited to the most time consuming
-four calls) using the `prun ipython magic function <http://ipython.org/ipython-doc/stable/api/generated/IPython.core.magics.execution.html#IPython.core.magics.execution.ExecutionMagics.prun>`__:
+four calls) using the `prun ipython magic function <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-prun>`__:
.. ipython:: python
@@ -78,11 +78,6 @@ four calls) using the `prun ipython magic function <http://ipython.org/ipython-d
By far the majority of time is spend inside either ``integrate_f`` or ``f``,
hence we'll concentrate our efforts cythonizing these two functions.
-.. note::
-
- In Python 2 replacing the ``range`` with its generator counterpart (``xrange``)
- would mean the ``range`` line would vanish. In Python 3 ``range`` is already a generator.
-
.. _enhancingperf.plain:
Plain Cython
@@ -298,7 +293,7 @@ advanced Cython techniques:
Even faster, with the caveat that a bug in our Cython code (an off-by-one error,
for example) might cause a segfault because memory access isn't checked.
For more about ``boundscheck`` and ``wraparound``, see the Cython docs on
-`compiler directives <http://cython.readthedocs.io/en/latest/src/reference/compilation.html?highlight=wraparound#compiler-directives>`__.
+`compiler directives <https://cython.readthedocs.io/en/latest/src/reference/compilation.html?highlight=wraparound#compiler-directives>`__.
.. _enhancingperf.numba:
@@ -423,9 +418,9 @@ prefer that Numba throw an error if it cannot compile a function in a way that
speeds up your code, pass Numba the argument
``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on
troubleshooting Numba modes, see the `Numba troubleshooting page
-<http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#the-compiled-code-is-too-slow>`__.
+<https://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#the-compiled-code-is-too-slow>`__.
-Read more in the `Numba docs <http://numba.pydata.org/>`__.
+Read more in the `Numba docs <https://numba.pydata.org/>`__.
.. _enhancingperf.eval:
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 55bbf6848820b..bd19b35e8d9e8 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -23,7 +23,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>`
text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>`
text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
- binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
+ ;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>`
binary;`OpenDocument <http://www.opendocumentformat.org>`__;:ref:`read_excel<io.ods>`;
binary;`HDF5 Format <https://support.hdfgroup.org/HDF5/whatishdf5.html>`__;:ref:`read_hdf<io.hdf5>`;:ref:`to_hdf<io.hdf5>`
binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>`
@@ -41,8 +41,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
.. note::
For examples that use the ``StringIO`` class, make sure you import it
- according to your Python version, i.e. ``from StringIO import StringIO`` for
- Python 2 and ``from io import StringIO`` for Python 3.
+ with ``from io import StringIO`` for Python 3.
.. _io.read_csv_table:
@@ -912,16 +911,6 @@ data columns:
significantly faster, ~20x has been observed.
-.. note::
-
- When passing a dict as the `parse_dates` argument, the order of
- the columns prepended is not guaranteed, because `dict` objects do not impose
- an ordering on their keys. On Python 2.7+ you may use `collections.OrderedDict`
- instead of a regular `dict` if this matters to you. Because of this, when using a
- dict for 'parse_dates' in conjunction with the `index_col` argument, it's best to
- specify `index_col` as a column label rather then as an index on the resulting frame.
-
-
Date parsing functions
++++++++++++++++++++++
@@ -2453,7 +2442,7 @@ Specify a number of rows to skip:
dfs = pd.read_html(url, skiprows=0)
-Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works
+Specify a number of rows to skip using a list (``range`` works
as well):
.. code-block:: python
@@ -2768,7 +2757,8 @@ Excel files
The :func:`~pandas.read_excel` method can read Excel 2003 (``.xls``)
files using the ``xlrd`` Python module. Excel 2007+ (``.xlsx``) files
-can be read using either ``xlrd`` or ``openpyxl``.
+can be read using either ``xlrd`` or ``openpyxl``. Binary Excel (``.xlsb``)
+files can be read using ``pyxlsb``.
The :meth:`~DataFrame.to_excel` instance method is used for
saving a ``DataFrame`` to Excel. Generally the semantics are
similar to working with :ref:`csv<io.read_csv_table>` data.
@@ -3123,11 +3113,7 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO``
.. code-block:: python
- # Safe import for either Python 2.x or 3.x
- try:
- from io import BytesIO
- except ImportError:
- from cStringIO import StringIO as BytesIO
+ from io import BytesIO
bio = BytesIO()
@@ -3229,6 +3215,30 @@ OpenDocument spreadsheets match what can be done for `Excel files`_ using
Currently pandas only supports *reading* OpenDocument spreadsheets. Writing
is not implemented.
+.. _io.xlsb:
+
+Binary Excel (.xlsb) files
+--------------------------
+
+.. versionadded:: 1.0.0
+
+The :func:`~pandas.read_excel` method can also read binary Excel files
+using the ``pyxlsb`` module. The semantics and features for reading
+binary Excel files mostly match what can be done for `Excel files`_ using
+``engine='pyxlsb'``. ``pyxlsb`` does not recognize datetime types
+in files and will return floats instead.
+
+.. code-block:: python
+
+ # Returns a DataFrame
+ pd.read_excel('path_to_file.xlsb', engine='pyxlsb')
+
+.. note::
+
+ Currently pandas only supports *reading* binary Excel files. Writing
+ is not implemented.
+
+
.. _io.clipboard:
Clipboard
@@ -4220,46 +4230,49 @@ Compression
all kinds of stores, not just tables. Two parameters are used to
control compression: ``complevel`` and ``complib``.
-``complevel`` specifies if and how hard data is to be compressed.
- ``complevel=0`` and ``complevel=None`` disables
- compression and ``0<complevel<10`` enables compression.
-
-``complib`` specifies which compression library to use. If nothing is
- specified the default library ``zlib`` is used. A
- compression library usually optimizes for either good
- compression rates or speed and the results will depend on
- the type of data. Which type of
- compression to choose depends on your specific needs and
- data. The list of supported compression libraries:
-
- - `zlib <https://zlib.net/>`_: The default compression library. A classic in terms of compression, achieves good compression rates but is somewhat slow.
- - `lzo <https://www.oberhumer.com/opensource/lzo/>`_: Fast compression and decompression.
- - `bzip2 <http://bzip.org/>`_: Good compression rates.
- - `blosc <http://www.blosc.org/>`_: Fast compression and decompression.
-
- Support for alternative blosc compressors:
-
- - `blosc:blosclz <http://www.blosc.org/>`_ This is the
- default compressor for ``blosc``
- - `blosc:lz4
- <https://fastcompression.blogspot.dk/p/lz4.html>`_:
- A compact, very popular and fast compressor.
- - `blosc:lz4hc
- <https://fastcompression.blogspot.dk/p/lz4.html>`_:
- A tweaked version of LZ4, produces better
- compression ratios at the expense of speed.
- - `blosc:snappy <https://google.github.io/snappy/>`_:
- A popular compressor used in many places.
- - `blosc:zlib <https://zlib.net/>`_: A classic;
- somewhat slower than the previous ones, but
- achieving better compression ratios.
- - `blosc:zstd <https://facebook.github.io/zstd/>`_: An
- extremely well balanced codec; it provides the best
- compression ratios among the others above, and at
- reasonably fast speed.
-
- If ``complib`` is defined as something other than the
- listed libraries a ``ValueError`` exception is issued.
+* ``complevel`` specifies if and how hard data is to be compressed.
+ ``complevel=0`` and ``complevel=None`` disables compression and
+ ``0<complevel<10`` enables compression.
+
+* ``complib`` specifies which compression library to use.
+ If nothing is specified the default library ``zlib`` is used. A
+ compression library usually optimizes for either good compression rates
+ or speed and the results will depend on the type of data. Which type of
+ compression to choose depends on your specific needs and data. The list
+ of supported compression libraries:
+
+ - `zlib <https://zlib.net/>`_: The default compression library.
+ A classic in terms of compression, achieves good compression
+ rates but is somewhat slow.
+ - `lzo <https://www.oberhumer.com/opensource/lzo/>`_: Fast
+ compression and decompression.
+ - `bzip2 <http://bzip.org/>`_: Good compression rates.
+ - `blosc <https://www.blosc.org/>`_: Fast compression and
+ decompression.
+
+ Support for alternative blosc compressors:
+
+ - `blosc:blosclz <https://www.blosc.org/>`_ This is the
+ default compressor for ``blosc``
+ - `blosc:lz4
+ <https://fastcompression.blogspot.dk/p/lz4.html>`_:
+ A compact, very popular and fast compressor.
+ - `blosc:lz4hc
+ <https://fastcompression.blogspot.dk/p/lz4.html>`_:
+ A tweaked version of LZ4, produces better
+ compression ratios at the expense of speed.
+ - `blosc:snappy <https://google.github.io/snappy/>`_:
+ A popular compressor used in many places.
+ - `blosc:zlib <https://zlib.net/>`_: A classic;
+ somewhat slower than the previous ones, but
+ achieving better compression ratios.
+ - `blosc:zstd <https://facebook.github.io/zstd/>`_: An
+ extremely well balanced codec; it provides the best
+ compression ratios among the others above, and at
+ reasonably fast speed.
+
+ If ``complib`` is defined as something other than the listed libraries a
+ ``ValueError`` exception is issued.
.. note::
@@ -4993,7 +5006,7 @@ Possible values are:
like *Presto* and *Redshift*, but has worse performance for
traditional SQL backend if the table contains many columns.
For more information check the SQLAlchemy `documention
- <http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.Insert.values.params.*args>`__.
+ <https://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.Insert.values.params.*args>`__.
- callable with signature ``(pd_table, conn, keys, data_iter)``:
This can be used to implement a more performant insertion method based on
specific backend dialect features.
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index abbb6feef6056..2e68a0598bb71 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -467,9 +467,9 @@ at the new values.
interp_s = ser.reindex(new_index).interpolate(method='pchip')
interp_s[49:51]
-.. _scipy: http://www.scipy.org
-.. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
-.. _guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
+.. _scipy: https://www.scipy.org
+.. _documentation: https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
+.. _guide: https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
.. _missing_data.interp_limits:
@@ -806,7 +806,8 @@ dtype, it will use ``pd.NA``:
Currently, pandas does not yet use those data types by default (when creating
a DataFrame or Series, or when reading in data), so you need to specify
-the dtype explicitly.
+the dtype explicitly. An easy way to convert to those dtypes is explained
+:ref:`here <missing_data.NA.conversion>`.
Propagation in arithmetic and comparison operations
---------------------------------------------------
@@ -825,14 +826,10 @@ For example, ``pd.NA`` propagates in arithmetic operations, similarly to
There are a few special cases when the result is known, even when one of the
operands is ``NA``.
+.. ipython:: python
-================ ======
-Operation Result
-================ ======
-``pd.NA ** 0`` 0
-``1 ** pd.NA`` 1
-``-1 ** pd.NA`` -1
-================ ======
+ pd.NA ** 0
+ 1 ** pd.NA
In equality and comparison operations, ``pd.NA`` also propagates. This deviates
from the behaviour of ``np.nan``, where comparisons with ``np.nan`` always
@@ -946,3 +943,29 @@ work with ``NA``, and generally return ``NA``:
in the future.
See :ref:`dsintro.numpy_interop` for more on ufuncs.
+
+.. _missing_data.NA.conversion:
+
+Conversion
+----------
+
+If you have a DataFrame or Series using traditional types that have missing data
+represented using ``np.nan``, there are convenience methods
+:meth:`~Series.convert_dtypes` in Series and :meth:`~DataFrame.convert_dtypes`
+in DataFrame that can convert data to use the newer dtypes for integers, strings and
+booleans listed :ref:`here <basics.dtypes>`. This is especially helpful after reading
+in data sets when letting the readers such as :meth:`read_csv` and :meth:`read_excel`
+infer default dtypes.
+
+In this example, while the dtypes of all columns are changed, we show the results for
+the first 10 columns.
+
+.. ipython:: python
+
+ bb = pd.read_csv('data/baseball.csv', index_col='id')
+ bb[bb.columns[:10]].dtypes
+
+.. ipython:: python
+
+ bbn = bb.convert_dtypes()
+ bbn[bbn.columns[:10]].dtypes
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 02550eab86913..1f2f8818c8458 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -6,7 +6,7 @@
"source": [
"# Styling\n",
"\n",
- "This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](http://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb).\n",
+ "This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb).\n",
"\n",
"You can apply **conditional formatting**, the visual styling of a DataFrame\n",
"depending on the data within, by using the ``DataFrame.style`` property.\n",
@@ -462,7 +462,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](http://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap."
+ "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap."
]
},
{
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 08b2ae0a4a837..3fdab0fd26643 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -1951,6 +1951,10 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th
PeriodIndex partial string indexing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+PeriodIndex now supports partial string slicing with non-monotonic indexes.
+
+.. versionadded:: 1.1.0
+
You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodIndex``, in the same manner as ``DatetimeIndex``. For details, refer to :ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>`.
.. ipython:: python
@@ -1981,6 +1985,7 @@ As with ``DatetimeIndex``, the endpoints will be included in the result. The exa
dfp['2013-01-01 10H':'2013-01-01 11H']
+
Frequency conversion and resampling with PeriodIndex
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The frequency of ``Period`` and ``PeriodIndex`` can be converted via the ``asfreq``
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 39051440e9d9a..6680ba854cb6f 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -264,7 +264,7 @@ horizontal and cumulative histograms can be drawn by
plt.close('all')
See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the
-`matplotlib hist documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
+`matplotlib hist documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
The existing interface ``DataFrame.hist`` to plot histogram still can be used.
@@ -370,7 +370,7 @@ For example, horizontal and custom-positioned boxplot can be drawn by
See the :meth:`boxplot <matplotlib.axes.Axes.boxplot>` method and the
-`matplotlib boxplot documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot>`__ for more.
+`matplotlib boxplot documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot>`__ for more.
The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
@@ -591,7 +591,7 @@ bubble chart using a column of the ``DataFrame`` as the bubble size.
plt.close('all')
See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
-`matplotlib scatter documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`__ for more.
+`matplotlib scatter documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`__ for more.
.. _visualization.hexbin:
@@ -651,7 +651,7 @@ given by column ``z``. The bins are aggregated with NumPy's ``max`` function.
plt.close('all')
See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the
-`matplotlib hexbin documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more.
+`matplotlib hexbin documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more.
.. _visualization.pie:
@@ -749,7 +749,7 @@ If you pass values whose sum total is less than 1.0, matplotlib draws a semicirc
@savefig series_pie_plot_semi.png
series.plot.pie(figsize=(6, 6))
-See the `matplotlib pie documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more.
+See the `matplotlib pie documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more.
.. ipython:: python
:suppress:
@@ -1267,7 +1267,7 @@ tick locator methods, it is useful to call the automatic
date tick adjustment from matplotlib for figures whose ticklabels overlap.
See the :meth:`autofmt_xdate <matplotlib.figure.autofmt_xdate>` method and the
-`matplotlib documentation <http://matplotlib.org/users/recipes.html#fixing-common-date-annoyances>`__ for more.
+`matplotlib documentation <https://matplotlib.org/users/recipes.html#fixing-common-date-annoyances>`__ for more.
Subplots
~~~~~~~~
@@ -1476,7 +1476,7 @@ as seen in the example below.
There also exists a helper function ``pandas.plotting.table``, which creates a
table from :class:`DataFrame` or :class:`Series`, and adds it to an
``matplotlib.Axes`` instance. This function can accept keywords which the
-matplotlib `table <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ has.
+matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ has.
.. ipython:: python
@@ -1494,7 +1494,7 @@ matplotlib `table <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.
plt.close('all')
-**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documentation <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more.
+**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documentation <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more.
.. _visualization.colormaps:
@@ -1504,7 +1504,7 @@ Colormaps
A potential issue when plotting a large number of columns is that it can be
difficult to distinguish some series due to repetition in the default colors. To
remedy this, ``DataFrame`` plotting supports the use of the ``colormap`` argument,
-which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__
+which accepts either a Matplotlib `colormap <https://matplotlib.org/api/cm_api.html>`__
or a string that is a name of a colormap registered with Matplotlib. A
visualization of the default matplotlib colormaps is available `here
<https://matplotlib.org/examples/color/colormaps_reference.html>`__.
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 05c7f72882088..111caa81f7169 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -7,9 +7,17 @@ Release Notes
*************
This is the list of changes to pandas between each release. For full details,
-see the commit logs at http://github.com/pandas-dev/pandas. For install and
+see the commit logs at https://github.com/pandas-dev/pandas. For install and
upgrade instructions, see :ref:`install`.
+Version 1.1
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ v1.1.0
+
Version 1.0
-----------
@@ -17,6 +25,7 @@ Version 1.0
:maxdepth: 2
v1.0.0
+ v1.0.1
Version 0.25
------------
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst
index 43c6083fdce8f..de5e1986744fe 100644
--- a/doc/source/whatsnew/v0.13.0.rst
+++ b/doc/source/whatsnew/v0.13.0.rst
@@ -214,7 +214,7 @@ These were announced changes in 0.12 or prior that are taking effect as of 0.13.
- Remove deprecated ``read_clipboard/to_clipboard/ExcelFile/ExcelWriter`` from ``pandas.io.parsers`` (:issue:`3717`)
These are available as functions in the main pandas namespace (e.g. ``pd.read_clipboard``)
- default for ``tupleize_cols`` is now ``False`` for both ``to_csv`` and ``read_csv``. Fair warning in 0.12 (:issue:`3604`)
-- default for `display.max_seq_len` is now 100 rather then `None`. This activates
+- default for `display.max_seq_len` is now 100 rather than `None`. This activates
truncated display ("...") of long sequences in various places. (:issue:`3391`)
Deprecations
diff --git a/doc/source/whatsnew/v0.25.3.rst b/doc/source/whatsnew/v0.25.3.rst
index f73a3f956f42e..f7f54198a0f82 100644
--- a/doc/source/whatsnew/v0.25.3.rst
+++ b/doc/source/whatsnew/v0.25.3.rst
@@ -19,4 +19,4 @@ Groupby/resample/rolling
Contributors
~~~~~~~~~~~~
-.. contributors:: v0.25.2..HEAD
+.. contributors:: v0.25.2..v0.25.3
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5f79accc5c679..00dc3fdb28f26 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_100:
-What's new in 1.0.0 (??)
-------------------------
+What's new in 1.0.0 (January 29, 2020)
+--------------------------------------
These are the changes in pandas 1.0.0. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -37,6 +37,43 @@ See :ref:`policies.version` for more.
Enhancements
~~~~~~~~~~~~
+.. _whatsnew_100.numba_rolling_apply:
+
+Using Numba in ``rolling.apply`` and ``expanding.apply``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We've added an ``engine`` keyword to :meth:`~core.window.rolling.Rolling.apply` and :meth:`~core.window.expanding.Expanding.apply`
+that allows the user to execute the routine using `Numba <https://numba.pydata.org/>`__ instead of Cython.
+Using the Numba engine can yield significant performance gains if the apply function can operate on numpy arrays and
+the data set is larger (1 million rows or greater). For more details, see
+:ref:`rolling apply documentation <stats.rolling_apply>` (:issue:`28987`, :issue:`30936`)
+
+.. _whatsnew_100.custom_window:
+
+Defining custom windows for rolling operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We've added a :func:`pandas.api.indexers.BaseIndexer` class that allows users to define how
+window bounds are created during ``rolling`` operations. Users can define their own ``get_window_bounds``
+method on a :func:`pandas.api.indexers.BaseIndexer` subclass that will generate the start and end
+indices used for each window during the rolling aggregation. For more details and example usage, see
+the :ref:`custom window rolling documentation <stats.custom_rolling_window>`
+
+.. _whatsnew_100.to_markdown:
+
+Converting to Markdown
+^^^^^^^^^^^^^^^^^^^^^^
+
+We've added :meth:`~DataFrame.to_markdown` for creating a markdown table (:issue:`11052`)
+
+.. ipython:: python
+
+ df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}, index=['a', 'a', 'b'])
+ print(df.to_markdown())
+
+Experimental new features
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. _whatsnew_100.NA:
Experimental ``NA`` scalar to denote missing values
@@ -144,7 +181,7 @@ type dedicated to boolean data that can hold missing values. The default
``bool`` data type based on a bool-dtype NumPy array, the column can only hold
``True`` or ``False``, and not missing values. This new :class:`~arrays.BooleanArray`
can store missing values as well by keeping track of this in a separate mask.
-(:issue:`29555`, :issue:`30095`)
+(:issue:`29555`, :issue:`30095`, :issue:`31131`)
.. ipython:: python
@@ -157,55 +194,52 @@ You can use the alias ``"boolean"`` as well.
s = pd.Series([True, False, None], dtype="boolean")
s
-.. _whatsnew_100.numba_rolling_apply:
+.. _whatsnew_100.convert_dtypes:
-Using Numba in ``rolling.apply``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``convert_dtypes`` method to ease use of supported extension dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-We've added an ``engine`` keyword to :meth:`~core.window.rolling.Rolling.apply` that allows the user to execute the
-routine using `Numba <https://numba.pydata.org/>`__ instead of Cython. Using the Numba engine
-can yield significant performance gains if the apply function can operate on numpy arrays and
-the data set is larger (1 million rows or greater). For more details, see
-:ref:`rolling apply documentation <stats.rolling_apply>` (:issue:`28987`)
+In order to encourage use of the extension dtypes ``StringDtype``,
+``BooleanDtype``, ``Int64Dtype``, ``Int32Dtype``, etc., that support ``pd.NA``, the
+methods :meth:`DataFrame.convert_dtypes` and :meth:`Series.convert_dtypes`
+have been introduced. (:issue:`29752`) (:issue:`30929`)
-.. _whatsnew_100.custom_window:
+Example:
-Defining custom windows for rolling operations
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-We've added a :func:`pandas.api.indexers.BaseIndexer` class that allows users to define how
-window bounds are created during ``rolling`` operations. Users can define their own ``get_window_bounds``
-method on a :func:`pandas.api.indexers.BaseIndexer` subclass that will generate the start and end
-indices used for each window during the rolling aggregation. For more details and example usage, see
-the :ref:`custom window rolling documentation <stats.custom_rolling_window>`
+.. ipython:: python
-.. _whatsnew_100.to_markdown:
+ df = pd.DataFrame({'x': ['abc', None, 'def'],
+ 'y': [1, 2, np.nan],
+ 'z': [True, False, True]})
+ df
+ df.dtypes
-Converting to Markdown
-^^^^^^^^^^^^^^^^^^^^^^
+.. ipython:: python
-We've added :meth:`~DataFrame.to_markdown` for creating a markdown table (:issue:`11052`)
+ converted = df.convert_dtypes()
+ converted
+ converted.dtypes
-.. ipython:: python
+This is especially useful after reading in data using readers such as :func:`read_csv`
+and :func:`read_excel`.
+See :ref:`here <missing_data.NA.conversion>` for a description.
- df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}, index=['a', 'a', 'b'])
- print(df.to_markdown())
.. _whatsnew_100.enhancements.other:
Other enhancements
-^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~
- :meth:`DataFrame.to_string` added the ``max_colwidth`` parameter to control when wide columns are truncated (:issue:`9784`)
- Added the ``na_value`` argument to :meth:`Series.to_numpy`, :meth:`Index.to_numpy` and :meth:`DataFrame.to_numpy` to control the value used for missing data (:issue:`30322`)
- :meth:`MultiIndex.from_product` infers level names from inputs if not explicitly provided (:issue:`27292`)
- :meth:`DataFrame.to_latex` now accepts ``caption`` and ``label`` arguments (:issue:`25436`)
-- The :ref:`integer dtype <integer_na>` with support for missing values and the
- new :ref:`string dtype <text.types>` can now be converted to ``pyarrow`` (>=
- 0.15.0), which means that it is supported in writing to the Parquet file
- format when using the ``pyarrow`` engine. It is currently not yet supported
- when converting back to pandas, so it will become an integer or float
- (depending on the presence of missing data) or object dtype column. (:issue:`28368`)
+- DataFrames with :ref:`nullable integer <integer_na>`, the :ref:`new string dtype <text.types>`
+ and period data type can now be converted to ``pyarrow`` (>=0.15.0), which means that it is
+ supported in writing to the Parquet file format when using the ``pyarrow`` engine (:issue:`28368`).
+ Full roundtrip to parquet (writing and reading back in with :meth:`~DataFrame.to_parquet` / :func:`read_parquet`)
+ is supported starting with pyarrow >= 0.16 (:issue:`20612`).
+- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue:`30270`)
- :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`)
- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`)
- Implemented :meth:`pandas.core.window.Window.var` and :meth:`pandas.core.window.Window.std` functions (:issue:`26597`)
@@ -213,32 +247,20 @@ Other enhancements
- Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`)
- :meth:`Styler.background_gradient` now accepts ``vmin`` and ``vmax`` arguments (:issue:`12145`)
- :meth:`Styler.format` added the ``na_rep`` parameter to help format the missing values (:issue:`21527`, :issue:`28358`)
-- Roundtripping DataFrames with nullable integer, string and period data types to parquet
- (:meth:`~DataFrame.to_parquet` / :func:`read_parquet`) using the `'pyarrow'` engine
- now preserve those data types with pyarrow >= 0.16.0 (:issue:`20612`, :issue:`28371`).
+- :func:`read_excel` now can read binary Excel (``.xlsb``) files by passing ``engine='pyxlsb'``. For more details and example usage, see the :ref:`Binary Excel files documentation <io.xlsb>`. Closes :issue:`8540`.
- The ``partition_cols`` argument in :meth:`DataFrame.to_parquet` now accepts a string (:issue:`27117`)
- :func:`pandas.read_json` now parses ``NaN``, ``Infinity`` and ``-Infinity`` (:issue:`12213`)
-- The ``pandas.np`` submodule is now deprecated. Import numpy directly instead (:issue:`30296`)
-- :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. (:issue:`30270`)
- DataFrame constructor preserve `ExtensionArray` dtype with `ExtensionArray` (:issue:`11363`)
- :meth:`DataFrame.sort_values` and :meth:`Series.sort_values` have gained ``ignore_index`` keyword to be able to reset index after sorting (:issue:`30114`)
- :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` have gained ``ignore_index`` keyword to reset index (:issue:`30114`)
- :meth:`DataFrame.drop_duplicates` has gained ``ignore_index`` keyword to reset index (:issue:`30114`)
-- Added new writer for exporting Stata dta files in version 118, ``StataWriter118``. This format supports exporting strings containing Unicode characters (:issue:`23573`)
+- Added new writer for exporting Stata dta files in versions 118 and 119, ``StataWriterUTF8``. These file formats support exporting strings containing Unicode characters. Format 119 supports data sets with more than 32,767 variables (:issue:`23573`, :issue:`30959`)
- :meth:`Series.map` now accepts ``collections.abc.Mapping`` subclasses as a mapper (:issue:`29733`)
-- The ``pandas.datetime`` class is now deprecated. Import from ``datetime`` instead (:issue:`30296`)
- Added an experimental :attr:`~DataFrame.attrs` for storing global metadata about a dataset (:issue:`29062`)
- :meth:`Timestamp.fromisocalendar` is now compatible with python 3.8 and above (:issue:`28115`)
- :meth:`DataFrame.to_pickle` and :func:`read_pickle` now accept URL (:issue:`30163`)
-Build Changes
-^^^^^^^^^^^^^
-
-Pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include
-cythonized files in the source distribution uploaded to PyPI (:issue:`28341`, :issue:`20775`). If you're installing
-a built distribution (wheel) or via conda, this shouldn't have any effect on you. If you're building pandas from
-source, you should no longer need to install Cython into your build environment before calling ``pip install pandas``.
.. ---------------------------------------------------------------------------
@@ -283,7 +305,7 @@ To update, use ``MultiIndex.set_names``, which returns a new ``MultiIndex``.
New repr for :class:`~pandas.arrays.IntervalArray`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- :class:`pandas.arrays.IntervalArray` adopts a new ``__repr__`` in accordance with other array classes (:issue:`25022`)
+:class:`pandas.arrays.IntervalArray` adopts a new ``__repr__`` in accordance with other array classes (:issue:`25022`)
*pandas 0.25.x*
@@ -304,52 +326,62 @@ New repr for :class:`~pandas.arrays.IntervalArray`
``DataFrame.rename`` now only accepts one positional argument
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- :meth:`DataFrame.rename` would previously accept positional arguments that would lead
- to ambiguous or undefined behavior. From pandas 1.0, only the very first argument, which
- maps labels to their new names along the default axis, is allowed to be passed by position
- (:issue:`29136`).
+:meth:`DataFrame.rename` would previously accept positional arguments that would lead
+to ambiguous or undefined behavior. From pandas 1.0, only the very first argument, which
+maps labels to their new names along the default axis, is allowed to be passed by position
+(:issue:`29136`).
+
+.. ipython:: python
+ :suppress:
+
+ df = pd.DataFrame([[1]])
*pandas 0.25.x*
-.. code-block:: ipython
+.. code-block:: python
- In [1]: df = pd.DataFrame([[1]])
- In [2]: df.rename({0: 1}, {0: 2})
+ >>> df = pd.DataFrame([[1]])
+ >>> df.rename({0: 1}, {0: 2})
FutureWarning: ...Use named arguments to resolve ambiguity...
- Out[2]:
2
1 1
*pandas 1.0.0*
-.. ipython:: python
- :okexcept:
+.. code-block:: python
- df.rename({0: 1}, {0: 2})
+ >>> df.rename({0: 1}, {0: 2})
+ Traceback (most recent call last):
+ ...
+ TypeError: rename() takes from 1 to 2 positional arguments but 3 were given
Note that errors will now be raised when conflicting or potentially ambiguous arguments are provided.
*pandas 0.25.x*
-.. code-block:: ipython
+.. code-block:: python
- In [1]: df.rename({0: 1}, index={0: 2})
- Out[1]:
+ >>> df.rename({0: 1}, index={0: 2})
0
1 1
- In [2]: df.rename(mapper={0: 1}, index={0: 2})
- Out[2]:
+ >>> df.rename(mapper={0: 1}, index={0: 2})
0
2 1
*pandas 1.0.0*
-.. ipython:: python
- :okexcept:
+.. code-block:: python
+
+ >>> df.rename({0: 1}, index={0: 2})
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot specify both 'mapper' and any of 'index' or 'columns'
- df.rename({0: 1}, index={0: 2})
- df.rename(mapper={0: 1}, index={0: 2})
+ >>> df.rename(mapper={0: 1}, index={0: 2})
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot specify both 'mapper' and any of 'index' or 'columns'
You can still change the axis along which the first positional argument is applied by
supplying the ``axis`` keyword argument.
@@ -369,7 +401,7 @@ keywords.
Extended verbose info output for :class:`~pandas.DataFrame`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- :meth:`DataFrame.info` now shows line numbers for the columns summary (:issue:`17304`)
+:meth:`DataFrame.info` now shows line numbers for the columns summary (:issue:`17304`)
*pandas 0.25.x*
@@ -485,6 +517,25 @@ Use :meth:`arrays.IntegerArray.to_numpy` with an explicit ``na_value`` instead.
a.to_numpy(dtype="float", na_value=np.nan)
+**Reductions can return ``pd.NA``**
+
+When performing a reduction such as a sum with ``skipna=False``, the result
+will now be ``pd.NA`` instead of ``np.nan`` in presence of missing values
+(:issue:`30958`).
+
+*pandas 0.25.x*
+
+.. code-block:: python
+
+ >>> pd.Series(a).sum(skipna=False)
+ nan
+
+*pandas 1.0.0*
+
+.. ipython:: python
+
+ pd.Series(a).sum(skipna=False)
+
**value_counts returns a nullable integer dtype**
:meth:`Series.value_counts` with a nullable integer dtype now returns a nullable
@@ -572,6 +623,54 @@ consistent with the behaviour of :class:`DataFrame` and :class:`Index`.
DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.
Series([], dtype: float64)
+Result dtype inference changes for resample operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The rules for the result dtype in :meth:`DataFrame.resample` aggregations have changed for extension types (:issue:`31359`).
+Previously, pandas would attempt to convert the result back to the original dtype, falling back to the usual
+inference rules if that was not possible. Now, pandas will only return a result of the original dtype if the
+scalar values in the result are instances of the extension dtype's scalar type.
+
+.. ipython:: python
+
+ df = pd.DataFrame({"A": ['a', 'b']}, dtype='category',
+ index=pd.date_range('2000', periods=2))
+ df
+
+
+*pandas 0.25.x*
+
+.. code-block:: python
+
+ >>> df.resample("2D").agg(lambda x: 'a').A.dtype
+ CategoricalDtype(categories=['a', 'b'], ordered=False)
+
+*pandas 1.0.0*
+
+.. ipython:: python
+
+ df.resample("2D").agg(lambda x: 'a').A.dtype
+
+This fixes an inconsistency between ``resample`` and ``groupby``.
+This also fixes a potential bug, where the **values** of the result might change
+depending on how the results are cast back to the original dtype.
+
+*pandas 0.25.x*
+
+.. code-block:: python
+
+ >>> df.resample("2D").agg(lambda x: 'c')
+
+ A
+ 0 NaN
+
+*pandas 1.0.0*
+
+.. ipython:: python
+
+ df.resample("2D").agg(lambda x: 'c')
+
+
.. _whatsnew_100.api_breaking.python:
Increased minimum version for Python
@@ -647,18 +746,26 @@ Optional libraries below the lowest tested version may still work, but are not c
See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more.
+Build Changes
+^^^^^^^^^^^^^
+
+Pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include
+cythonized files in the source distribution uploaded to PyPI (:issue:`28341`, :issue:`20775`). If you're installing
+a built distribution (wheel) or via conda, this shouldn't have any effect on you. If you're building pandas from
+source, you should no longer need to install Cython into your build environment before calling ``pip install pandas``.
+
+
.. _whatsnew_100.api.other:
Other API changes
^^^^^^^^^^^^^^^^^
-- Bumped the minimum supported version of ``s3fs`` from 0.0.8 to 0.3.0 (:issue:`28616`)
- :class:`core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`)
- :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`)
- :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`)
- In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
To see which attributes are excluded, see an object's ``_deprecations`` attribute, for example ``pd.DataFrame._deprecations`` (:issue:`28805`).
-- The returned dtype of ::func:`pd.unique` now matches the input dtype. (:issue:`27874`)
+- The returned dtype of :func:`unique` now matches the input dtype. (:issue:`27874`)
- Changed the default configuration value for ``options.matplotlib.register_converters`` from ``True`` to ``"auto"`` (:issue:`18720`).
Now, pandas custom formatters will only be applied to plots created by pandas, through :meth:`~DataFrame.plot`.
Previously, pandas' formatters would be applied to all plots created *after* a :meth:`~DataFrame.plot`.
@@ -669,7 +776,6 @@ Other API changes
- :meth:`Series.str.__iter__` was deprecated and will be removed in future releases (:issue:`28277`).
- Added ``<NA>`` to the list of default NA values for :meth:`read_csv` (:issue:`30821`)
-
.. _whatsnew_100.api.documentation:
Documentation Improvements
@@ -705,8 +811,11 @@ Deprecations
- The deprecated internal attributes ``_start``, ``_stop`` and ``_step`` of :class:`RangeIndex` now raise a ``FutureWarning`` instead of a ``DeprecationWarning`` (:issue:`26581`)
- The ``pandas.util.testing`` module has been deprecated. Use the public API in ``pandas.testing`` documented at :ref:`api.general.testing` (:issue:`16232`).
- ``pandas.SparseArray`` has been deprecated. Use ``pandas.arrays.SparseArray`` (:class:`arrays.SparseArray`) instead. (:issue:`30642`)
-- The parameter ``is_copy`` of :meth:`DataFrame.take` has been deprecated and will be removed in a future version. (:issue:`27357`)
+- The parameter ``is_copy`` of :meth:`Series.take` and :meth:`DataFrame.take` has been deprecated and will be removed in a future version. (:issue:`27357`)
- Support for multi-dimensional indexing (e.g. ``index[:, None]``) on a :class:`Index` is deprecated and will be removed in a future version, convert to a numpy array before indexing instead (:issue:`30588`)
+- The ``pandas.np`` submodule is now deprecated. Import numpy directly instead (:issue:`30296`)
+- The ``pandas.datetime`` class is now deprecated. Import from ``datetime`` instead (:issue:`30610`)
+- :class:`~DataFrame.diff` will raise a ``TypeError`` rather than implicitly losing the dtype of extension types in the future. Convert to the correct dtype before calling ``diff`` instead (:issue:`31025`)
**Selecting Columns from a Grouped DataFrame**
@@ -998,6 +1107,9 @@ Numeric
- Bug in :meth:`DataFrame.round` where a :class:`DataFrame` with a :class:`CategoricalIndex` of :class:`IntervalIndex` columns would incorrectly raise a ``TypeError`` (:issue:`30063`)
- Bug in :meth:`Series.pct_change` and :meth:`DataFrame.pct_change` when there are duplicated indices (:issue:`30463`)
- Bug in :class:`DataFrame` cumulative operations (e.g. cumsum, cummax) incorrect casting to object-dtype (:issue:`19296`)
+- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
+- Bug in :class:`~DataFrame.diff` losing the dtype for extension types (:issue:`30889`)
+- Bug in :class:`DataFrame.diff` raising an ``IndexError`` when one of the columns was a nullable integer dtype (:issue:`30967`)
Conversion
^^^^^^^^^^
@@ -1033,6 +1145,7 @@ Indexing
- Bug when indexing with ``.loc`` where the index was a :class:`CategoricalIndex` with non-string categories didn't work (:issue:`17569`, :issue:`30225`)
- :meth:`Index.get_indexer_non_unique` could fail with ``TypeError`` in some cases, such as when searching for ints in a string index (:issue:`28257`)
- Bug in :meth:`Float64Index.get_loc` incorrectly raising ``TypeError`` instead of ``KeyError`` (:issue:`29189`)
+- Bug in :meth:`DataFrame.loc` with incorrect dtype when setting Categorical value in 1-row DataFrame (:issue:`25495`)
- :meth:`MultiIndex.get_loc` can't find missing values when input includes missing values (:issue:`19132`)
- Bug in :meth:`Series.__setitem__` incorrectly assigning values with boolean indexer when the length of new data matches the number of ``True`` values and new data is not a ``Series`` or an ``np.array`` (:issue:`30567`)
- Bug in indexing with a :class:`PeriodIndex` incorrectly accepting integers representing years, use e.g. ``ser.loc["2007"]`` instead of ``ser.loc[2007]`` (:issue:`30763`)
@@ -1112,6 +1225,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.groupby` when using nunique on axis=1 (:issue:`30253`)
- Bug in :meth:`GroupBy.quantile` with multiple list-like q value and integer column names (:issue:`30289`)
- Bug in :meth:`GroupBy.pct_change` and :meth:`core.groupby.SeriesGroupBy.pct_change` causes ``TypeError`` when ``fill_method`` is ``None`` (:issue:`30463`)
+- Bug in :meth:`Rolling.count` and :meth:`Expanding.count` argument where ``min_periods`` was ignored (:issue:`26996`)
Reshaping
^^^^^^^^^
@@ -1138,7 +1252,7 @@ Sparse
^^^^^^
- Bug in :class:`SparseDataFrame` arithmetic operations incorrectly casting inputs to float (:issue:`28107`)
- Bug in ``DataFrame.sparse`` returning a ``Series`` when there was a column named ``sparse`` rather than the accessor (:issue:`30758`)
--
+- Fixed :meth:`operator.xor` with a boolean-dtype ``SparseArray``. Now returns a sparse result, rather than object dtype (:issue:`31025`)
ExtensionArray
^^^^^^^^^^^^^^
@@ -1146,6 +1260,7 @@ ExtensionArray
- Bug in :class:`arrays.PandasArray` when setting a scalar string (:issue:`28118`, :issue:`28150`).
- Bug where nullable integers could not be compared to strings (:issue:`28930`)
- Bug where :class:`DataFrame` constructor raised ``ValueError`` with list-like data and ``dtype`` specified (:issue:`30280`)
+- Bug in dtype being lost in ``__invert__`` (``~`` operator) for extension-array backed ``Series`` and ``DataFrame`` (:issue:`23087`)
Other
@@ -1177,3 +1292,5 @@ Other
Contributors
~~~~~~~~~~~~
+
+.. contributors:: v0.25.3..v1.0.0
diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
new file mode 100644
index 0000000000000..2e694e601e79e
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -0,0 +1,135 @@
+.. _whatsnew_101:
+
+What's new in 1.0.1 (??)
+------------------------
+
+These are the changes in pandas 1.0.1. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+
+.. _whatsnew_101.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+
+Categorical
+^^^^^^^^^^^
+
+-
+-
+
+Datetimelike
+^^^^^^^^^^^^
+-
+-
+
+Timedelta
+^^^^^^^^^
+
+-
+-
+
+Timezones
+^^^^^^^^^
+
+-
+-
+
+
+Numeric
+^^^^^^^
+-
+-
+
+Conversion
+^^^^^^^^^^
+
+-
+-
+
+Strings
+^^^^^^^
+
+-
+-
+
+
+Interval
+^^^^^^^^
+
+-
+-
+
+Indexing
+^^^^^^^^
+
+-
+-
+- Bug where assigning to a :class:`Series` using a IntegerArray / BooleanArray as a mask would raise ``TypeError`` (:issue:`31446`)
+
+Missing
+^^^^^^^
+
+-
+-
+
+MultiIndex
+^^^^^^^^^^
+
+-
+-
+
+I/O
+^^^
+
+-
+-
+
+Plotting
+^^^^^^^^
+
+-
+-
+
+Groupby/resample/rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+-
+-
+
+
+Reshaping
+^^^^^^^^^
+
+-
+-
+
+Sparse
+^^^^^^
+
+-
+-
+
+ExtensionArray
+^^^^^^^^^^^^^^
+
+-
+-
+
+
+Other
+^^^^^
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_101.contributors:
+
+Contributors
+~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
new file mode 100644
index 0000000000000..54175fada6e56
--- /dev/null
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -0,0 +1,218 @@
+.. _whatsnew_110:
+
+What's new in 1.1.0 (??)
+------------------------
+
+These are the changes in pandas 1.1.0. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+Enhancements
+~~~~~~~~~~~~
+
+.. _whatsnew_110.period_index_partial_string_slicing:
+
+Nonmonotonic PeriodIndex Partial String Slicing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`PeriodIndex` now supports partial string slicing for non-monotonic indexes, mirroring :class:`DatetimeIndex` behavior (:issue:`31096`)
+
+For example:
+
+.. ipython:: python
+
+ dti = pd.date_range("2014-01-01", periods=30, freq="30D")
+ pi = dti.to_period("D")
+ ser_monotonic = pd.Series(np.arange(30), index=pi)
+ shuffler = list(range(0, 30, 2)) + list(range(1, 31, 2))
+ ser = ser_monotonic[shuffler]
+ ser
+
+.. ipython:: python
+
+ ser["2014"]
+ ser.loc["May 2015"]
+
+.. _whatsnew_110.enhancements.other:
+
+Other enhancements
+^^^^^^^^^^^^^^^^^^
+
+- :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`)
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_110.api.other:
+
+Other API changes
+^^^^^^^^^^^^^^^^^
+
+- :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last``
+ will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`)
+- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`)
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`.
+  Previously an ``AttributeError`` was raised (:issue:`31126`)
+
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_110.deprecations:
+
+Deprecations
+~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+
+.. _whatsnew_110.performance:
+
+Performance improvements
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Performance improvement in :class:`Timedelta` constructor (:issue:`30543`)
+- Performance improvement in :class:`Timestamp` constructor (:issue:`30543`)
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_110.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+
+Categorical
+^^^^^^^^^^^
+
+-
+-
+
+Datetimelike
+^^^^^^^^^^^^
+
+- Bug in :class:`Timestamp` where constructing :class:`Timestamp` from ambiguous epoch time and calling constructor again changed :meth:`Timestamp.value` property (:issue:`24329`)
+- :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`)
+- Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`)
+
+Timedelta
+^^^^^^^^^
+
+-
+-
+
+Timezones
+^^^^^^^^^
+
+-
+-
+
+
+Numeric
+^^^^^^^
+- Bug in :meth:`DataFrame.floordiv` with ``axis=0`` not treating division-by-zero like :meth:`Series.floordiv` (:issue:`31271`)
+-
+-
+
+Conversion
+^^^^^^^^^^
+- Bug in :class:`Series` construction from NumPy array with big-endian ``datetime64`` dtype (:issue:`29684`)
+-
+-
+
+Strings
+^^^^^^^
+
+-
+-
+
+
+Interval
+^^^^^^^^
+
+-
+-
+
+Indexing
+^^^^^^^^
+- Bug in slicing on a :class:`DatetimeIndex` with a partial-timestamp dropping high-resolution indices near the end of a year, quarter, or month (:issue:`31064`)
+- Bug in :meth:`PeriodIndex.get_loc` treating higher-resolution strings differently from :meth:`PeriodIndex.get_value` (:issue:`31172`)
+- Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`)
+
+Missing
+^^^^^^^
+
+-
+-
+
+MultiIndex
+^^^^^^^^^^
+
+-
+-
+
+I/O
+^^^
+- Bug in :meth:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
+-
+-
+
+Plotting
+^^^^^^^^
+
+- :func:`.plot` for line/bar now accepts color by dictionary (:issue:`8193`).
+-
+
+Groupby/resample/rolling
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted and has duplicates and the applied ``func`` does not mutate passed in objects (:issue:`30667`)
+- Bug in :meth:`DataFrameGroupBy.transform` producing an incorrect result with transformation functions (:issue:`30918`)
+
+Reshaping
+^^^^^^^^^
+
+- Bug affecting all numeric and boolean reduction methods not returning subclassed data type. (:issue:`25596`)
+- Bug in :meth:`DataFrame.pivot_table` when only MultiIndexed columns is set (:issue:`17038`)
+- Bug in :meth:`DataFrame.unstack` and :meth:`Series.unstack` can take tuple names in MultiIndexed data (:issue:`19966`)
+- Bug in :meth:`DataFrame.pivot_table` when ``margin`` is ``True`` and only ``column`` is defined (:issue:`31016`)
+- Fix incorrect error message in :meth:`DataFrame.pivot` when ``columns`` is set to ``None``. (:issue:`30924`)
+- Bug in :func:`crosstab` when inputs are two Series and have tuple names, the output will keep dummy MultiIndex as columns. (:issue:`18321`)
+- Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`)
+
+Sparse
+^^^^^^
+
+-
+-
+
+ExtensionArray
+^^^^^^^^^^^^^^
+
+-
+-
+
+
+Other
+^^^^^
+- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True``
+ instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_110.contributors:
+
+Contributors
+~~~~~~~~~~~~
diff --git a/doc/sphinxext/announce.py b/doc/sphinxext/announce.py
index fdc5a6b283ba8..f394aac5c545b 100755
--- a/doc/sphinxext/announce.py
+++ b/doc/sphinxext/announce.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# -*- encoding:utf-8 -*-
"""
Script to generate contributor and pull request lists
diff --git a/environment.yml b/environment.yml
index e244350a0bea0..5f1184e921119 100644
--- a/environment.yml
+++ b/environment.yml
@@ -27,7 +27,6 @@ dependencies:
# documentation
- gitpython # obtain contributors from git for whatsnew
- sphinx
- - numpydoc>=0.9.0
# documentation (jupyter notebooks)
- nbconvert>=5.4.1
@@ -105,3 +104,4 @@ dependencies:
- tabulate>=0.8.3 # DataFrame.to_markdown
- pip:
- git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
+ - git+https://github.com/numpy/numpydoc
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 491bcb21f245d..d526531b159b2 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -35,8 +35,7 @@
raise ImportError(
f"C extension: {module} not built. If you want to import "
"pandas from the source directory, you may need to run "
- "'python setup.py build_ext --inplace --force' to build "
- "the C extensions first."
+ "'python setup.py build_ext --inplace --force' to build the C extensions first."
)
from pandas._config import (
@@ -198,8 +197,7 @@ def __getattr__(name):
warnings.warn(
"The Panel class is removed from pandas. Accessing it "
- "from the top-level namespace will also be removed in "
- "the next version",
+ "from the top-level namespace will also be removed in the next version",
FutureWarning,
stacklevel=2,
)
@@ -238,8 +236,7 @@ class Panel:
elif name in {"SparseSeries", "SparseDataFrame"}:
warnings.warn(
f"The {name} class is removed from pandas. Accessing it from "
- "the top-level namespace will also be removed in the next "
- "version",
+ "the top-level namespace will also be removed in the next version",
FutureWarning,
stacklevel=2,
)
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 0a3009f74492f..cacd6f5454de7 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -51,7 +51,18 @@
from collections import namedtuple
from contextlib import contextmanager
import re
-from typing import Any, Dict, Iterable, List
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ cast,
+)
import warnings
DeprecatedOption = namedtuple("DeprecatedOption", "key msg rkey removal_ver")
@@ -80,7 +91,7 @@ class OptionError(AttributeError, KeyError):
# User API
-def _get_single_key(pat, silent):
+def _get_single_key(pat: str, silent: bool) -> str:
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
@@ -98,7 +109,7 @@ def _get_single_key(pat, silent):
return key
-def _get_option(pat, silent=False):
+def _get_option(pat: str, silent: bool = False):
key = _get_single_key(pat, silent)
# walk the nested dict
@@ -106,7 +117,7 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_option(*args, **kwargs):
+def _set_option(*args, **kwargs) -> None:
# must at least 1 arg deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
@@ -138,7 +149,7 @@ def _set_option(*args, **kwargs):
o.cb(key)
-def _describe_option(pat="", _print_desc=True):
+def _describe_option(pat: str = "", _print_desc: bool = True):
keys = _select_options(pat)
if len(keys) == 0:
@@ -154,7 +165,7 @@ def _describe_option(pat="", _print_desc=True):
return s
-def _reset_option(pat, silent=False):
+def _reset_option(pat: str, silent: bool = False) -> None:
keys = _select_options(pat)
@@ -165,15 +176,14 @@ def _reset_option(pat, silent=False):
raise ValueError(
"You must specify at least 4 characters when "
"resetting multiple keys, use the special keyword "
- '"all" to reset all the options to their default '
- "value"
+ '"all" to reset all the options to their default value'
)
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
-def get_default_val(pat):
+def get_default_val(pat: str):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
@@ -181,11 +191,11 @@ def get_default_val(pat):
class DictWrapper:
""" provide attribute-style access to a nested dict"""
- def __init__(self, d, prefix=""):
+ def __init__(self, d: Dict[str, Any], prefix: str = ""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
- def __setattr__(self, key, val):
+ def __setattr__(self, key: str, val: Any) -> None:
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
@@ -211,7 +221,7 @@ def __getattr__(self, key: str):
else:
return _get_option(prefix)
- def __dir__(self):
+ def __dir__(self) -> Iterable[str]:
return list(self.d.keys())
@@ -412,23 +422,31 @@ def __exit__(self, *args):
_set_option(pat, val, silent=True)
-def register_option(key: str, defval: object, doc="", validator=None, cb=None):
- """Register an option in the package-wide pandas config object
+def register_option(
+ key: str,
+ defval: object,
+ doc: str = "",
+ validator: Optional[Callable[[Any], Any]] = None,
+ cb: Optional[Callable[[str], Any]] = None,
+) -> None:
+ """
+ Register an option in the package-wide pandas config object
Parameters
----------
- key - a fully-qualified key, e.g. "x.y.option - z".
- defval - the default value of the option
- doc - a string description of the option
- validator - a function of a single argument, should raise `ValueError` if
- called with a value which is not a legal value for the option.
- cb - a function of a single argument "key", which is called
- immediately after an option value is set/reset. key is
- the full name of the option.
-
- Returns
- -------
- Nothing.
+ key : str
+ Fully-qualified key, e.g. "x.y.option - z".
+ defval : object
+ Default value of the option.
+ doc : str
+ Description of the option.
+ validator : Callable, optional
+ Function of a single argument, should raise `ValueError` if
+ called with a value which is not a legal value for the option.
+ cb
+ a function of a single argument "key", which is called
+ immediately after an option value is set/reset. key is
+ the full name of the option.
Raises
------
@@ -481,7 +499,9 @@ def register_option(key: str, defval: object, doc="", validator=None, cb=None):
)
-def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
+def deprecate_option(
+ key: str, msg: Optional[str] = None, rkey: Optional[str] = None, removal_ver=None
+) -> None:
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
@@ -494,32 +514,27 @@ def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
Parameters
----------
- key - the name of the option to be deprecated. must be a fully-qualified
- option name (e.g "x.y.z.rkey").
-
- msg - (Optional) a warning message to output when the key is referenced.
- if no message is given a default message will be emitted.
-
- rkey - (Optional) the name of an option to reroute access to.
- If specified, any referenced `key` will be re-routed to `rkey`
- including set/get/reset.
- rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
- used by the default message if no `msg` is specified.
-
- removal_ver - (Optional) specifies the version in which this option will
- be removed. used by the default message if no `msg`
- is specified.
-
- Returns
- -------
- Nothing
+ key : str
+ Name of the option to be deprecated.
+ must be a fully-qualified option name (e.g "x.y.z.rkey").
+ msg : str, optional
+ Warning message to output when the key is referenced.
+ if no message is given a default message will be emitted.
+ rkey : str, optional
+ Name of an option to reroute access to.
+ If specified, any referenced `key` will be
+ re-routed to `rkey` including set/get/reset.
+ rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
+ used by the default message if no `msg` is specified.
+ removal_ver : optional
+ Specifies the version in which this option will
+ be removed. used by the default message if no `msg` is specified.
Raises
------
- OptionError - if key has already been deprecated.
-
+ OptionError
+ If the specified key has already been deprecated.
"""
-
key = key.lower()
if key in _deprecated_options:
@@ -532,7 +547,7 @@ def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
# functions internal to the module
-def _select_options(pat):
+def _select_options(pat: str) -> List[str]:
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
@@ -550,7 +565,7 @@ def _select_options(pat):
return [k for k in keys if re.search(pat, k, re.I)]
-def _get_root(key):
+def _get_root(key: str) -> Tuple[Dict[str, Any], str]:
path = key.split(".")
cursor = _global_config
for p in path[:-1]:
@@ -558,14 +573,14 @@ def _get_root(key):
return cursor, path[-1]
-def _is_deprecated(key):
+def _is_deprecated(key: str) -> bool:
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
-def _get_deprecated_option(key):
+def _get_deprecated_option(key: str):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
@@ -582,7 +597,7 @@ def _get_deprecated_option(key):
return d
-def _get_registered_option(key):
+def _get_registered_option(key: str):
"""
Retrieves the option metadata if `key` is a registered option.
@@ -593,7 +608,7 @@ def _get_registered_option(key):
return _registered_options.get(key)
-def _translate_key(key):
+def _translate_key(key: str) -> str:
"""
if key id deprecated and a replacement key defined, will return the
replacement key, otherwise returns `key` as - is
@@ -606,7 +621,7 @@ def _translate_key(key):
return key
-def _warn_if_deprecated(key):
+def _warn_if_deprecated(key: str) -> bool:
"""
Checks if `key` is a deprecated option and if so, prints a warning.
@@ -634,7 +649,7 @@ def _warn_if_deprecated(key):
return False
-def _build_option_description(k):
+def _build_option_description(k: str) -> str:
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
@@ -659,7 +674,7 @@ def _build_option_description(k):
return s
-def pp_options_list(keys, width=80, _print=False):
+def pp_options_list(keys: Iterable[str], width=80, _print: bool = False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
@@ -697,6 +712,9 @@ def pp(name: str, ks: Iterable[str]) -> List[str]:
#
# helpers
+FuncType = Callable[..., Any]
+F = TypeVar("F", bound=FuncType)
+
@contextmanager
def config_prefix(prefix):
@@ -728,12 +746,12 @@ def config_prefix(prefix):
global register_option, get_option, set_option, reset_option
- def wrap(func):
- def inner(key, *args, **kwds):
+ def wrap(func: F) -> F:
+ def inner(key: str, *args, **kwds):
pkey = f"{prefix}.{key}"
return func(pkey, *args, **kwds)
- return inner
+ return cast(F, inner)
_register_option = register_option
_get_option = get_option
@@ -751,7 +769,7 @@ def inner(key, *args, **kwds):
# arg in register_option
-def is_type_factory(_type):
+def is_type_factory(_type: Type[Any]) -> Callable[[Any], None]:
"""
Parameters
@@ -765,14 +783,14 @@ def is_type_factory(_type):
"""
- def inner(x):
+ def inner(x) -> None:
if type(x) != _type:
raise ValueError(f"Value must have type '{_type}'")
return inner
-def is_instance_factory(_type):
+def is_instance_factory(_type) -> Callable[[Any], None]:
"""
Parameters
@@ -792,19 +810,19 @@ def is_instance_factory(_type):
else:
type_repr = f"'{_type}'"
- def inner(x):
+ def inner(x) -> None:
if not isinstance(x, _type):
raise ValueError(f"Value must be an instance of {type_repr}")
return inner
-def is_one_of_factory(legal_values):
+def is_one_of_factory(legal_values) -> Callable[[Any], None]:
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
- def inner(x):
+ def inner(x) -> None:
if x not in legal_values:
if not any(c(x) for c in callables):
@@ -818,7 +836,7 @@ def inner(x):
return inner
-def is_nonnegative_int(value):
+def is_nonnegative_int(value: Optional[int]) -> None:
"""
Verify that value is None or a positive int.
@@ -853,7 +871,7 @@ def is_nonnegative_int(value):
is_text = is_instance_factory((str, bytes))
-def is_callable(obj):
+def is_callable(obj) -> bool:
"""
Parameters
diff --git a/pandas/_config/display.py b/pandas/_config/display.py
index 067b7c503baab..ef319f4447565 100644
--- a/pandas/_config/display.py
+++ b/pandas/_config/display.py
@@ -1,6 +1,7 @@
"""
Unopinionated display configuration.
"""
+
import locale
import sys
@@ -11,7 +12,7 @@
_initial_defencoding = None
-def detect_console_encoding():
+def detect_console_encoding() -> str:
"""
Try to find the most capable encoding supported by the console.
slightly modified from the way IPython handles the same issue.
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index dd1d4948aa6e3..0d68e78372d8a 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -12,7 +12,7 @@
@contextmanager
-def set_locale(new_locale, lc_var=locale.LC_ALL):
+def set_locale(new_locale, lc_var: int = locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
@@ -44,7 +44,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL):
locale.setlocale(lc_var, current_locale)
-def can_set_locale(lc, lc_var=locale.LC_ALL):
+def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
@@ -58,7 +58,7 @@ def can_set_locale(lc, lc_var=locale.LC_ALL):
Returns
-------
- is_valid : bool
+ bool
Whether the passed locale can be set
"""
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 7a2fc9dc7845a..dd1f38ce3a842 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -914,8 +914,7 @@ def rank_1d(rank_t[:] in_arr, ties_method='average',
ranks[argsorted[j]] = i + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported for '
- 'non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = j + 1
@@ -971,8 +970,7 @@ def rank_1d(rank_t[:] in_arr, ties_method='average',
ranks[argsorted[j]] = i + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported for '
- 'non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for j in range(i - dups + 1, i + 1):
ranks[argsorted[j]] = j + 1
@@ -1137,8 +1135,7 @@ def rank_2d(rank_t[:, :] in_arr, axis=0, ties_method='average',
ranks[i, argsorted[i, z]] = j + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
- raise ValueError('first not supported '
- 'for non-numeric data')
+ raise ValueError('first not supported for non-numeric data')
else:
for z in range(j - dups + 1, j + 1):
ranks[i, argsorted[i, z]] = z + 1
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index abb8a6d388d26..93ea94f7b18fc 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -686,8 +686,7 @@ def _group_ohlc(floating[:, :] out,
raise ValueError('Output array must have 4 columns')
if K > 1:
- raise NotImplementedError("Argument 'values' must have only "
- "one dimension")
+ raise NotImplementedError("Argument 'values' must have only one dimension")
out[:] = np.nan
with nogil:
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 5298d8c5ed34e..878da670b2f68 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -51,8 +51,9 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
k = <bytes>key.encode(encoding)
kb = <uint8_t *>k
if len(k) != 16:
- raise ValueError("key should be a 16-byte string encoded, "
- f"got {k} (len {len(k)})")
+ raise ValueError(
+ f"key should be a 16-byte string encoded, got {k} (len {len(k)})"
+ )
n = len(arr)
@@ -77,8 +78,10 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'):
hash(val)
data = <bytes>str(val).encode(encoding)
else:
- raise TypeError(f"{val} of type {type(val)} is not a valid type "
- "for hashing, must be string or null")
+ raise TypeError(
+ f"{val} of type {type(val)} is not a valid type for hashing, "
+ "must be string or null"
+ )
l = len(data)
lens[i] = l
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 59ba1705d2dbb..884db9ee931d4 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -13,26 +13,45 @@ cnp.import_array()
cdef extern from "numpy/npy_math.h":
float64_t NAN "NPY_NAN"
-
from pandas._libs.khash cimport (
khiter_t,
-
- kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
- kh_get_str, kh_destroy_str, kh_resize_str,
-
- kh_put_strbox, kh_get_strbox, kh_init_strbox,
-
- kh_int64_t, kh_init_int64, kh_resize_int64, kh_destroy_int64,
- kh_get_int64, kh_exist_int64, kh_put_int64,
-
- kh_float64_t, kh_exist_float64, kh_put_float64, kh_init_float64,
- kh_get_float64, kh_destroy_float64, kh_resize_float64,
-
- kh_resize_uint64, kh_exist_uint64, kh_destroy_uint64, kh_put_uint64,
- kh_get_uint64, kh_init_uint64,
-
- kh_destroy_pymap, kh_exist_pymap, kh_init_pymap, kh_get_pymap,
- kh_put_pymap, kh_resize_pymap)
+ kh_str_t,
+ kh_init_str,
+ kh_put_str,
+ kh_exist_str,
+ kh_get_str,
+ kh_destroy_str,
+ kh_resize_str,
+ kh_put_strbox,
+ kh_get_strbox,
+ kh_init_strbox,
+ kh_int64_t,
+ kh_init_int64,
+ kh_resize_int64,
+ kh_destroy_int64,
+ kh_get_int64,
+ kh_exist_int64,
+ kh_put_int64,
+ kh_float64_t,
+ kh_exist_float64,
+ kh_put_float64,
+ kh_init_float64,
+ kh_get_float64,
+ kh_destroy_float64,
+ kh_resize_float64,
+ kh_resize_uint64,
+ kh_exist_uint64,
+ kh_destroy_uint64,
+ kh_put_uint64,
+ kh_get_uint64,
+ kh_init_uint64,
+ kh_destroy_pymap,
+ kh_exist_pymap,
+ kh_init_pymap,
+ kh_get_pymap,
+ kh_put_pymap,
+ kh_resize_pymap,
+)
cimport pandas._libs.util as util
@@ -63,8 +82,9 @@ cdef class Factorizer:
def get_count(self):
return self.count
- def factorize(self, ndarray[object] values, sort=False, na_sentinel=-1,
- na_value=None):
+ def factorize(
+ self, ndarray[object] values, sort=False, na_sentinel=-1, na_value=None
+ ):
"""
Factorize values with nans replaced by na_sentinel
>>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20)
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index ac8172146d351..2dfc14378baf6 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -17,8 +17,8 @@ cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.tslibs.conversion cimport maybe_datetimelike_to_i8
from pandas._libs.tslibs.nattype cimport c_NaT as NaT
+from pandas._libs.tslibs.c_timestamp cimport _Timestamp
from pandas._libs.hashtable cimport HashTable
@@ -26,19 +26,13 @@ from pandas._libs import algos, hashtable as _hash
from pandas._libs.tslibs import Timestamp, Timedelta, period as periodlib
from pandas._libs.missing import checknull
-cdef int64_t NPY_NAT = util.get_nat()
-
cdef inline bint is_definitely_invalid_key(object val):
- if isinstance(val, tuple):
- try:
- hash(val)
- except TypeError:
- return True
-
- # we have a _data, means we are a NDFrame
- return (isinstance(val, slice) or util.is_array(val)
- or isinstance(val, list) or hasattr(val, '_data'))
+ try:
+ hash(val)
+ except TypeError:
+ return True
+ return False
cpdef get_value_at(ndarray arr, object loc, object tz=None):
@@ -72,9 +66,10 @@ cdef class IndexEngine:
self.over_size_threshold = n >= _SIZE_CUTOFF
self.clear_mapping()
- def __contains__(self, object val):
+ def __contains__(self, val: object) -> bool:
+ # We assume before we get here:
+ # - val is hashable
self._ensure_mapping_populated()
- hash(val)
return val in self.mapping
cpdef get_value(self, ndarray arr, object key, object tz=None):
@@ -85,7 +80,6 @@ cdef class IndexEngine:
"""
cdef:
object loc
- void* data_ptr
loc = self.get_loc(key)
if isinstance(loc, slice) or util.is_array(loc):
@@ -101,7 +95,6 @@ cdef class IndexEngine:
"""
cdef:
object loc
- void* data_ptr
loc = self.get_loc(key)
value = convert_scalar(arr, value)
@@ -169,6 +162,15 @@ cdef class IndexEngine:
int count
indexer = self._get_index_values() == val
+ return self._unpack_bool_indexer(indexer, val)
+
+ cdef _unpack_bool_indexer(self,
+ ndarray[uint8_t, ndim=1, cast=True] indexer,
+ object val):
+ cdef:
+ ndarray[intp_t, ndim=1] found
+ int count
+
found = np.where(indexer)[0]
count = len(found)
@@ -215,7 +217,8 @@ cdef class IndexEngine:
return self.monotonic_dec == 1
cdef inline _do_monotonic_check(self):
- cdef object is_unique
+ cdef:
+ bint is_unique
try:
values = self._get_index_values()
self.monotonic_inc, self.monotonic_dec, is_unique = \
@@ -238,10 +241,10 @@ cdef class IndexEngine:
cdef _call_monotonic(self, values):
return algos.is_monotonic(values, timelike=False)
- def get_backfill_indexer(self, other, limit=None):
+ def get_backfill_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
return algos.backfill(self._get_index_values(), other, limit=limit)
- def get_pad_indexer(self, other, limit=None):
+ def get_pad_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
return algos.pad(self._get_index_values(), other, limit=limit)
cdef _make_hash_table(self, Py_ssize_t n):
@@ -409,20 +412,29 @@ cdef class DatetimeEngine(Int64Engine):
cdef _get_box_dtype(self):
return 'M8[ns]'
- def __contains__(self, object val):
+ cdef int64_t _unbox_scalar(self, scalar) except? -1:
+ # NB: caller is responsible for ensuring tzawareness compat
+ # before we get here
+ if not (isinstance(scalar, _Timestamp) or scalar is NaT):
+ raise TypeError(scalar)
+ return scalar.value
+
+ def __contains__(self, val: object) -> bool:
+ # We assume before we get here:
+ # - val is hashable
cdef:
- int64_t loc
+ int64_t loc, conv
+ conv = self._unbox_scalar(val)
if self.over_size_threshold and self.is_monotonic_increasing:
if not self.is_unique:
- return self._get_loc_duplicates(val)
+ return self._get_loc_duplicates(conv)
values = self._get_index_values()
- conv = maybe_datetimelike_to_i8(val)
loc = values.searchsorted(conv, side='left')
return values[loc] == conv
self._ensure_mapping_populated()
- return maybe_datetimelike_to_i8(val) in self.mapping
+ return conv in self.mapping
cdef _get_index_values(self):
return self.vgetter().view('i8')
@@ -431,24 +443,26 @@ cdef class DatetimeEngine(Int64Engine):
return algos.is_monotonic(values, timelike=True)
cpdef get_loc(self, object val):
+ # NB: the caller is responsible for ensuring that we are called
+ # with either a Timestamp or NaT (Timedelta or NaT for TimedeltaEngine)
+
cdef:
int64_t loc
if is_definitely_invalid_key(val):
- raise TypeError
+ raise TypeError(f"'{val}' is an invalid key")
+
+ try:
+ conv = self._unbox_scalar(val)
+ except TypeError:
+ raise KeyError(val)
# Welcome to the spaghetti factory
if self.over_size_threshold and self.is_monotonic_increasing:
if not self.is_unique:
- val = maybe_datetimelike_to_i8(val)
- return self._get_loc_duplicates(val)
+ return self._get_loc_duplicates(conv)
values = self._get_index_values()
- try:
- conv = maybe_datetimelike_to_i8(val)
- loc = values.searchsorted(conv, side='left')
- except TypeError:
- self._date_check_type(val)
- raise KeyError(val)
+ loc = values.searchsorted(conv, side='left')
if loc == len(values) or values[loc] != conv:
raise KeyError(val)
@@ -456,27 +470,12 @@ cdef class DatetimeEngine(Int64Engine):
self._ensure_mapping_populated()
if not self.unique:
- val = maybe_datetimelike_to_i8(val)
- return self._get_loc_duplicates(val)
+ return self._get_loc_duplicates(conv)
try:
- return self.mapping.get_item(val.value)
+ return self.mapping.get_item(conv)
except KeyError:
raise KeyError(val)
- except AttributeError:
- pass
-
- try:
- val = maybe_datetimelike_to_i8(val)
- return self.mapping.get_item(val)
- except (TypeError, ValueError):
- self._date_check_type(val)
- raise KeyError(val)
-
- cdef inline _date_check_type(self, object val):
- hash(val)
- if not util.is_integer_object(val):
- raise KeyError(val)
def get_indexer(self, values):
self._ensure_mapping_populated()
@@ -485,13 +484,13 @@ cdef class DatetimeEngine(Int64Engine):
values = np.asarray(values).view('i8')
return self.mapping.lookup(values)
- def get_pad_indexer(self, other, limit=None):
+ def get_pad_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
if other.dtype != self._get_box_dtype():
return np.repeat(-1, len(other)).astype('i4')
other = np.asarray(other).view('i8')
return algos.pad(self._get_index_values(), other, limit=limit)
- def get_backfill_indexer(self, other, limit=None):
+ def get_backfill_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
if other.dtype != self._get_box_dtype():
return np.repeat(-1, len(other)).astype('i4')
other = np.asarray(other).view('i8')
@@ -503,22 +502,24 @@ cdef class TimedeltaEngine(DatetimeEngine):
cdef _get_box_dtype(self):
return 'm8[ns]'
+ cdef int64_t _unbox_scalar(self, scalar) except? -1:
+ if not (isinstance(scalar, Timedelta) or scalar is NaT):
+ raise TypeError(scalar)
+ return scalar.value
+
cdef class PeriodEngine(Int64Engine):
cdef _get_index_values(self):
- return super(PeriodEngine, self).vgetter()
-
- cdef void _call_map_locations(self, values):
- # super(...) pattern doesn't seem to work with `cdef`
- Int64Engine._call_map_locations(self, values.view('i8'))
+ return super(PeriodEngine, self).vgetter().view("i8")
cdef _call_monotonic(self, values):
# super(...) pattern doesn't seem to work with `cdef`
return Int64Engine._call_monotonic(self, values.view('i8'))
def get_indexer(self, values):
- cdef ndarray[int64_t, ndim=1] ordinals
+ cdef:
+ ndarray[int64_t, ndim=1] ordinals
super(PeriodEngine, self)._ensure_mapping_populated()
@@ -527,14 +528,14 @@ cdef class PeriodEngine(Int64Engine):
return self.mapping.lookup(ordinals)
- def get_pad_indexer(self, other, limit=None):
+ def get_pad_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
freq = super(PeriodEngine, self).vgetter().freq
ordinal = periodlib.extract_ordinals(other, freq)
return algos.pad(self._get_index_values(),
np.asarray(ordinal), limit=limit)
- def get_backfill_indexer(self, other, limit=None):
+ def get_backfill_indexer(self, other: np.ndarray, limit=None) -> np.ndarray:
freq = super(PeriodEngine, self).vgetter().freq
ordinal = periodlib.extract_ordinals(other, freq)
@@ -653,7 +654,10 @@ cdef class BaseMultiIndexCodesEngine:
# integers representing labels: we will use its get_loc and get_indexer
self._base.__init__(self, lambda: lab_ints, len(lab_ints))
- def _extract_level_codes(self, object target, object method=None):
+ def _codes_to_ints(self, codes):
+ raise NotImplementedError("Implemented by subclass")
+
+ def _extract_level_codes(self, object target):
"""
Map the requested list of (tuple) keys to their integer representations
for searching in the underlying integer index.
@@ -717,7 +721,9 @@ cdef class BaseMultiIndexCodesEngine:
return indexer
- def __contains__(self, object val):
+ def __contains__(self, val: object) -> bool:
+ # We assume before we get here:
+ # - val is hashable
# Default __contains__ looks in the underlying mapping, which in this
# case only contains integer representations.
try:
diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in
index 093cca4fe7ed5..c7b67667bda17 100644
--- a/pandas/_libs/index_class_helper.pxi.in
+++ b/pandas/_libs/index_class_helper.pxi.in
@@ -10,24 +10,26 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
-# name, dtype, ctype, hashtable_name, hashtable_dtype
-dtypes = [('Float64', 'float64', 'float64_t', 'Float64', 'float64'),
- ('Float32', 'float32', 'float32_t', 'Float64', 'float64'),
- ('Int64', 'int64', 'int64_t', 'Int64', 'int64'),
- ('Int32', 'int32', 'int32_t', 'Int64', 'int64'),
- ('Int16', 'int16', 'int16_t', 'Int64', 'int64'),
- ('Int8', 'int8', 'int8_t', 'Int64', 'int64'),
- ('UInt64', 'uint64', 'uint64_t', 'UInt64', 'uint64'),
- ('UInt32', 'uint32', 'uint32_t', 'UInt64', 'uint64'),
- ('UInt16', 'uint16', 'uint16_t', 'UInt64', 'uint64'),
- ('UInt8', 'uint8', 'uint8_t', 'UInt64', 'uint64'),
+# name, dtype, hashtable_name
+dtypes = [('Float64', 'float64', 'Float64'),
+ ('Float32', 'float32', 'Float64'),
+ ('Int64', 'int64', 'Int64'),
+ ('Int32', 'int32', 'Int64'),
+ ('Int16', 'int16', 'Int64'),
+ ('Int8', 'int8', 'Int64'),
+ ('UInt64', 'uint64', 'UInt64'),
+ ('UInt32', 'uint32', 'UInt64'),
+ ('UInt16', 'uint16', 'UInt64'),
+ ('UInt8', 'uint8', 'UInt64'),
]
}}
-{{for name, dtype, ctype, hashtable_name, hashtable_dtype in dtypes}}
+{{for name, dtype, hashtable_name in dtypes}}
cdef class {{name}}Engine(IndexEngine):
+ # constructor-caller is responsible for ensuring that vgetter()
+ # returns an ndarray with dtype {{dtype}}_t
cdef _make_hash_table(self, Py_ssize_t n):
return _hash.{{hashtable_name}}HashTable(n)
@@ -41,25 +43,18 @@ cdef class {{name}}Engine(IndexEngine):
cdef void _call_map_locations(self, values):
# self.mapping is of type {{hashtable_name}}HashTable,
# so convert dtype of values
- self.mapping.map_locations(algos.ensure_{{hashtable_dtype}}(values))
-
- cdef _get_index_values(self):
- return algos.ensure_{{dtype}}(self.vgetter())
+ self.mapping.map_locations(algos.ensure_{{hashtable_name.lower()}}(values))
cdef _maybe_get_bool_indexer(self, object val):
cdef:
ndarray[uint8_t, ndim=1, cast=True] indexer
ndarray[intp_t, ndim=1] found
- ndarray[{{ctype}}] values
+ ndarray[{{dtype}}_t, ndim=1] values
int count = 0
- {{if name not in {'Float64', 'Float32'} }}
- if not util.is_integer_object(val):
- raise KeyError(val)
- {{endif}}
+ self._check_type(val)
- # A view is needed for some subclasses, such as PeriodEngine:
- values = self._get_index_values().view('{{dtype}}')
+ values = self._get_index_values()
try:
with warnings.catch_warnings():
# e.g. if values is float64 and `val` is a str, suppress warning
@@ -70,14 +65,6 @@ cdef class {{name}}Engine(IndexEngine):
# when trying to cast it to ndarray
raise KeyError(val)
- found = np.where(indexer)[0]
- count = len(found)
-
- if count > 1:
- return indexer
- if count == 1:
- return int(found[0])
-
- raise KeyError(val)
+ return self._unpack_bool_indexer(indexer, val)
{{endfor}}
diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx
index 01f4fb060d982..cdccdb504571c 100644
--- a/pandas/_libs/indexing.pyx
+++ b/pandas/_libs/indexing.pyx
@@ -18,6 +18,7 @@ cdef class _NDFrameIndexerBase:
if ndim is None:
ndim = self._ndim = self.obj.ndim
if ndim > 2:
- raise ValueError("NDFrameIndexer does not support "
- "NDFrame objects with ndim > 2")
+ raise ValueError(
+ "NDFrameIndexer does not support NDFrame objects with ndim > 2"
+ )
return ndim
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index d09413bfa5210..a8728050f8071 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -26,7 +26,7 @@ cdef class IntervalTree(IntervalMixin):
"""A centered interval tree
Based off the algorithm described on Wikipedia:
- http://en.wikipedia.org/wiki/Interval_tree
+ https://en.wikipedia.org/wiki/Interval_tree
we are emulating the IndexEngine interface
"""
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 719db5c03f07f..9702eb4615909 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -11,6 +11,9 @@ from cython import Py_ssize_t
from cpython.object cimport PyObject_RichCompareBool, Py_EQ
from cpython.ref cimport Py_INCREF
from cpython.tuple cimport PyTuple_SET_ITEM, PyTuple_New
+from cpython.iterator cimport PyIter_Check
+from cpython.sequence cimport PySequence_Check
+from cpython.number cimport PyNumber_Check
from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyTime_Check, PyDelta_Check,
@@ -156,7 +159,8 @@ def is_scalar(val: object) -> bool:
True
"""
- return (cnp.PyArray_IsAnyScalar(val)
+ # Start with C-optimized checks
+ if (cnp.PyArray_IsAnyScalar(val)
# PyArray_IsAnyScalar is always False for bytearrays on Py3
or PyDate_Check(val)
or PyDelta_Check(val)
@@ -164,14 +168,54 @@ def is_scalar(val: object) -> bool:
# We differ from numpy, which claims that None is not scalar;
# see np.isscalar
or val is C_NA
- or val is None
- or isinstance(val, (Fraction, Number))
+ or val is None):
+ return True
+
+ # Next use C-optimized checks to exclude common non-scalars before falling
+ # back to non-optimized checks.
+ if PySequence_Check(val):
+ # e.g. list, tuple
+ # includes np.ndarray, Series which PyNumber_Check can return True for
+ return False
+
+ # Note: PyNumber_Check check includes Decimal, Fraction, numbers.Number
+ return (PyNumber_Check(val)
or util.is_period_object(val)
- or is_decimal(val)
or is_interval(val)
or util.is_offset_object(val))
+def is_iterator(obj: object) -> bool:
+ """
+ Check if the object is an iterator.
+
+ This is intended for generators, not list-like objects.
+
+ Parameters
+ ----------
+ obj : The object to check
+
+ Returns
+ -------
+ is_iter : bool
+ Whether `obj` is an iterator.
+
+ Examples
+ --------
+ >>> is_iterator((x for x in []))
+ True
+ >>> is_iterator([1, 2, 3])
+ False
+ >>> is_iterator(datetime(2017, 1, 1))
+ False
+ >>> is_iterator("foo")
+ False
+ >>> is_iterator(1)
+ False
+ """
+ return PyIter_Check(obj)
+
+
def item_from_zerodim(val: object) -> object:
"""
If the value is a zerodim array, return the item it contains.
@@ -1624,6 +1668,10 @@ cdef class StringValidator(Validator):
cdef inline bint is_array_typed(self) except -1:
return issubclass(self.dtype.type, np.str_)
+ cdef bint is_valid_null(self, object value) except -1:
+ # We deliberately exclude None / NaN here since StringArray uses NA
+ return value is C_NA
+
cpdef bint is_string_array(ndarray values, bint skipna=False):
cdef:
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 26653438356b1..4d17a6f883c1c 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -417,12 +417,12 @@ class NAType(C_NAType):
if other is C_NA:
return NA
elif isinstance(other, (numbers.Number, np.bool_)):
- if other == 1 or other == -1:
+ if other == 1:
return other
else:
return NA
elif isinstance(other, np.ndarray):
- return np.where((other == 1) | (other == -1), other, NA)
+ return np.where(other == 1, other, NA)
return NotImplemented
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index ee83901040b36..3a6dd506b2428 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -72,9 +72,9 @@ cdef class IntIndex(SparseIndex):
"""
if self.npoints > self.length:
- msg = (f"Too many indices. Expected "
- f"{self.length} but found {self.npoints}")
- raise ValueError(msg)
+ raise ValueError(
+ f"Too many indices. Expected {self.length} but found {self.npoints}"
+ )
# Indices are vacuously ordered and non-negative
# if the sequence of indices is empty.
diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in
index 62ea477167b72..996da4ca2f92b 100644
--- a/pandas/_libs/sparse_op_helper.pxi.in
+++ b/pandas/_libs/sparse_op_helper.pxi.in
@@ -84,7 +84,8 @@ def get_op(tup):
'ge': '{0} >= {1}',
'and': '{0} & {1}', # logical op
- 'or': '{0} | {1}'}
+ 'or': '{0} | {1}',
+ 'xor': '{0} ^ {1}'}
return ops_dict[opname].format(lval, rval)
@@ -94,7 +95,7 @@ def get_dispatch(dtypes):
ops_list = ['add', 'sub', 'mul', 'div', 'mod', 'truediv',
'floordiv', 'pow',
'eq', 'ne', 'lt', 'gt', 'le', 'ge',
- 'and', 'or']
+ 'and', 'or', 'xor']
for opname in ops_list:
for dtype, arith_comp_group, logical_group in dtypes:
@@ -104,13 +105,13 @@ def get_dispatch(dtypes):
elif opname in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
# comparison op
rdtype = 'uint8'
- elif opname in ('and', 'or'):
+ elif opname in ('and', 'or', 'xor'):
# logical op
rdtype = 'uint8'
else:
rdtype = dtype
- if opname in ('and', 'or'):
+ if opname in ('and', 'or', 'xor'):
if logical_group:
yield opname, dtype, rdtype
else:
diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h
index bcf6350aa9090..916838d1e9584 100644
--- a/pandas/_libs/src/klib/khash.h
+++ b/pandas/_libs/src/klib/khash.h
@@ -53,7 +53,7 @@ int main() {
speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
- https://github.com/stefanocasazza/ULib
- - http://nothings.org/computer/judy/
+ - https://nothings.org/computer/judy/
* Allow to optionally use linear probing which usually has better
performance for random input. Double hashing is still the default as it
diff --git a/pandas/_libs/src/skiplist.h b/pandas/_libs/src/skiplist.h
index 60c1a56727777..1679ced174f29 100644
--- a/pandas/_libs/src/skiplist.h
+++ b/pandas/_libs/src/skiplist.h
@@ -10,7 +10,7 @@ Flexibly-sized, index-able skiplist data structure for maintaining a sorted
list of values
Port of Wes McKinney's Cython version of Raymond Hettinger's original pure
-Python recipe (http://rhettinger.wordpress.com/2010/02/06/lost-knowledge/)
+Python recipe (https://rhettinger.wordpress.com/2010/02/06/lost-knowledge/)
*/
#ifndef PANDAS__LIBS_SRC_SKIPLIST_H_
diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h
index 8d04874b4c9bf..b40ac9856d6a6 100644
--- a/pandas/_libs/src/ujson/lib/ultrajson.h
+++ b/pandas/_libs/src/ujson/lib/ultrajson.h
@@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/src/ujson/lib/ultrajsondec.c b/pandas/_libs/src/ujson/lib/ultrajsondec.c
index 4eb18ee13d70b..36eb170f8048f 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsondec.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsondec.c
@@ -33,7 +33,7 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
index 51c9b9244ecfc..065e3b2c60cf9 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
@@ -33,7 +33,7 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/src/ujson/python/JSONtoObj.c b/pandas/_libs/src/ujson/python/JSONtoObj.c
index b2fc788478864..3db10237b2688 100644
--- a/pandas/_libs/src/ujson/python/JSONtoObj.c
+++ b/pandas/_libs/src/ujson/python/JSONtoObj.c
@@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/src/ujson/python/date_conversions.c b/pandas/_libs/src/ujson/python/date_conversions.c
new file mode 100644
index 0000000000000..fc4bdef8463af
--- /dev/null
+++ b/pandas/_libs/src/ujson/python/date_conversions.c
@@ -0,0 +1,118 @@
+// Conversion routines that are useful for serialization,
+// but which don't interact with JSON objects directly
+
+#include "date_conversions.h"
+#include <../../../tslibs/src/datetime/np_datetime.h>
+#include <../../../tslibs/src/datetime/np_datetime_strings.h>
+
+/*
+ * Function: scaleNanosecToUnit
+ * -----------------------------
+ *
+ * Scales an integer value representing time in nanoseconds to provided unit.
+ *
+ * Mutates the provided value directly. Returns 0 on success, non-zero on error.
+ */
+int scaleNanosecToUnit(npy_int64 *value, NPY_DATETIMEUNIT unit) {
+ switch (unit) {
+ case NPY_FR_ns:
+ break;
+ case NPY_FR_us:
+ *value /= 1000LL;
+ break;
+ case NPY_FR_ms:
+ *value /= 1000000LL;
+ break;
+ case NPY_FR_s:
+ *value /= 1000000000LL;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Converts the int64_t representation of a datetime to ISO; mutates len */
+char *int64ToIso(int64_t value, NPY_DATETIMEUNIT base, size_t *len) {
+ npy_datetimestruct dts;
+ int ret_code;
+
+ pandas_datetime_to_datetimestruct(value, NPY_FR_ns, &dts);
+
+ *len = (size_t)get_datetime_iso_8601_strlen(0, base);
+ char *result = PyObject_Malloc(*len);
+
+ if (result == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ ret_code = make_iso_8601_datetime(&dts, result, *len, base);
+ if (ret_code != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Could not convert datetime value to string");
+ PyObject_Free(result);
+ }
+
+ // Note that get_datetime_iso_8601_strlen just gives a generic size
+ // for ISO string conversion, not the actual size used
+ *len = strlen(result);
+ return result;
+}
+
+npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base) {
+ scaleNanosecToUnit(&dt, base);
+ return dt;
+}
+
+/* Convert PyDatetime To ISO C-string. mutates len */
+char *PyDateTimeToIso(PyDateTime_Date *obj, NPY_DATETIMEUNIT base,
+ size_t *len) {
+ npy_datetimestruct dts;
+ int ret;
+
+ ret = convert_pydatetime_to_datetimestruct(obj, &dts);
+ if (ret != 0) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ValueError,
+ "Could not convert PyDateTime to numpy datetime");
+ }
+ return NULL;
+ }
+
+ *len = (size_t)get_datetime_iso_8601_strlen(0, base);
+ char *result = PyObject_Malloc(*len);
+ ret = make_iso_8601_datetime(&dts, result, *len, base);
+
+ if (ret != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Could not convert datetime value to string");
+ PyObject_Free(result);
+ return NULL;
+ }
+
+ // Note that get_datetime_iso_8601_strlen just gives a generic size
+ // for ISO string conversion, not the actual size used
+ *len = strlen(result);
+ return result;
+}
+
+npy_datetime PyDateTimeToEpoch(PyDateTime_Date *dt, NPY_DATETIMEUNIT base) {
+ npy_datetimestruct dts;
+ int ret;
+
+ ret = convert_pydatetime_to_datetimestruct(dt, &dts);
+ if (ret != 0) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ValueError,
+ "Could not convert PyDateTime to numpy datetime");
+ }
+ // TODO: is setting errMsg required?
+ //((JSONObjectEncoder *)tc->encoder)->errorMsg = "";
+ // return NULL;
+ }
+
+ npy_datetime npy_dt = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts);
+ return NpyDateTimeToEpoch(npy_dt, base);
+}
diff --git a/pandas/_libs/src/ujson/python/date_conversions.h b/pandas/_libs/src/ujson/python/date_conversions.h
new file mode 100644
index 0000000000000..45455f4d6128b
--- /dev/null
+++ b/pandas/_libs/src/ujson/python/date_conversions.h
@@ -0,0 +1,31 @@
+#ifndef PANDAS__LIBS_SRC_UJSON_DATE_CONVERSIONS
+#define PANDAS__LIBS_SRC_UJSON_DATE_CONVERSIONS
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <numpy/ndarraytypes.h>
+#include "datetime.h"
+
+// Scales value inplace from nanosecond resolution to unit resolution
+int scaleNanosecToUnit(npy_int64 *value, NPY_DATETIMEUNIT unit);
+
+// Converts an int64 object representing a date to ISO format
+// up to precision `base` e.g. base="s" yields 2020-01-03T00:00:00Z
+// while base="ns" yields "2020-01-01T00:00:00.000000000Z"
+// len is mutated to save the length of the returned string
+char *int64ToIso(int64_t value, NPY_DATETIMEUNIT base, size_t *len);
+
+// TODO: this function doesn't do a lot; should augment or replace with
+// scaleNanosecToUnit
+npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base);
+
+// Converts a Python object representing a Date / Datetime to ISO format
+// up to precision `base` e.g. base="s" yields 2020-01-03T00:00:00Z
+// while base="ns" yields "2020-01-01T00:00:00.000000000Z"
+// len is mutated to save the length of the returned string
+char *PyDateTimeToIso(PyDateTime_Date *obj, NPY_DATETIMEUNIT base, size_t *len);
+
+// Convert a Python Date/Datetime to Unix epoch with resolution base
+npy_datetime PyDateTimeToEpoch(PyDateTime_Date *dt, NPY_DATETIMEUNIT base);
+
+#endif
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index c413a16f8d5f0..62c2870c198c4 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -31,7 +31,7 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights
reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
@@ -45,8 +45,7 @@ Numeric decoder derived from from TCL library
#include <numpy/ndarraytypes.h>
#include <numpy/npy_math.h>
#include <ultrajson.h>
-#include <../../../tslibs/src/datetime/np_datetime.h>
-#include <../../../tslibs/src/datetime/np_datetime_strings.h>
+#include "date_conversions.h"
#include "datetime.h"
static PyTypeObject *type_decimal;
@@ -209,34 +208,6 @@ static TypeContext *createTypeContext(void) {
return pc;
}
-/*
- * Function: scaleNanosecToUnit
- * -----------------------------
- *
- * Scales an integer value representing time in nanoseconds to provided unit.
- *
- * Mutates the provided value directly. Returns 0 on success, non-zero on error.
- */
-static int scaleNanosecToUnit(npy_int64 *value, NPY_DATETIMEUNIT unit) {
- switch (unit) {
- case NPY_FR_ns:
- break;
- case NPY_FR_us:
- *value /= 1000LL;
- break;
- case NPY_FR_ms:
- *value /= 1000000LL;
- break;
- case NPY_FR_s:
- *value /= 1000000000LL;
- break;
- default:
- return -1;
- }
-
- return 0;
-}
-
static PyObject *get_values(PyObject *obj) {
PyObject *values = NULL;
@@ -379,34 +350,6 @@ static char *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *Py_UNUSED(tc),
return (char *)PyUnicode_AsUTF8AndSize(_obj, (Py_ssize_t *)_outLen);
}
-/* Converts the int64_t representation of a datetime to ISO; mutates len */
-static char *int64ToIso(int64_t value, NPY_DATETIMEUNIT base, size_t *len) {
- npy_datetimestruct dts;
- int ret_code;
-
- pandas_datetime_to_datetimestruct(value, NPY_FR_ns, &dts);
-
- *len = (size_t)get_datetime_iso_8601_strlen(0, base);
- char *result = PyObject_Malloc(*len);
-
- if (result == NULL) {
- PyErr_NoMemory();
- return NULL;
- }
-
- ret_code = make_iso_8601_datetime(&dts, result, *len, base);
- if (ret_code != 0) {
- PyErr_SetString(PyExc_ValueError,
- "Could not convert datetime value to string");
- PyObject_Free(result);
- }
-
- // Note that get_datetime_iso_8601_strlen just gives a generic size
- // for ISO string conversion, not the actual size used
- *len = strlen(result);
- return result;
-}
-
/* JSON callback. returns a char* and mutates the pointer to *len */
static char *NpyDateTimeToIsoCallback(JSOBJ Py_UNUSED(unused),
JSONTypeContext *tc, size_t *len) {
@@ -414,50 +357,12 @@ static char *NpyDateTimeToIsoCallback(JSOBJ Py_UNUSED(unused),
return int64ToIso(GET_TC(tc)->longValue, base, len);
}
-static npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base) {
- scaleNanosecToUnit(&dt, base);
- return dt;
-}
-
-/* Convert PyDatetime To ISO C-string. mutates len */
-static char *PyDateTimeToIso(PyDateTime_Date *obj, NPY_DATETIMEUNIT base,
- size_t *len) {
- npy_datetimestruct dts;
- int ret;
-
- ret = convert_pydatetime_to_datetimestruct(obj, &dts);
- if (ret != 0) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "Could not convert PyDateTime to numpy datetime");
- }
- return NULL;
- }
-
- *len = (size_t)get_datetime_iso_8601_strlen(0, base);
- char *result = PyObject_Malloc(*len);
- ret = make_iso_8601_datetime(&dts, result, *len, base);
-
- if (ret != 0) {
- PRINTMARK();
- PyErr_SetString(PyExc_ValueError,
- "Could not convert datetime value to string");
- PyObject_Free(result);
- return NULL;
- }
-
- // Note that get_datetime_iso_8601_strlen just gives a generic size
- // for ISO string conversion, not the actual size used
- *len = strlen(result);
- return result;
-}
-
/* JSON callback */
static char *PyDateTimeToIsoCallback(JSOBJ obj, JSONTypeContext *tc,
size_t *len) {
- if (!PyDateTime_Check(obj)) {
- PyErr_SetString(PyExc_TypeError, "Expected datetime object");
+ if (!PyDate_Check(obj)) {
+ PyErr_SetString(PyExc_TypeError, "Expected date object");
return NULL;
}
@@ -465,30 +370,6 @@ static char *PyDateTimeToIsoCallback(JSOBJ obj, JSONTypeContext *tc,
return PyDateTimeToIso(obj, base, len);
}
-static npy_datetime PyDateTimeToEpoch(PyObject *obj, NPY_DATETIMEUNIT base) {
- npy_datetimestruct dts;
- int ret;
-
- if (!PyDateTime_Check(obj)) {
- // TODO: raise TypeError
- }
- PyDateTime_Date *dt = (PyDateTime_Date *)obj;
-
- ret = convert_pydatetime_to_datetimestruct(dt, &dts);
- if (ret != 0) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "Could not convert PyDateTime to numpy datetime");
- }
- // TODO: is setting errMsg required?
- //((JSONObjectEncoder *)tc->encoder)->errorMsg = "";
- // return NULL;
- }
-
- npy_datetime npy_dt = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts);
- return NpyDateTimeToEpoch(npy_dt, base);
-}
-
static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) {
PyObject *obj = (PyObject *)_obj;
PyObject *str;
@@ -1504,6 +1385,7 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
char **ret;
char *dataptr, *cLabel;
int type_num;
+ NPY_DATETIMEUNIT base = enc->datetimeUnit;
PRINTMARK();
if (!labels) {
@@ -1541,32 +1423,10 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
break;
}
- // TODO: vectorized timedelta solution
- if (enc->datetimeIso &&
- (type_num == NPY_TIMEDELTA || PyDelta_Check(item))) {
- PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item);
- if (td == NULL) {
- Py_DECREF(item);
- NpyArr_freeLabels(ret, num);
- ret = 0;
- break;
- }
-
- PyObject *iso = PyObject_CallMethod(td, "isoformat", NULL);
- Py_DECREF(td);
- if (iso == NULL) {
- Py_DECREF(item);
- NpyArr_freeLabels(ret, num);
- ret = 0;
- break;
- }
-
- cLabel = (char *)PyUnicode_AsUTF8(iso);
- Py_DECREF(iso);
- len = strlen(cLabel);
- } else if (PyTypeNum_ISDATETIME(type_num)) {
- NPY_DATETIMEUNIT base = enc->datetimeUnit;
- npy_int64 longVal;
+ int is_datetimelike = 0;
+ npy_int64 nanosecVal;
+ if (PyTypeNum_ISDATETIME(type_num)) {
+ is_datetimelike = 1;
PyArray_VectorUnaryFunc *castfunc =
PyArray_GetCastFunc(PyArray_DescrFromType(type_num), NPY_INT64);
if (!castfunc) {
@@ -1574,27 +1434,74 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
"Cannot cast numpy dtype %d to long",
enc->npyType);
}
- castfunc(dataptr, &longVal, 1, NULL, NULL);
- if (enc->datetimeIso) {
- cLabel = int64ToIso(longVal, base, &len);
+ castfunc(dataptr, &nanosecVal, 1, NULL, NULL);
+ } else if (PyDate_Check(item) || PyDelta_Check(item)) {
+ is_datetimelike = 1;
+ if (PyObject_HasAttrString(item, "value")) {
+ nanosecVal = get_long_attr(item, "value");
} else {
- if (!scaleNanosecToUnit(&longVal, base)) {
- // TODO: This gets hit but somehow doesn't cause errors
- // need to clean up (elsewhere in module as well)
+ if (PyDelta_Check(item)) {
+ nanosecVal = total_seconds(item) *
+ 1000000000LL; // nanoseconds per second
+ } else {
+ // datetime.* objects don't follow above rules
+ nanosecVal = PyDateTimeToEpoch(item, NPY_FR_ns);
}
- cLabel = PyObject_Malloc(21); // 21 chars for int64
- sprintf(cLabel, "%" NPY_INT64_FMT, longVal);
- len = strlen(cLabel);
}
- } else if (PyDateTime_Check(item) || PyDate_Check(item)) {
- NPY_DATETIMEUNIT base = enc->datetimeUnit;
- if (enc->datetimeIso) {
- cLabel = PyDateTimeToIso((PyDateTime_Date *)item, base, &len);
+ }
+
+ if (is_datetimelike) {
+ if (nanosecVal == get_nat()) {
+ len = 5; // TODO: shouldn't require extra space for terminator
+ cLabel = PyObject_Malloc(len);
+ strncpy(cLabel, "null", len);
} else {
- cLabel = PyObject_Malloc(21); // 21 chars for int64
- sprintf(cLabel, "%" NPY_DATETIME_FMT,
- PyDateTimeToEpoch(item, base));
- len = strlen(cLabel);
+ if (enc->datetimeIso) {
+ // TODO: Vectorized Timedelta function
+ if ((type_num == NPY_TIMEDELTA) || (PyDelta_Check(item))) {
+ PyObject *td =
+ PyObject_CallFunction(cls_timedelta, "(O)", item);
+ if (td == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ PyObject *iso =
+ PyObject_CallMethod(td, "isoformat", NULL);
+ Py_DECREF(td);
+ if (iso == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ len = strlen(PyUnicode_AsUTF8(iso));
+ cLabel = PyObject_Malloc(len + 1);
+ memcpy(cLabel, PyUnicode_AsUTF8(iso), len + 1);
+ Py_DECREF(iso);
+ } else {
+ if (type_num == NPY_DATETIME) {
+ cLabel = int64ToIso(nanosecVal, base, &len);
+ } else {
+ cLabel = PyDateTimeToIso((PyDateTime_Date *)item,
+ base, &len);
+ }
+ }
+ if (cLabel == NULL) {
+ Py_DECREF(item);
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+ } else {
+ cLabel = PyObject_Malloc(21); // 21 chars for int64
+ sprintf(cLabel, "%" NPY_DATETIME_FMT,
+ NpyDateTimeToEpoch(nanosecVal, base));
+ len = strlen(cLabel);
+ }
}
} else { // Fallback to string representation
PyObject *str = PyObject_Str(item);
@@ -1615,6 +1522,10 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
ret[i] = PyObject_Malloc(len + 1);
memcpy(ret[i], cLabel, len + 1);
+ if (is_datetimelike) {
+ PyObject_Free(cLabel);
+ }
+
if (PyErr_Occurred()) {
NpyArr_freeLabels(ret, num);
ret = 0;
@@ -1784,7 +1695,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
PRINTMARK();
NPY_DATETIMEUNIT base =
((PyObjectEncoder *)tc->encoder)->datetimeUnit;
- GET_TC(tc)->longValue = PyDateTimeToEpoch(obj, base);
+ GET_TC(tc)->longValue = PyDateTimeToEpoch((PyDateTime_Date *)obj, base);
tc->type = JT_LONG;
}
return;
@@ -1810,7 +1721,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
PRINTMARK();
NPY_DATETIMEUNIT base =
((PyObjectEncoder *)tc->encoder)->datetimeUnit;
- GET_TC(tc)->longValue = PyDateTimeToEpoch(obj, base);
+ GET_TC(tc)->longValue = PyDateTimeToEpoch((PyDateTime_Date *)obj, base);
tc->type = JT_LONG;
}
return;
diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c
index 4a88fb7a4e849..a40f2709c0c61 100644
--- a/pandas/_libs/src/ujson/python/ujson.c
+++ b/pandas/_libs/src/ujson/python/ujson.c
@@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/src/ujson/python/version.h b/pandas/_libs/src/ujson/python/version.h
index ef6d28bf3a1f7..3f38642b6df87 100644
--- a/pandas/_libs/src/ujson/python/version.h
+++ b/pandas/_libs/src/ujson/python/version.h
@@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
Numeric decoder derived from from TCL library
-http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
* Copyright (c) 1988-1993 The Regents of the University of California.
* Copyright (c) 1994 Sun Microsystems, Inc.
*/
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 5a30b71a6fea1..0e57b563d4d25 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -127,9 +127,9 @@ cpdef assert_almost_equal(a, b,
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
- assert has_length(a) and has_length(b), ("Can't compare objects without "
- "length, one or both is invalid: "
- f"({a}, {b})")
+ assert has_length(a) and has_length(b), (
+ f"Can't compare objects without length, one or both is invalid: ({a}, {b})"
+ )
if a_is_ndarray and b_is_ndarray:
na, nb = a.size, b.size
diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx
index 6e6b809b9b5a6..2c72cec18f096 100644
--- a/pandas/_libs/tslibs/c_timestamp.pyx
+++ b/pandas/_libs/tslibs/c_timestamp.pyx
@@ -57,11 +57,12 @@ def integer_op_not_supported(obj):
# the caller; mypy finds this more palatable.
cls = type(obj).__name__
+ # GH#30886 using an fstring raises SystemError
int_addsub_msg = (
- f"Addition/subtraction of integers and integer-arrays with {cls} is "
+ "Addition/subtraction of integers and integer-arrays with {cls} is "
"no longer supported. Instead of adding/subtracting `n`, "
"use `n * obj.freq`"
- )
+ ).format(cls=cls)
return TypeError(int_addsub_msg)
@@ -123,7 +124,7 @@ cdef class _Timestamp(datetime):
def __reduce_ex__(self, protocol):
# python 3.6 compat
- # http://bugs.python.org/issue28730
+ # https://bugs.python.org/issue28730
# now __reduce_ex__ is defined and higher priority than __reduce__
return self.__reduce__()
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd
index 36e6b14be182a..c74307a3d2887 100644
--- a/pandas/_libs/tslibs/conversion.pxd
+++ b/pandas/_libs/tslibs/conversion.pxd
@@ -23,8 +23,4 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz,
cdef int64_t get_datetime64_nanos(object val) except? -1
-cpdef int64_t pydt_to_i8(object pydt) except? -1
-
-cdef maybe_datetimelike_to_i8(object val)
-
cpdef datetime localize_pydatetime(datetime dt, object tz)
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 2988d7bae9a5e..e0862b9250045 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -29,7 +29,7 @@ from pandas._libs.tslibs.util cimport (
from pandas._libs.tslibs.timedeltas cimport cast_from_unit
from pandas._libs.tslibs.timezones cimport (
is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info,
- get_timezone, maybe_get_tz, tz_compare)
+ get_timezone, maybe_get_tz, tz_compare, treat_tz_as_dateutil)
from pandas._libs.tslibs.timezones import UTC
from pandas._libs.tslibs.parsing import parse_datetime_string
@@ -99,6 +99,11 @@ def ensure_datetime64ns(arr: ndarray, copy: bool=True):
shape = (<object>arr).shape
+ if (<object>arr).dtype.byteorder == ">":
+ # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap
+ dtype = arr.dtype
+ arr = arr.astype(dtype.newbyteorder("<"))
+
ivalues = arr.view(np.int64).ravel()
result = np.empty(shape, dtype=NS_DTYPE)
@@ -202,31 +207,6 @@ def datetime_to_datetime64(object[:] values):
return result, inferred_tz
-cdef inline maybe_datetimelike_to_i8(object val):
- """
- Try to convert to a nanosecond timestamp. Fall back to returning the
- input value.
-
- Parameters
- ----------
- val : object
-
- Returns
- -------
- val : int64 timestamp or original input
- """
- cdef:
- npy_datetimestruct dts
- try:
- return val.value
- except AttributeError:
- if is_datetime64_object(val):
- return get_datetime64_value(val)
- elif PyDateTime_Check(val):
- return convert_datetime_to_tsobject(val, None).value
- return val
-
-
# ----------------------------------------------------------------------
# _TSObject Conversion
@@ -243,27 +223,6 @@ cdef class _TSObject:
return self.value
-cpdef int64_t pydt_to_i8(object pydt) except? -1:
- """
- Convert to int64 representation compatible with numpy datetime64; converts
- to UTC
-
- Parameters
- ----------
- pydt : object
-
- Returns
- -------
- i8value : np.int64
- """
- cdef:
- _TSObject ts
-
- ts = convert_to_tsobject(pydt, None, None, 0, 0)
-
- return ts.value
-
-
cdef convert_to_tsobject(object ts, object tz, object unit,
bint dayfirst, bint yearfirst, int32_t nanos=0):
"""
@@ -382,6 +341,14 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz,
obj.tzinfo = tz
else:
obj.value = pydatetime_to_dt64(ts, &obj.dts)
+ # GH 24329 When datetime is ambiguous,
+ # pydatetime_to_dt64 doesn't take DST into account
+ # but with dateutil timezone, get_utcoffset does
+ # so we need to correct for it
+ if treat_tz_as_dateutil(ts.tzinfo):
+ if ts.tzinfo.is_ambiguous(ts):
+ dst_offset = ts.tzinfo.dst(ts)
+ obj.value += int(dst_offset.total_seconds() * 1e9)
obj.tzinfo = ts.tzinfo
if obj.tzinfo is not None and not is_utc(obj.tzinfo):
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 67c0f0cc33ab8..357f183b3a845 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -278,7 +278,7 @@ cdef class _NaT(datetime):
def total_seconds(self):
"""
- Total duration of timedelta in seconds (to ns precision).
+ Total duration of timedelta in seconds (to microsecond precision).
"""
# GH#10939
return np.nan
@@ -326,7 +326,7 @@ class NaTType(_NaT):
def __reduce_ex__(self, protocol):
# python 3.6 compat
- # http://bugs.python.org/issue28730
+ # https://bugs.python.org/issue28730
# now __reduce_ex__ is defined and higher priority than __reduce__
return self.__reduce__()
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index f24dce28cd5f7..48a3886c20a3a 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -22,7 +22,10 @@ from pandas._libs.tslibs.util cimport is_integer_object
from pandas._libs.tslibs.ccalendar import MONTHS, DAYS
from pandas._libs.tslibs.ccalendar cimport get_days_in_month, dayofweek
-from pandas._libs.tslibs.conversion cimport pydt_to_i8, localize_pydatetime
+from pandas._libs.tslibs.conversion cimport (
+ convert_datetime_to_tsobject,
+ localize_pydatetime,
+)
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct)
@@ -216,7 +219,7 @@ def _get_calendar(weekmask, holidays, calendar):
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
- holidays = [_to_dt64(dt, dtype='datetime64[D]') for dt in holidays]
+ holidays = [_to_dt64D(dt) for dt in holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
@@ -227,19 +230,22 @@ def _get_calendar(weekmask, holidays, calendar):
return busdaycalendar, holidays
-def _to_dt64(dt, dtype='datetime64'):
+def _to_dt64D(dt):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
- i8 = pydt_to_i8(dt)
+ # Get the nanosecond timestamp,
+ # equiv `Timestamp(dt).value` or `dt.timestamp() * 10**9`
+ nanos = getattr(dt, "nanosecond", 0)
+ i8 = convert_datetime_to_tsobject(dt, tz=None, nanos=nanos).value
dt = tz_convert_single(i8, UTC, dt.tzinfo)
dt = np.int64(dt).astype('datetime64[ns]')
else:
dt = np.datetime64(dt)
- if dt.dtype.name != dtype:
- dt = dt.astype(dtype)
+ if dt.dtype.name != "datetime64[D]":
+ dt = dt.astype("datetime64[D]")
return dt
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 3705b0a41fe55..ebdf7a1e29216 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -217,7 +217,7 @@ def parse_datetime_string(date_string: str, freq=None, dayfirst=False,
return dt
try:
- dt, _, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
+ dt, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
return dt
except DateParseError:
raise
@@ -280,7 +280,6 @@ cdef parse_datetime_string_with_reso(str date_string, freq=None, dayfirst=False,
Returns
-------
datetime
- datetime/dateutil.parser._result
str
Inferred resolution of the parsed string.
@@ -297,7 +296,7 @@ cdef parse_datetime_string_with_reso(str date_string, freq=None, dayfirst=False,
parsed, reso = _parse_delimited_date(date_string, dayfirst)
if parsed is not None:
- return parsed, parsed, reso
+ return parsed, reso
try:
return _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
@@ -315,7 +314,7 @@ cdef parse_datetime_string_with_reso(str date_string, freq=None, dayfirst=False,
raise DateParseError(err)
if parsed is None:
raise DateParseError(f"Could not parse {date_string}")
- return parsed, parsed, reso
+ return parsed, reso
cpdef bint _does_string_look_like_datetime(str py_string):
@@ -375,7 +374,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
assert isinstance(date_string, str)
if date_string in nat_strings:
- return NaT, NaT, ''
+ return NaT, ''
date_string = date_string.upper()
date_len = len(date_string)
@@ -384,7 +383,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
# parse year only like 2000
try:
ret = default.replace(year=int(date_string))
- return ret, ret, 'year'
+ return ret, 'year'
except ValueError:
pass
@@ -441,7 +440,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
month = (quarter - 1) * 3 + 1
ret = default.replace(year=year, month=month)
- return ret, ret, 'quarter'
+ return ret, 'quarter'
except DateParseError:
raise
@@ -454,14 +453,14 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
month = int(date_string[4:6])
try:
ret = default.replace(year=year, month=month)
- return ret, ret, 'month'
+ return ret, 'month'
except ValueError:
pass
for pat in ['%Y-%m', '%b %Y', '%b-%Y']:
try:
ret = datetime.strptime(date_string, pat)
- return ret, ret, 'month'
+ return ret, 'month'
except ValueError:
pass
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index bd57e75c72f19..3dd560ece188d 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2466,7 +2466,7 @@ class Period(_Period):
if util.is_integer_object(value):
value = str(value)
value = value.upper()
- dt, _, reso = parse_time_string(value, freq)
+ dt, reso = parse_time_string(value, freq)
if dt is NaT:
ordinal = NPY_NAT
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 0a773b8a215ed..9c031baf70a77 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -861,9 +861,11 @@ cdef class _Timedelta(timedelta):
def total_seconds(self):
"""
- Total duration of timedelta in seconds (to ns precision).
+ Total duration of timedelta in seconds (to microsecond precision).
"""
- return self.value / 1e9
+ # GH 31043
+ # Microseconds precision to avoid confusing tzinfo.utcoffset
+ return (self.value - self.value % 1000) / 1e9
def view(self, dtype):
"""
@@ -1208,7 +1210,12 @@ class Timedelta(_Timedelta):
"represent unambiguous timedelta values durations."
)
- if isinstance(value, Timedelta):
+ # GH 30543 if pd.Timedelta already passed, return it
+ # check that only value is passed
+ if (isinstance(value, Timedelta) and unit is None and
+ len(kwargs) == 0):
+ return value
+ elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, str):
if len(value) > 0 and value[0] == 'P':
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index abe7f9e5b4105..4915671aa6512 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -161,8 +161,7 @@ def round_nsint64(values, mode, freq):
# if/elif above should catch all rounding modes defined in enum 'RoundTo':
# if flow of control arrives here, it is a bug
- raise ValueError("round_nsint64 called with an unrecognized "
- "rounding mode")
+ raise ValueError("round_nsint64 called with an unrecognized rounding mode")
# ----------------------------------------------------------------------
@@ -324,8 +323,10 @@ class Timestamp(_Timestamp):
Function is not implemented. Use pd.to_datetime().
"""
- raise NotImplementedError("Timestamp.strptime() is not implemented."
- "Use to_datetime() to parse date strings.")
+ raise NotImplementedError(
+ "Timestamp.strptime() is not implemented. "
+ "Use to_datetime() to parse date strings."
+ )
@classmethod
def combine(cls, date, time):
@@ -381,20 +382,34 @@ class Timestamp(_Timestamp):
if tzinfo is not None:
if not PyTZInfo_Check(tzinfo):
# tzinfo must be a datetime.tzinfo object, GH#17690
- raise TypeError(f'tzinfo must be a datetime.tzinfo object, '
- f'not {type(tzinfo)}')
+ raise TypeError(
+ f"tzinfo must be a datetime.tzinfo object, not {type(tzinfo)}"
+ )
elif tz is not None:
raise ValueError('Can provide at most one of tz, tzinfo')
# User passed tzinfo instead of tz; avoid silently ignoring
tz, tzinfo = tzinfo, None
- if isinstance(ts_input, str):
+ # GH 30543 if pd.Timestamp already passed, return it
+ # check that only ts_input is passed
+ # checking verbosely, because cython doesn't optimize
+ # list comprehensions (as of cython 0.29.x)
+ if (isinstance(ts_input, Timestamp) and freq is None and
+ tz is None and unit is None and year is None and
+ month is None and day is None and hour is None and
+ minute is None and second is None and
+ microsecond is None and nanosecond is None and
+ tzinfo is None):
+ return ts_input
+ elif isinstance(ts_input, str):
# User passed a date string to parse.
# Check that the user didn't also pass a date attribute kwarg.
if any(arg is not None for arg in _date_attributes):
- raise ValueError('Cannot pass a date attribute keyword '
- 'argument when passing a date string')
+ raise ValueError(
+ "Cannot pass a date attribute keyword "
+ "argument when passing a date string"
+ )
elif ts_input is _no_input:
# User passed keyword arguments.
@@ -578,8 +593,10 @@ timedelta}, default 'raise'
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
- raise AttributeError("Cannot directly set timezone. Use tz_localize() "
- "or tz_convert() as appropriate")
+ raise AttributeError(
+ "Cannot directly set timezone. "
+ "Use tz_localize() or tz_convert() as appropriate"
+ )
def __setstate__(self, state):
self.value = state[0]
@@ -598,9 +615,10 @@ timedelta}, default 'raise'
if self.tz is not None:
# GH#21333
- warnings.warn("Converting to Period representation will "
- "drop timezone information.",
- UserWarning)
+ warnings.warn(
+ "Converting to Period representation will drop timezone information.",
+ UserWarning,
+ )
if freq is None:
freq = self.freq
@@ -810,13 +828,13 @@ default 'raise'
if ambiguous == 'infer':
raise ValueError('Cannot infer offset with only one time.')
- nonexistent_options = ('raise', 'NaT', 'shift_forward',
- 'shift_backward')
+ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta):
- raise ValueError("The nonexistent argument must be one of 'raise', "
- "'NaT', 'shift_forward', 'shift_backward' or "
- "a timedelta object")
+ raise ValueError(
+ "The nonexistent argument must be one of 'raise', "
+ "'NaT', 'shift_forward', 'shift_backward' or a timedelta object"
+ )
if self.tzinfo is None:
# tz naive, localize
@@ -833,8 +851,9 @@ default 'raise'
value = tz_convert_single(self.value, UTC, self.tz)
return Timestamp(value, tz=tz, freq=self.freq)
else:
- raise TypeError('Cannot localize tz-aware Timestamp, use '
- 'tz_convert for conversions')
+ raise TypeError(
+ "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
+ )
def tz_convert(self, tz):
"""
@@ -857,17 +876,28 @@ default 'raise'
"""
if self.tzinfo is None:
# tz naive, use tz_localize
- raise TypeError('Cannot convert tz-naive Timestamp, use '
- 'tz_localize to localize')
+ raise TypeError(
+ "Cannot convert tz-naive Timestamp, use tz_localize to localize"
+ )
else:
# Same UTC timestamp, different time zone
return Timestamp(self.value, tz=tz, freq=self.freq)
astimezone = tz_convert
- def replace(self, year=None, month=None, day=None,
- hour=None, minute=None, second=None, microsecond=None,
- nanosecond=None, tzinfo=object, fold=0):
+ def replace(
+ self,
+ year=None,
+ month=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+ microsecond=None,
+ nanosecond=None,
+ tzinfo=object,
+ fold=0,
+ ):
"""
implements datetime.replace, handles nanoseconds.
@@ -910,8 +940,9 @@ default 'raise'
def validate(k, v):
""" validate integers """
if not is_integer_object(v):
- raise ValueError(f"value must be an integer, received "
- f"{type(v)} for {k}")
+ raise ValueError(
+ f"value must be an integer, received {type(v)} for {k}"
+ )
return v
if year is not None:
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 0348843abc129..f675818599b2c 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -56,8 +56,9 @@ cdef:
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b
-cdef inline bint is_monotonic_start_end_bounds(ndarray[int64_t, ndim=1] start,
- ndarray[int64_t, ndim=1] end):
+cdef inline bint is_monotonic_start_end_bounds(
+ ndarray[int64_t, ndim=1] start, ndarray[int64_t, ndim=1] end
+):
return is_monotonic(start, False)[0] and is_monotonic(end, False)[0]
# Cython implementations of rolling sum, mean, variance, skewness,
@@ -90,8 +91,12 @@ cdef inline bint is_monotonic_start_end_bounds(ndarray[int64_t, ndim=1] start,
# this is only an impl for index not None, IOW, freq aware
-def roll_count(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end,
- int64_t minp):
+def roll_count(
+ ndarray[float64_t] values,
+ ndarray[int64_t] start,
+ ndarray[int64_t] end,
+ int64_t minp,
+):
cdef:
float64_t val, count_x = 0.0
int64_t s, e, nobs, N = len(values)
@@ -1871,8 +1876,7 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y,
bint is_observation
if <Py_ssize_t>len(input_y) != N:
- raise ValueError(f"arrays are of different lengths "
- f"({N} and {len(input_y)})")
+ raise ValueError(f"arrays are of different lengths ({N} and {len(input_y)})")
output = np.empty(N, dtype=float)
if N == 0:
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 0b81fb0f7a8d5..631d550c60534 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -122,9 +122,9 @@ def round_trip_pickle(
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
- with ensure_clean(_path) as path:
- pd.to_pickle(obj, _path)
- return pd.read_pickle(_path)
+ with ensure_clean(_path) as temp_path:
+ pd.to_pickle(obj, temp_path)
+ return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
@@ -473,7 +473,7 @@ def close(fignum=None):
@contextmanager
-def ensure_clean(filename=None, return_filelike=False):
+def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and agrees to remove on close.
@@ -485,23 +485,37 @@ def ensure_clean(filename=None, return_filelike=False):
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
+ **kwargs
+ Additional keywords passed in for creating a temporary file.
+ :meth:`tempFile.TemporaryFile` is used when `return_filelike` is ``True``.
+ :meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
+ Note that the `filename` parameter will be passed in as the `suffix`
+ argument to either function.
+
+ See Also
+ --------
+ tempfile.TemporaryFile
+ tempfile.mkstemp
"""
filename = filename or ""
fd = None
+ kwargs["suffix"] = filename
+
if return_filelike:
- f = tempfile.TemporaryFile(suffix=filename)
+ f = tempfile.TemporaryFile(**kwargs)
+
try:
yield f
finally:
f.close()
else:
- # don't generate tempfile if using a path with directory specified
+ # Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
- fd, filename = tempfile.mkstemp(suffix=filename)
+ fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
@@ -613,8 +627,8 @@ def _check_types(l, r, obj="Index"):
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
- if l.inferred_type in ("string", "unicode"):
- assert r.inferred_type in ("string", "unicode")
+ if l.inferred_type in ("string"):
+ assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 171b76b4d2c4b..e2858441605f7 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -45,6 +45,7 @@
# other
Dtype = Union[str, np.dtype, "ExtensionDtype"]
+DtypeObj = Union[np.dtype, "ExtensionDtype"]
FilePathOrBuffer = Union[str, Path, IO[AnyStr]]
# FrameOrSeriesUnion means either a DataFrame or a Series. E.g.
diff --git a/pandas/api/indexers/__init__.py b/pandas/api/indexers/__init__.py
index 10654eb0888ee..826297e6b498f 100644
--- a/pandas/api/indexers/__init__.py
+++ b/pandas/api/indexers/__init__.py
@@ -2,7 +2,7 @@
Public API for Rolling Window Indexers.
"""
-from pandas.core.indexers import check_bool_array_indexer
+from pandas.core.indexers import check_array_indexer
from pandas.core.window.indexers import BaseIndexer
-__all__ = ["check_bool_array_indexer", "BaseIndexer"]
+__all__ = ["check_array_indexer", "BaseIndexer"]
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 60cfecd5804ac..3547a33ea357b 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -110,8 +110,7 @@ def _import_lzma():
return lzma
except ImportError:
msg = (
- "Could not import the lzma module. "
- "Your installed Python is incomplete. "
+ "Could not import the lzma module. Your installed Python is incomplete. "
"Attempting to use lzma compression will result in a RuntimeError."
)
warnings.warn(msg)
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 7aeb0327139f1..cd711bcace013 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -19,6 +19,7 @@
"pyarrow": "0.13.0",
"pytables": "3.4.2",
"pytest": "5.0.1",
+ "pyxlsb": "1.0.6",
"s3fs": "0.3.0",
"scipy": "0.19.0",
"sqlalchemy": "1.1.4",
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 27f1c32058941..6c9ac5944e6a1 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -18,11 +18,9 @@
if _nlv < "1.13.3":
raise ImportError(
- f"this version of pandas is incompatible with "
- f"numpy < 1.13.3\n"
+ "this version of pandas is incompatible with numpy < 1.13.3\n"
f"your numpy version is {_np_version}.\n"
- f"Please upgrade numpy to >= 1.13.3 to use "
- f"this pandas version"
+ "Please upgrade numpy to >= 1.13.3 to use this pandas version"
)
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 7158f251ad805..05ecccc67daef 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -33,13 +33,26 @@
class CompatValidator:
- def __init__(self, defaults, fname=None, method=None, max_fname_arg_count=None):
+ def __init__(
+ self,
+ defaults,
+ fname=None,
+ method: Optional[str] = None,
+ max_fname_arg_count=None,
+ ):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
- def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=None):
+ def __call__(
+ self,
+ args,
+ kwargs,
+ fname=None,
+ max_fname_arg_count=None,
+ method: Optional[str] = None,
+ ) -> None:
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (
@@ -300,7 +313,7 @@ def validate_take_with_convert(convert, args, kwargs):
)
-def validate_window_func(name, args, kwargs):
+def validate_window_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -315,7 +328,7 @@ def validate_window_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_rolling_func(name, args, kwargs):
+def validate_rolling_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -330,7 +343,7 @@ def validate_rolling_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_expanding_func(name, args, kwargs):
+def validate_expanding_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
@@ -345,7 +358,7 @@ def validate_expanding_func(name, args, kwargs):
raise UnsupportedFunctionCall(msg)
-def validate_groupby_func(name, args, kwargs, allowed=None):
+def validate_groupby_func(name, args, kwargs, allowed=None) -> None:
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
@@ -359,16 +372,15 @@ def validate_groupby_func(name, args, kwargs, allowed=None):
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall(
- f"numpy operations are not valid with "
- f"groupby. Use .groupby(...).{name}() "
- f"instead"
+ "numpy operations are not valid with groupby. "
+ f"Use .groupby(...).{name}() instead"
)
RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
-def validate_resampler_func(method, args, kwargs):
+def validate_resampler_func(method: str, args, kwargs) -> None:
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
@@ -377,15 +389,14 @@ def validate_resampler_func(method, args, kwargs):
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall(
- f"numpy operations are not "
- f"valid with resample. Use "
- f".resample(...).{method}() instead"
+ "numpy operations are not valid with resample. "
+ f"Use .resample(...).{method}() instead"
)
else:
raise TypeError("too many arguments passed in")
-def validate_minmax_axis(axis):
+def validate_minmax_axis(axis: Optional[int]) -> None:
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3eab2186ccb94..0c964452df5da 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -65,25 +65,28 @@ def pytest_runtest_setup(item):
pytest.skip("skipping high memory test since --run-high-memory was not set")
-# Configurations for all tests and all test modules
-
-
@pytest.fixture(autouse=True)
def configure_tests():
+ """
+ Configure settings for all tests and test modules.
+ """
pd.set_option("chained_assignment", "raise")
-# For running doctests: make np and pd names available
-
-
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
+ """
+ Make `np` and `pd` names available for doctests.
+ """
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
+ """
+ Yields scipy sparse matrix classes.
+ """
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@@ -92,8 +95,8 @@ def spmatrix(request):
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
"""
- Fixture for returning the axis numbers of a DataFrame.
- """
+ Fixture for returning the axis numbers of a DataFrame.
+ """
return request.param
@@ -237,6 +240,10 @@ def all_boolean_reductions(request):
@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
+ """
+ Yields a tuple of a function and its corresponding name. Correspond to
+ the list of aggregator "Cython functions" used on selected table items.
+ """
return request.param
@@ -337,6 +344,9 @@ def writable(request):
@pytest.fixture(scope="module")
def datetime_tz_utc():
+ """
+ Yields the UTC timezone object from the datetime module.
+ """
return timezone.utc
@@ -358,6 +368,9 @@ def join_type(request):
@pytest.fixture
def strict_data_files(pytestconfig):
+ """
+ Returns the configuration for the test setting `--strict-data-files`.
+ """
return pytestconfig.getoption("--strict-data-files")
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 3f1c7b1c049cf..a04e9c3e68310 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -186,7 +186,7 @@ def __get__(self, obj, cls):
return self._accessor
accessor_obj = self._accessor(obj)
# Replace the property with the accessor object. Inspired by:
- # http://www.pydanny.com/cached-property.html
+ # https://www.pydanny.com/cached-property.html
# We need to use object.__setattr__ because we overwrite __setattr__ on
# NDFrame
object.__setattr__(obj, self._name, accessor_obj)
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
new file mode 100644
index 0000000000000..79b87f146b9a7
--- /dev/null
+++ b/pandas/core/aggregation.py
@@ -0,0 +1,198 @@
+"""
+aggregation.py contains utility functions to handle multiple named and lambda
+kwarg aggregations in groupby and DataFrame/Series aggregation
+"""
+
+from collections import defaultdict
+from functools import partial
+from typing import Any, DefaultDict, List, Sequence, Tuple
+
+from pandas.core.dtypes.common import is_dict_like, is_list_like
+
+import pandas.core.common as com
+from pandas.core.indexes.api import Index
+
+
+def is_multi_agg_with_relabel(**kwargs) -> bool:
+ """
+ Check whether kwargs passed to .agg look like multi-agg with relabeling.
+
+ Parameters
+ ----------
+ **kwargs : dict
+
+ Returns
+ -------
+ bool
+
+ Examples
+ --------
+ >>> is_multi_agg_with_relabel(a='max')
+ False
+ >>> is_multi_agg_with_relabel(a_max=('a', 'max'),
+ ... a_min=('a', 'min'))
+ True
+ >>> is_multi_agg_with_relabel()
+ False
+ """
+ return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
+ len(kwargs) > 0
+ )
+
+
+def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
+ """
+ Normalize user-provided "named aggregation" kwargs.
+ Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
+ to the old Dict[str, List[scalar]]].
+
+ Parameters
+ ----------
+ kwargs : dict
+
+ Returns
+ -------
+ aggspec : dict
+ The transformed kwargs.
+ columns : List[str]
+ The user-provided keys.
+ col_idx_order : List[int]
+ List of columns indices.
+
+ Examples
+ --------
+ >>> normalize_keyword_aggregation({'output': ('input', 'sum')})
+ ({'input': ['sum']}, ('output',), [('input', 'sum')])
+ """
+ # Normalize the aggregation functions as Mapping[column, List[func]],
+ # process normally, then fixup the names.
+ # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
+ # May be hitting https://github.com/python/mypy/issues/5958
+ # saying it doesn't have an attribute __name__
+ aggspec: DefaultDict = defaultdict(list)
+ order = []
+ columns, pairs = list(zip(*kwargs.items()))
+
+ for name, (column, aggfunc) in zip(columns, pairs):
+ aggspec[column].append(aggfunc)
+ order.append((column, com.get_callable_name(aggfunc) or aggfunc))
+
+ # uniquify aggfunc name if duplicated in order list
+ uniquified_order = _make_unique_kwarg_list(order)
+
+ # GH 25719, due to aggspec will change the order of assigned columns in aggregation
+ # uniquified_aggspec will store uniquified order list and will compare it with order
+ # based on index
+ aggspec_order = [
+ (column, com.get_callable_name(aggfunc) or aggfunc)
+ for column, aggfuncs in aggspec.items()
+ for aggfunc in aggfuncs
+ ]
+ uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
+
+ # get the new indice of columns by comparison
+ col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
+ return aggspec, columns, col_idx_order
+
+
+def _make_unique_kwarg_list(
+ seq: Sequence[Tuple[Any, Any]]
+) -> Sequence[Tuple[Any, Any]]:
+ """Uniquify aggfunc name of the pairs in the order list
+
+ Examples:
+ --------
+ >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+ >>> _make_unique_kwarg_list(kwarg_list)
+ [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
+ """
+ return [
+ (pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
+ if seq.count(pair) > 1
+ else pair
+ for i, pair in enumerate(seq)
+ ]
+
+
+# TODO: Can't use, because mypy doesn't like us setting __name__
+# error: "partial[Any]" has no attribute "__name__"
+# the type is:
+# typing.Sequence[Callable[..., ScalarResult]]
+# -> typing.Sequence[Callable[..., ScalarResult]]:
+
+
+def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
+ """
+ Possibly mangle a list of aggfuncs.
+
+ Parameters
+ ----------
+ aggfuncs : Sequence
+
+ Returns
+ -------
+ mangled: list-like
+ A new AggSpec sequence, where lambdas have been converted
+ to have unique names.
+
+ Notes
+ -----
+ If just one aggfunc is passed, the name will not be mangled.
+ """
+ if len(aggfuncs) <= 1:
+ # don't mangle for .agg([lambda x: .])
+ return aggfuncs
+ i = 0
+ mangled_aggfuncs = []
+ for aggfunc in aggfuncs:
+ if com.get_callable_name(aggfunc) == "<lambda>":
+ aggfunc = partial(aggfunc)
+ aggfunc.__name__ = f"<lambda_{i}>"
+ i += 1
+ mangled_aggfuncs.append(aggfunc)
+
+ return mangled_aggfuncs
+
+
+def maybe_mangle_lambdas(agg_spec: Any) -> Any:
+ """
+ Make new lambdas with unique names.
+
+ Parameters
+ ----------
+ agg_spec : Any
+ An argument to GroupBy.agg.
+ Non-dict-like `agg_spec` are pass through as is.
+ For dict-like `agg_spec` a new spec is returned
+ with name-mangled lambdas.
+
+ Returns
+ -------
+ mangled : Any
+ Same type as the input.
+
+ Examples
+ --------
+ >>> maybe_mangle_lambdas('sum')
+ 'sum'
+ >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
+ [<function __main__.<lambda_0>,
+ <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
+ """
+ is_dict = is_dict_like(agg_spec)
+ if not (is_dict or is_list_like(agg_spec)):
+ return agg_spec
+ mangled_aggspec = type(agg_spec)() # dict or OrderdDict
+
+ if is_dict:
+ for key, aggfuncs in agg_spec.items():
+ if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
+ mangled_aggfuncs = _managle_lambda_list(aggfuncs)
+ else:
+ mangled_aggfuncs = aggfuncs
+
+ mangled_aggspec[key] = mangled_aggfuncs
+ else:
+ mangled_aggspec = _managle_lambda_list(agg_spec)
+
+ return mangled_aggspec
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 39e8e9008a844..8af9e2cc9790f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -2,6 +2,7 @@
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
+import operator
from textwrap import dedent
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
from warnings import catch_warnings, simplefilter, warn
@@ -201,7 +202,7 @@ def _ensure_arraylike(values):
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
- if inferred in ["mixed", "string", "unicode"]:
+ if inferred in ["mixed", "string"]:
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
@@ -1812,7 +1813,7 @@ def searchsorted(arr, value, side="left", sorter=None):
_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
-def diff(arr, n: int, axis: int = 0):
+def diff(arr, n: int, axis: int = 0, stacklevel=3):
"""
difference of n between self,
analogous to s-s.shift(n)
@@ -1824,16 +1825,42 @@ def diff(arr, n: int, axis: int = 0):
number of periods
axis : int
axis to shift on
+ stacklevel : int
+ The stacklevel for the lost dtype warning.
Returns
-------
shifted
"""
+ from pandas.core.arrays import PandasDtype
n = int(n)
na = np.nan
dtype = arr.dtype
+ if dtype.kind == "b":
+ op = operator.xor
+ else:
+ op = operator.sub
+
+ if isinstance(dtype, PandasDtype):
+ # PandasArray cannot necessarily hold shifted versions of itself.
+ arr = np.asarray(arr)
+ dtype = arr.dtype
+
+ if is_extension_array_dtype(dtype):
+ if hasattr(arr, f"__{op.__name__}__"):
+ return op(arr, arr.shift(n))
+ else:
+ warn(
+ "dtype lost in 'diff()'. In the future this will raise a "
+ "TypeError. Convert to a suitable dtype prior to calling 'diff'.",
+ FutureWarning,
+ stacklevel=stacklevel,
+ )
+ arr = np.asarray(arr)
+ dtype = arr.dtype
+
is_timedelta = False
is_bool = False
if needs_i8_conversion(arr):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 14a3c3c008e92..81e1d84880f60 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,10 +1,11 @@
import abc
import inspect
-from typing import TYPE_CHECKING, Any, Dict, Iterator, Tuple, Type, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type, Union
import numpy as np
from pandas._libs import reduction as libreduction
+from pandas._typing import Axis
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
@@ -13,7 +14,7 @@
is_list_like,
is_sequence,
)
-from pandas.core.dtypes.generic import ABCMultiIndex, ABCSeries
+from pandas.core.dtypes.generic import ABCSeries
from pandas.core.construction import create_series_with_explicit_dtype
@@ -26,9 +27,9 @@
def frame_apply(
obj: "DataFrame",
func,
- axis=0,
+ axis: Axis = 0,
raw: bool = False,
- result_type=None,
+ result_type: Optional[str] = None,
ignore_failures: bool = False,
args=None,
kwds=None,
@@ -87,7 +88,7 @@ def __init__(
obj: "DataFrame",
func,
raw: bool,
- result_type,
+ result_type: Optional[str],
ignore_failures: bool,
args,
kwds,
@@ -277,9 +278,11 @@ def apply_standard(self):
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
- # Disallow complex_internals since libreduction shortcut
- # cannot handle MultiIndex
- and not isinstance(self.agg_axis, ABCMultiIndex)
+ # Disallow dtypes where setting _index_data will break
+ # ExtensionArray values, see GH#31182
+ and not self.dtypes.apply(lambda x: x.kind in ["m", "M"]).any()
+ # Disallow complex_internals since libreduction shortcut raises a TypeError
+ and not self.agg_axis._has_complex_internals
):
values = self.values
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 9723343ea7af5..c3c91cea43f6b 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -176,6 +176,9 @@ class ExtensionArray:
types present.
See :ref:`extending.extension.ufunc` for more.
+
+ By default, ExtensionArrays are not hashable. Immutable subclasses may
+ override this behavior.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
@@ -1073,6 +1076,9 @@ def _reduce(self, name, skipna=True, **kwargs):
"""
raise TypeError(f"cannot perform {name} with type {self.dtype}")
+ def __hash__(self):
+ raise TypeError(f"unhashable type: {repr(type(self).__name__)}")
+
class ExtensionOpsMixin:
"""
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index fa1cbc87cc5c1..9eeed42124f2a 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -1,5 +1,5 @@
import numbers
-from typing import TYPE_CHECKING, Any, Tuple, Type
+from typing import TYPE_CHECKING, Any, List, Tuple, Type
import warnings
import numpy as np
@@ -26,6 +26,7 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from .masked import BaseMaskedArray
@@ -286,6 +287,23 @@ def _from_sequence(cls, scalars, dtype=None, copy: bool = False):
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
+ @classmethod
+ def _from_sequence_of_strings(
+ cls, strings: List[str], dtype=None, copy: bool = False
+ ):
+ def map_string(s):
+ if isna(s):
+ return s
+ elif s in ["True", "TRUE", "true"]:
+ return True
+ elif s in ["False", "FALSE", "false"]:
+ return False
+ else:
+ raise ValueError(f"{s} cannot be cast to bool")
+
+ scalars = [map_string(x) for x in strings]
+ return cls._from_sequence(scalars, dtype, copy)
+
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
data = self._data.astype("int8")
data[self._mask] = -1
@@ -352,6 +370,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
@@ -670,13 +689,15 @@ def _reduce(self, name, skipna=True, **kwargs):
mask = self._mask
# coerce to a nan-aware float if needed
- if mask.any():
- data = self._data.astype("float64")
- data[mask] = np.nan
+ if self._hasna:
+ data = self.to_numpy("float64", na_value=np.nan)
op = getattr(nanops, "nan" + name)
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
+ if np.isnan(result):
+ return libmissing.NA
+
# if we have numeric op that would result in an int, coerce to int if possible
if name in ["sum", "prod"] and notna(result):
int_result = np.int64(result)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 2806635211459..3a6662d3e3ae2 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -39,7 +39,7 @@
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
-from pandas.core.dtypes.inference import is_array_like, is_hashable
+from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
@@ -54,7 +54,7 @@
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
-from pandas.core.indexers import check_bool_array_indexer
+from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
@@ -2001,14 +2001,11 @@ def __getitem__(self, key):
else:
return self.categories[i]
- if is_list_like(key) and not is_array_like(key):
- key = np.asarray(key)
-
- if com.is_bool_indexer(key):
- key = check_bool_array_indexer(self, key)
+ key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
+ deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
@@ -2076,6 +2073,8 @@ def __setitem__(self, key, value):
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
+
+ key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
@@ -2404,8 +2403,8 @@ def isin(self, values):
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
- "only list-like objects are allowed to be passed"
- f" to isin(), you passed a [{values_type}]"
+ "only list-like objects are allowed to be passed "
+ f"to isin(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index d7cabbabddf95..4f14ac2a14157 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -42,7 +42,7 @@
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
-from pandas.core.indexers import check_bool_array_indexer
+from pandas.core.indexers import check_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
@@ -518,11 +518,20 @@ def __getitem__(self, key):
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
- key = check_bool_array_indexer(self, key)
+ # first convert to boolean, because check_array_indexer doesn't
+ # allow object dtype
+ key = np.asarray(key, dtype=bool)
+ key = check_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
+ elif isinstance(key, list) and len(key) == 1 and isinstance(key[0], slice):
+ # see https://github.com/pandas-dev/pandas/issues/31299, need to allow
+ # this for now (would otherwise raise in check_array_indexer)
+ pass
+ else:
+ key = check_array_indexer(self, key)
is_period = is_period_dtype(self)
if is_period:
@@ -592,6 +601,8 @@ def __setitem__(
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
self._maybe_clear_freq()
@@ -743,17 +754,36 @@ def searchsorted(self, value, side="left", sorter=None):
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
- value = self._scalar_from_string(value)
+ try:
+ value = self._scalar_from_string(value)
+ except ValueError:
+ raise TypeError("searchsorted requires compatible dtype or scalar")
+
+ elif is_valid_nat_for_dtype(value, self.dtype):
+ value = NaT
+
+ elif isinstance(value, self._recognized_scalars):
+ value = self._scalar_type(value)
+
+ elif isinstance(value, np.ndarray):
+ if not type(self)._is_recognized_dtype(value):
+ raise TypeError(
+ "searchsorted requires compatible dtype or scalar, "
+ f"not {type(value).__name__}"
+ )
+ value = type(self)(value)
+ self._check_compatible_with(value)
- if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
- raise ValueError(f"Unexpected type for 'value': {type(value)}")
+ if not (isinstance(value, (self._scalar_type, type(self))) or (value is NaT)):
+ raise TypeError(f"Unexpected type for 'value': {type(value)}")
- self._check_compatible_with(value)
if isinstance(value, type(self)):
+ self._check_compatible_with(value)
value = value.asi8
else:
value = self._unbox_scalar(value)
+ # TODO: Use datetime64 semantics for sorting, xref GH#29844
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
@@ -920,7 +950,7 @@ def freq(self, value):
@property
def freqstr(self):
"""
- Return the frequency object as a string if its set, otherwise None
+ Return the frequency object as a string if its set, otherwise None.
"""
if self.freq is None:
return None
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e42402b307f28..4b6b54cce64ec 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -234,11 +234,10 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
values = values._data
if not isinstance(values, np.ndarray):
- msg = (
+ raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray ndarray, or Series or Index containing one of those."
)
- raise ValueError(msg)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
@@ -249,20 +248,18 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
values = values.view(_NS_DTYPE)
if values.dtype != _NS_DTYPE:
- msg = (
- "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'."
- f" Got {values.dtype} instead."
+ raise ValueError(
+ "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
+ f"Got {values.dtype} instead."
)
- raise ValueError(msg)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
- msg = (
+ raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
- raise ValueError(msg)
if copy:
values = values.copy()
@@ -1640,7 +1637,7 @@ def to_julian_date(self):
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
- http://en.wikipedia.org/wiki/Julian_day
+ https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index cb1e7115cd3c2..9a0f5794e7607 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -19,11 +19,13 @@
is_list_like,
is_object_dtype,
is_scalar,
+ pandas_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
+from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -413,6 +415,7 @@ def __setitem__(self, key, value):
value = value[0]
mask = mask[0]
+ key = check_array_indexer(self, key)
self._data[key] = value
self._mask[key] = mask
@@ -440,11 +443,17 @@ def astype(self, dtype, copy=True):
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
+ from pandas.core.arrays.boolean import BooleanArray, BooleanDtype
+
+ dtype = pandas_dtype(dtype)
# if we are astyping to an existing IntegerDtype we can fastpath
if isinstance(dtype, _IntegerDtype):
result = self._data.astype(dtype.numpy_dtype, copy=False)
return type(self)(result, mask=self._mask, copy=False)
+ elif isinstance(dtype, BooleanDtype):
+ result = self._data.astype("bool", copy=False)
+ return BooleanArray(result, mask=self._mask, copy=False)
# coerce
if is_float_dtype(dtype):
@@ -549,21 +558,23 @@ def _reduce(self, name, skipna=True, **kwargs):
mask = self._mask
# coerce to a nan-aware float if needed
- if mask.any():
- data = self._data.astype("float64")
- # We explicitly use NaN within reductions.
- data[mask] = np.nan
+ # (we explicitly use NaN within reductions)
+ if self._hasna:
+ data = self.to_numpy("float64", na_value=np.nan)
op = getattr(nanops, "nan" + name)
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
+ if np.isnan(result):
+ return libmissing.NA
+
# if we have a boolean op, don't coerce
if name in ["any", "all"]:
pass
# if we have a preservable numeric op,
# provide coercion back to an integer type if possible
- elif name in ["sum", "min", "max", "prod"] and notna(result):
+ elif name in ["sum", "min", "max", "prod"]:
int_result = int(result)
if int_result == result:
result = int_result
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 37d2baed2c09e..23cf5f317ac7d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -40,6 +40,7 @@
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.construction import array
+from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index
_VALID_CLOSED = {"left", "right", "both", "neither"}
@@ -495,6 +496,7 @@ def __len__(self) -> int:
return len(self.left)
def __getitem__(self, value):
+ value = check_array_indexer(self, value)
left = self.left[value]
right = self.right[value]
@@ -539,6 +541,7 @@ def __setitem__(self, key, value):
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg)
+ key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 47605413ff1a6..80e317123126a 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -9,8 +9,7 @@
from pandas.core.algorithms import take
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
-import pandas.core.common as com
-from pandas.core.indexers import check_bool_array_indexer
+from pandas.core.indexers import check_array_indexer
if TYPE_CHECKING:
from pandas._typing import Scalar
@@ -35,8 +34,7 @@ def __getitem__(self, item):
return self.dtype.na_value
return self._data[item]
- elif com.is_bool_indexer(item):
- item = check_bool_array_indexer(self, item)
+ item = check_array_indexer(self, item)
return type(self)(self._data[item], self._mask[item])
@@ -50,6 +48,9 @@ def __iter__(self):
def __len__(self) -> int:
return len(self._data)
+ def __invert__(self):
+ return type(self)(~self._data, self._mask)
+
def to_numpy(
self, dtype=None, copy=False, na_value: "Scalar" = lib.no_default,
):
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 4db3d3010adaf..e56d6a7d2f089 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -18,9 +18,8 @@
from pandas.core import nanops
from pandas.core.algorithms import searchsorted, take, unique
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
-import pandas.core.common as com
from pandas.core.construction import extract_array
-from pandas.core.indexers import check_bool_array_indexer
+from pandas.core.indexers import check_array_indexer
from pandas.core.missing import backfill_1d, pad_1d
@@ -43,7 +42,6 @@ class PandasDtype(ExtensionDtype):
def __init__(self, dtype):
dtype = np.dtype(dtype)
self._dtype = dtype
- self._name = dtype.name
self._type = dtype.type
def __repr__(self) -> str:
@@ -56,7 +54,7 @@ def numpy_dtype(self):
@property
def name(self):
- return self._name
+ return self._dtype.name
@property
def type(self):
@@ -76,9 +74,11 @@ def construct_from_string(cls, string):
try:
return cls(np.dtype(string))
except TypeError as err:
- raise TypeError(
- f"Cannot construct a 'PandasDtype' from '{string}'"
- ) from err
+ if not isinstance(string, str):
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
+ else:
+ msg = f"Cannot construct a 'PandasDtype' from '{string}'"
+ raise TypeError(msg) from err
@classmethod
def construct_array_type(cls):
@@ -235,8 +235,7 @@ def __getitem__(self, item):
if isinstance(item, type(self)):
item = item._ndarray
- elif com.is_bool_indexer(item):
- item = check_bool_array_indexer(self, item)
+ item = check_array_indexer(self, item)
result = self._ndarray[item]
if not lib.is_scalar(item):
@@ -246,6 +245,7 @@ def __getitem__(self, item):
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8b49c2186dde0..d9b53aa4a867c 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -169,8 +169,9 @@ def __init__(self, values, freq=None, dtype=None, copy=False):
self._dtype = PeriodDtype(freq)
@classmethod
- def _simple_new(cls, values, freq=None, **kwargs):
+ def _simple_new(cls, values: np.ndarray, freq=None, **kwargs):
# alias for PeriodArray.__init__
+ assert isinstance(values, np.ndarray) and values.dtype == "i8"
return cls(values, freq=freq, **kwargs)
@classmethod
@@ -297,12 +298,12 @@ def __arrow_array__(self, type=None):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
- "Not supported to convert PeriodArray to array with different"
- " 'freq' ({0} vs {1})".format(self.freqstr, type.freq)
+ "Not supported to convert PeriodArray to array with different "
+ f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
- "Not supported to convert PeriodArray to '{0}' type".format(type)
+ f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index e2562a375515d..b476a019c66cc 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -43,6 +43,7 @@
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import sanitize_array
+from pandas.core.indexers import check_array_indexer
from pandas.core.missing import interpolate_2d
import pandas.core.ops as ops
from pandas.core.ops.common import unpack_zerodim_and_defer
@@ -141,7 +142,7 @@ def _sparse_array_op(
left, right = right, left
name = name[1:]
- if name in ("and", "or") and dtype == "bool":
+ if name in ("and", "or", "xor") and dtype == "bool":
opname = f"sparse_{name}_uint8"
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
@@ -768,6 +769,8 @@ def __getitem__(self, key):
else:
key = np.asarray(key)
+ key = check_array_indexer(self, key)
+
if com.is_bool_indexer(key):
key = check_bool_indexer(self, key)
@@ -1459,6 +1462,7 @@ def _add_unary_ops(cls):
def _add_comparison_ops(cls):
cls.__and__ = cls._create_comparison_method(operator.and_)
cls.__or__ = cls._create_comparison_method(operator.or_)
+ cls.__xor__ = cls._create_arithmetic_method(operator.xor)
super()._add_comparison_ops()
# ----------
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 6f15681cab87e..9cdc0d56d0061 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -206,6 +206,10 @@ def construct_from_string(cls, string):
-------
SparseDtype
"""
+ if not isinstance(string, str):
+ raise TypeError(
+ f"'construct_from_string' expects a string, got {type(string)}"
+ )
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
if string.startswith("Sparse"):
try:
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index 88d63071c360f..17a953fce9ec0 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -17,14 +17,14 @@ def _check_is_partition(parts, whole):
def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
- """ For arbitrary (MultiIndexed) SparseSeries return
+ """ For arbitrary (MultiIndexed) sparse Series return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
- # from the SparseSeries: get the labels and data for non-null entries
- values = ss._data.internal_values()._valid_sp_values
+ # from the sparse Series: get the labels and data for non-null entries
+ values = ss.array._valid_sp_values
nonnull_labels = ss.dropna()
@@ -85,7 +85,7 @@ def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
- Convert a SparseSeries to a scipy.sparse.coo_matrix using index
+ Convert a sparse Series to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 84130132de4dc..b53484e1892f9 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -15,6 +15,7 @@
from pandas.core import ops
from pandas.core.arrays import PandasArray
from pandas.core.construction import extract_array
+from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
@@ -93,9 +94,6 @@ class StringArray(PandasArray):
StringArray is considered experimental. The implementation and
parts of the API may change without warning.
- In particular, the NA value used may change to no longer be
- ``numpy.nan``.
-
Parameters
----------
values : array-like
@@ -104,8 +102,11 @@ class StringArray(PandasArray):
.. warning::
Currently, this expects an object-dtype ndarray
- where the elements are Python strings. This may
- change without warning in the future.
+ where the elements are Python strings or :attr:`pandas.NA`.
+ This may change without warning in the future. Use
+ :meth:`pandas.array` with ``dtype="string"`` for a stable way of
+ creating a `StringArray` from any sequence.
+
copy : bool, default False
Whether to copy the array of data.
@@ -119,6 +120,8 @@ class StringArray(PandasArray):
See Also
--------
+ array
+ The recommended function for creating a StringArray.
Series.str
The string methods are available on Series backed by
a StringArray.
@@ -165,12 +168,10 @@ def __init__(self, values, copy=False):
def _validate(self):
"""Validate that we only store NA or strings."""
if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
- raise ValueError(
- "StringArray requires a sequence of strings or missing values."
- )
+ raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
- "StringArray requires a sequence of strings. Got "
+ "StringArray requires a sequence of strings or pandas.NA. Got "
f"'{self._ndarray.dtype}' dtype instead."
)
@@ -178,12 +179,22 @@ def _validate(self):
def _from_sequence(cls, scalars, dtype=None, copy=False):
if dtype:
assert dtype == "string"
- result = super()._from_sequence(scalars, dtype=object, copy=copy)
+
+ result = np.asarray(scalars, dtype="object")
+ if copy and result is scalars:
+ result = result.copy()
+
# Standardize all missing-like values to NA
# TODO: it would be nice to do this in _validate / lib.is_string_array
# We are already doing a scan over the values there.
- result[result.isna()] = StringDtype.na_value
- return result
+ na_values = isna(result)
+ if na_values.any():
+ if result is scalars:
+ # force a copy now, if we haven't already
+ result = result.copy()
+ result[na_values] = StringDtype.na_value
+
+ return cls(result)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
@@ -214,6 +225,7 @@ def __setitem__(self, key, value):
# extract_array doesn't extract PandasArray subclasses
value = value._ndarray
+ key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
if scalar_key and not scalar_value:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index c34d14f15075c..d77a37ad355a7 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -43,12 +43,6 @@
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
-_BAD_DTYPE = "dtype {dtype} cannot be converted to timedelta64[ns]"
-
-
-def _is_convertible_to_td(key):
- return isinstance(key, (Tick, timedelta, np.timedelta64, str))
-
def _field_accessor(name, alias, docstring=None):
def f(self):
@@ -1064,7 +1058,7 @@ def _validate_td64_dtype(dtype):
raise ValueError(msg)
if not is_dtype_equal(dtype, _TD_DTYPE):
- raise ValueError(_BAD_DTYPE.format(dtype=dtype))
+ raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]")
return dtype
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 66d7cd59dcfa4..9fe1af776dd2b 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1,9 +1,10 @@
"""
Base and utility classes for pandas objects.
"""
+
import builtins
import textwrap
-from typing import Dict, FrozenSet, List, Optional
+from typing import Dict, FrozenSet, List, Optional, Union
import numpy as np
@@ -18,13 +19,11 @@
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_categorical_dtype,
- is_datetime64_ns_dtype,
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_object_dtype,
is_scalar,
- is_timedelta64_ns_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
@@ -47,11 +46,15 @@
class PandasObject(DirNamesMixin):
- """baseclass for various pandas objects"""
+ """
+ Baseclass for various pandas objects.
+ """
@property
def _constructor(self):
- """class constructor (for this class it's just `__class__`"""
+ """
+ Class constructor (for this class it's just `__class__`.
+ """
return type(self)
def __repr__(self) -> str:
@@ -79,16 +82,14 @@ def __sizeof__(self):
"""
if hasattr(self, "memory_usage"):
mem = self.memory_usage(deep=True)
- if not is_scalar(mem):
- mem = mem.sum()
- return int(mem)
+ return int(mem if is_scalar(mem) else mem.sum())
- # no memory_usage attribute, so fall back to
- # object's 'sizeof'
+ # no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()
def _ensure_type(self: T, obj) -> T:
- """Ensure that an object has same type as self.
+ """
+ Ensure that an object has same type as self.
Used by type checkers.
"""
@@ -97,7 +98,8 @@ def _ensure_type(self: T, obj) -> T:
class NoNewAttributesMixin:
- """Mixin which prevents adding new attributes.
+ """
+ Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
@@ -108,11 +110,13 @@ class NoNewAttributesMixin:
"""
def _freeze(self):
- """Prevents setting additional attributes"""
+ """
+ Prevents setting additional attributes.
+ """
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
- def __setattr__(self, key, value):
+ def __setattr__(self, key: str, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
@@ -182,14 +186,12 @@ class SelectionMixin:
@property
def _selection_name(self):
"""
- return a name for myself; this would ideally be called
- the 'name' property, but we cannot conflict with the
- Series.name property which can be set
+ Return a name for myself;
+
+ This would ideally be called the 'name' property,
+ but we cannot conflict with the Series.name property which can be set.
"""
- if self._selection is None:
- return None # 'result'
- else:
- return self._selection
+ return self._selection
@property
def _selection_list(self):
@@ -201,7 +203,6 @@ def _selection_list(self):
@cache_readonly
def _selected_obj(self):
-
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
@@ -241,19 +242,18 @@ def __getitem__(self, key):
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=1)
- def _gotitem(self, key, ndim, subset=None):
+ def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
- key : string / list of selections
+ key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
-
"""
raise AbstractMethodError(self)
@@ -268,7 +268,6 @@ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
-
"""
assert isinstance(arg, str)
@@ -583,13 +582,10 @@ def _is_builtin_func(self, arg):
class ShallowMixin:
_attributes: List[str] = []
- def _shallow_copy(self, obj=None, **kwargs):
+ def _shallow_copy(self, obj, **kwargs):
"""
return a new object with the replacement attributes
"""
- if obj is None:
- obj = self._selected_obj.copy()
-
if isinstance(obj, self._constructor):
obj = obj.obj
for attr in self._attributes:
@@ -609,6 +605,11 @@ class IndexOpsMixin:
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
+ @property
+ def _values(self) -> Union[ExtensionArray, np.ndarray]:
+ # must be defined here as a property for mypy
+ raise AbstractMethodError(self)
+
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
@@ -634,6 +635,10 @@ def shape(self):
"""
return self._values.shape
+ def __len__(self) -> int:
+ # We need this defined here for mypy
+ raise AbstractMethodError(self)
+
@property
def ndim(self) -> int:
"""
@@ -664,18 +669,17 @@ def item(self):
if len(self) == 1:
return next(iter(self))
- else:
- raise ValueError("can only convert an array of size 1 to a Python scalar")
+ raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
- def nbytes(self):
+ def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
- def size(self):
+ def size(self) -> int:
"""
Return the number of elements in the underlying data.
"""
@@ -730,7 +734,6 @@ def array(self) -> ExtensionArray:
Examples
--------
-
For regular NumPy types like int, and float, a PandasArray
is returned.
@@ -747,26 +750,7 @@ def array(self) -> ExtensionArray:
[a, b, a]
Categories (2, object): [a, b]
"""
- # As a mixin, we depend on the mixing class having _values.
- # Special mixin syntax may be developed in the future:
- # https://github.com/python/typing/issues/246
- result = self._values # type: ignore
-
- if is_datetime64_ns_dtype(result.dtype):
- from pandas.arrays import DatetimeArray
-
- result = DatetimeArray(result)
- elif is_timedelta64_ns_dtype(result.dtype):
- from pandas.arrays import TimedeltaArray
-
- result = TimedeltaArray(result)
-
- elif not is_extension_array_dtype(result.dtype):
- from pandas.core.arrays.numpy_ import PandasArray
-
- result = PandasArray(result)
-
- return result
+ raise AbstractMethodError(self)
def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
"""
@@ -865,12 +849,11 @@ def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
"""
if is_extension_array_dtype(self.dtype):
return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
- else:
- if kwargs:
- msg = "to_numpy() got an unexpected keyword argument '{}'".format(
- list(kwargs.keys())[0]
- )
- raise TypeError(msg)
+ elif kwargs:
+ bad_keys = list(kwargs.keys())[0]
+ raise TypeError(
+ f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
+ )
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
@@ -1081,9 +1064,18 @@ def hasnans(self):
return bool(isna(self).any())
def _reduce(
- self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
+ self,
+ op,
+ name: str,
+ axis=0,
+ skipna=True,
+ numeric_only=None,
+ filter_type=None,
+ **kwds,
):
- """ perform the reduction type operation if we can """
+ """
+ Perform the reduction type operation if we can.
+ """
func = getattr(self, name, None)
if func is None:
raise TypeError(
@@ -1110,9 +1102,7 @@ def _map_values(self, mapper, na_action=None):
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
-
"""
-
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
@@ -1272,12 +1262,16 @@ def unique(self):
if hasattr(values, "unique"):
result = values.unique()
+ if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
+ # GH#31182 Series._values returns EA, unpack for backward-compat
+ if getattr(self.dtype, "tz", None) is None:
+ result = np.asarray(result)
else:
result = unique1d(values)
return result
- def nunique(self, dropna=True):
+ def nunique(self, dropna: bool = True) -> int:
"""
Return number of unique elements in the object.
@@ -1318,7 +1312,7 @@ def nunique(self, dropna=True):
return n
@property
- def is_unique(self):
+ def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique.
@@ -1329,7 +1323,7 @@ def is_unique(self):
return self.nunique(dropna=False) == len(self)
@property
- def is_monotonic(self):
+ def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are
monotonic_increasing.
@@ -1342,7 +1336,13 @@ def is_monotonic(self):
return Index(self).is_monotonic
- is_monotonic_increasing = is_monotonic
+ @property
+ def is_monotonic_increasing(self) -> bool:
+ """
+ Alias for is_monotonic.
+ """
+ # mypy complains if we alias directly
+ return self.is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
@@ -1454,7 +1454,6 @@ def factorize(self, sort=False, na_sentinel=-1):
Examples
--------
-
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
@@ -1495,7 +1494,7 @@ def factorize(self, sort=False, na_sentinel=-1):
@Substitution(klass="Index")
@Appender(_shared_docs["searchsorted"])
- def searchsorted(self, value, side="left", sorter=None):
+ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first", inplace=False):
diff --git a/pandas/core/common.py b/pandas/core/common.py
index f0fcb736586d6..a76119da2707a 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -72,16 +72,6 @@ def consensus_name_attr(objs):
return name
-def maybe_box(indexer, values, obj, key):
-
- # if we have multiples coming back, box em
- if isinstance(values, np.ndarray):
- return obj[indexer.get_loc(key)]
-
- # return the value
- return values
-
-
def maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
@@ -121,8 +111,8 @@ def is_bool_indexer(key: Any) -> bool:
See Also
--------
- check_bool_array_indexer : Check that `key`
- is a valid mask for an array, and convert to an ndarray.
+ check_array_indexer : Check that `key` is a valid array to index,
+ and convert to an ndarray.
"""
na_msg = "cannot mask with array containing NA / NaN values"
if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
@@ -178,35 +168,35 @@ def not_none(*args):
return (arg for arg in args if arg is not None)
-def any_none(*args):
+def any_none(*args) -> bool:
"""
Returns a boolean indicating if any argument is None.
"""
return any(arg is None for arg in args)
-def all_none(*args):
+def all_none(*args) -> bool:
"""
Returns a boolean indicating if all arguments are None.
"""
return all(arg is None for arg in args)
-def any_not_none(*args):
+def any_not_none(*args) -> bool:
"""
Returns a boolean indicating if any argument is not None.
"""
return any(arg is not None for arg in args)
-def all_not_none(*args):
+def all_not_none(*args) -> bool:
"""
Returns a boolean indicating if all arguments are not None.
"""
return all(arg is not None for arg in args)
-def count_not_none(*args):
+def count_not_none(*args) -> int:
"""
Returns the count of arguments that are not None.
"""
@@ -286,7 +276,7 @@ def maybe_iterable_to_list(obj: Union[Iterable[T], T]) -> Union[Collection[T], T
return obj
-def is_null_slice(obj):
+def is_null_slice(obj) -> bool:
"""
We have a null slice.
"""
@@ -306,7 +296,7 @@ def is_true_slices(l):
# TODO: used only once in indexing; belongs elsewhere?
-def is_full_slice(obj, l):
+def is_full_slice(obj, l) -> bool:
"""
We have a full length slice.
"""
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index a1b1cffdd1d76..e45d3ca66b6ec 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -1,4 +1,5 @@
-"""Core eval alignment algorithms
+"""
+Core eval alignment algorithms.
"""
from functools import partial, wraps
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 51892b8c02d87..4cdf4bac61316 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+#!/usr/bin/env python3
"""
Top level ``eval`` module.
"""
@@ -26,30 +25,29 @@ def _check_engine(engine: Optional[str]) -> str:
Parameters
----------
engine : str
+ String to validate.
Raises
------
KeyError
- * If an invalid engine is passed
+ * If an invalid engine is passed.
ImportError
- * If numexpr was requested but doesn't exist
+ * If numexpr was requested but doesn't exist.
Returns
-------
- string engine
+ str
+ Engine name.
"""
from pandas.core.computation.check import _NUMEXPR_INSTALLED
if engine is None:
- if _NUMEXPR_INSTALLED:
- engine = "numexpr"
- else:
- engine = "python"
+ engine = "numexpr" if _NUMEXPR_INSTALLED else "python"
if engine not in _engines:
- valid = list(_engines.keys())
+ valid_engines = list(_engines.keys())
raise KeyError(
- f"Invalid engine {repr(engine)} passed, valid engines are {valid}"
+ f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
)
# TODO: validate this in a more general way (thinking of future engines
@@ -58,10 +56,8 @@ def _check_engine(engine: Optional[str]) -> str:
if engine == "numexpr":
if not _NUMEXPR_INSTALLED:
raise ImportError(
- "'numexpr' is not installed or an "
- "unsupported version. Cannot use "
- "engine='numexpr' for query/eval "
- "if 'numexpr' is not installed"
+ "'numexpr' is not installed or an unsupported version. Cannot use "
+ "engine='numexpr' for query/eval if 'numexpr' is not installed"
)
return engine
@@ -80,11 +76,9 @@ def _check_parser(parser: str):
KeyError
* If an invalid parser is passed
"""
-
if parser not in _parsers:
raise KeyError(
- f"Invalid parser {repr(parser)} passed, "
- f"valid parsers are {_parsers.keys()}"
+ f"Invalid parser '{parser}' passed, valid parsers are {_parsers.keys()}"
)
@@ -94,8 +88,8 @@ def _check_resolvers(resolvers):
if not hasattr(resolver, "__getitem__"):
name = type(resolver).__name__
raise TypeError(
- f"Resolver of type {repr(name)} does not "
- f"implement the __getitem__ method"
+ f"Resolver of type '{name}' does not "
+ "implement the __getitem__ method"
)
@@ -155,10 +149,8 @@ def _check_for_locals(expr: str, stack_level: int, parser: str):
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = (
- "The '@' prefix is not allowed in "
- "top-level eval calls, \nplease refer to "
- "your variables by name without the '@' "
- "prefix"
+ "The '@' prefix is not allowed in top-level eval calls.\n"
+ "please refer to your variables by name without the '@' prefix."
)
if at_top_of_stack or not_pandas_parser:
@@ -285,13 +277,14 @@ def eval(
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
"""
-
inplace = validate_bool_kwarg(inplace, "inplace")
if truediv is not no_default:
warnings.warn(
- "The `truediv` parameter in pd.eval is deprecated and will be "
- "removed in a future version.",
+ (
+ "The `truediv` parameter in pd.eval is deprecated and "
+ "will be removed in a future version."
+ ),
FutureWarning,
stacklevel=2,
)
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 1350587b5ca90..c26208d3b4465 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -1,11 +1,12 @@
-""":func:`~pandas.eval` parsers
+"""
+:func:`~pandas.eval` parsers.
"""
import ast
from functools import partial, reduce
from keyword import iskeyword
import tokenize
-from typing import Optional, Type
+from typing import Callable, Optional, Set, Tuple, Type, TypeVar
import numpy as np
@@ -34,8 +35,9 @@
import pandas.io.formats.printing as printing
-def _rewrite_assign(tok):
- """Rewrite the assignment operator for PyTables expressions that use ``=``
+def _rewrite_assign(tok: Tuple[int, str]) -> Tuple[int, str]:
+ """
+ Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
@@ -45,15 +47,16 @@ def _rewrite_assign(tok):
Returns
-------
- t : tuple of int, str
+ tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
return toknum, "==" if tokval == "=" else tokval
-def _replace_booleans(tok):
- """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
+def _replace_booleans(tok: Tuple[int, str]) -> Tuple[int, str]:
+ """
+ Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
@@ -63,7 +66,7 @@ def _replace_booleans(tok):
Returns
-------
- t : tuple of int, str
+ tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
@@ -76,8 +79,9 @@ def _replace_booleans(tok):
return toknum, tokval
-def _replace_locals(tok):
- """Replace local variables with a syntactically valid name.
+def _replace_locals(tok: Tuple[int, str]) -> Tuple[int, str]:
+ """
+ Replace local variables with a syntactically valid name.
Parameters
----------
@@ -86,7 +90,7 @@ def _replace_locals(tok):
Returns
-------
- t : tuple of int, str
+ tuple of int, str
Either the input or token or the replacement values
Notes
@@ -102,12 +106,16 @@ def _replace_locals(tok):
def _compose2(f, g):
- """Compose 2 callables"""
+ """
+ Compose 2 callables.
+ """
return lambda *args, **kwargs: f(g(*args, **kwargs))
def _compose(*funcs):
- """Compose 2 or more callables"""
+ """
+ Compose 2 or more callables.
+ """
assert len(funcs) > 1, "At least 2 callables must be passed to compose"
return reduce(_compose2, funcs)
@@ -117,8 +125,9 @@ def _preparse(
f=_compose(
_replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks
),
-):
- """Compose a collection of tokenization functions
+) -> str:
+ """
+ Compose a collection of tokenization functions.
Parameters
----------
@@ -132,7 +141,7 @@ def _preparse(
Returns
-------
- s : str
+ str
Valid Python source code
Notes
@@ -146,7 +155,9 @@ def _preparse(
def _is_type(t):
- """Factory for a type checking function of type ``t`` or tuple of types."""
+ """
+ Factory for a type checking function of type ``t`` or tuple of types.
+ """
return lambda x: isinstance(x.value, t)
@@ -164,7 +175,9 @@ def _is_type(t):
def _filter_nodes(superclass, all_nodes=_all_nodes):
- """Filter out AST nodes that are subclasses of ``superclass``."""
+ """
+ Filter out AST nodes that are subclasses of ``superclass``.
+ """
node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))
return frozenset(node_names)
@@ -227,30 +240,35 @@ def _filter_nodes(superclass, all_nodes=_all_nodes):
assert not intersection, _msg
-def _node_not_implemented(node_name, cls):
- """Return a function that raises a NotImplementedError with a passed node
- name.
+# TODO: Python 3.6.2: replace Callable[..., None] with Callable[..., NoReturn]
+def _node_not_implemented(node_name: str) -> Callable[..., None]:
+ """
+ Return a function that raises a NotImplementedError with a passed node name.
"""
def f(self, *args, **kwargs):
- raise NotImplementedError(f"{repr(node_name)} nodes are not implemented")
+ raise NotImplementedError(f"'{node_name}' nodes are not implemented")
return f
-def disallow(nodes):
- """Decorator to disallow certain nodes from parsing. Raises a
+_T = TypeVar("_T", bound="BaseExprVisitor")
+
+
+def disallow(nodes: Set[str]) -> Callable[[Type[_T]], Type[_T]]:
+ """
+ Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
- disallowed : callable
+ callable
"""
- def disallowed(cls):
+ def disallowed(cls: Type[_T]) -> Type[_T]:
cls.unsupported_nodes = ()
for node in nodes:
- new_method = _node_not_implemented(node, cls)
+ new_method = _node_not_implemented(node)
name = f"visit_{node}"
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
@@ -260,20 +278,21 @@ def disallowed(cls):
def _op_maker(op_class, op_symbol):
- """Return a function to create an op class with its symbol already passed.
+ """
+ Return a function to create an op class with its symbol already passed.
Returns
-------
- f : callable
+ callable
"""
def f(self, node, *args, **kwargs):
- """Return a partial function with an Op subclass with an operator
- already passed.
+ """
+ Return a partial function with an Op subclass with an operator already passed.
Returns
-------
- f : callable
+ callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
@@ -284,7 +303,9 @@ def f(self, node, *args, **kwargs):
def add_ops(op_classes):
- """Decorator to add default implementation of ops."""
+ """
+ Decorator to add default implementation of ops.
+ """
def f(cls):
for op_attr_name, op_class in op_classes.items():
@@ -353,6 +374,8 @@ class BaseExprVisitor(ast.NodeVisitor):
ast.NotIn: ast.NotIn,
}
+ unsupported_nodes: Tuple[str, ...]
+
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
@@ -466,8 +489,8 @@ def _maybe_evaluate_binop(
if res.has_invalid_return_type:
raise TypeError(
- f"unsupported operand type(s) for {res.op}:"
- f" '{lhs.type}' and '{rhs.type}'"
+ f"unsupported operand type(s) for {res.op}: "
+ f"'{lhs.type}' and '{rhs.type}'"
)
if self.engine != "pytables":
@@ -647,7 +670,7 @@ def visit_Call(self, node, side=None, **kwargs):
f'Function "{res.name}" does not support keyword arguments'
)
- return res(*new_args, **kwargs)
+ return res(*new_args)
else:
@@ -777,12 +800,16 @@ def __len__(self) -> int:
return len(self.expr)
def parse(self):
- """Parse an expression"""
+ """
+ Parse an expression.
+ """
return self._visitor.visit(self.expr)
@property
def names(self):
- """Get the names in an expression"""
+ """
+ Get the names in an expression.
+ """
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 7e959889ee997..fdc299ccdfde8 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -12,8 +12,6 @@
from pandas._config import get_option
-from pandas._libs.lib import values_from_object
-
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.computation.check import _NUMEXPR_INSTALLED
@@ -45,12 +43,9 @@ def set_use_numexpr(v=True):
# choose what we are going to do
global _evaluate, _where
- if not _USE_NUMEXPR:
- _evaluate = _evaluate_standard
- _where = _where_standard
- else:
- _evaluate = _evaluate_numexpr
- _where = _where_numexpr
+
+ _evaluate = _evaluate_numexpr if _USE_NUMEXPR else _evaluate_standard
+ _where = _where_numexpr if _USE_NUMEXPR else _where_standard
def set_numexpr_threads(n=None):
@@ -63,7 +58,9 @@ def set_numexpr_threads(n=None):
def _evaluate_standard(op, op_str, a, b):
- """ standard evaluation """
+ """
+ Standard evaluation.
+ """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all="ignore"):
@@ -124,26 +121,19 @@ def _evaluate_numexpr(op, op_str, a, b):
def _where_standard(cond, a, b):
- return np.where(
- values_from_object(cond), values_from_object(a), values_from_object(b)
- )
+ # Caller is responsible for calling values_from_object if necessary
+ return np.where(cond, a, b)
def _where_numexpr(cond, a, b):
+ # Caller is responsible for calling values_from_object if necessary
result = None
if _can_use_numexpr(None, "where", a, b, "where"):
- cond_value = getattr(cond, "values", cond)
- a_value = getattr(a, "values", a)
- b_value = getattr(b, "values", b)
result = ne.evaluate(
"where(cond_value, a_value, b_value)",
- local_dict={
- "cond_value": cond_value,
- "a_value": a_value,
- "b_value": b_value,
- },
+ local_dict={"cond_value": cond, "a_value": a, "b_value": b},
casting="safe",
)
@@ -176,7 +166,7 @@ def _bool_arith_check(
if op_str in unsupported:
warnings.warn(
f"evaluating in Python space because the {repr(op_str)} "
- f"operator is not supported by numexpr for "
+ "operator is not supported by numexpr for "
f"the bool dtype, use {repr(unsupported[op_str])} instead"
)
return False
@@ -202,7 +192,6 @@ def evaluate(op, op_str, a, b, use_numexpr=True):
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
-
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b)
@@ -221,10 +210,7 @@ def where(cond, a, b, use_numexpr=True):
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
-
- if use_numexpr:
- return _where(cond, a, b)
- return _where_standard(cond, a, b)
+ return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b)
def set_test_mode(v=True):
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index cb166ba65152b..5563d3ae27118 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -5,6 +5,7 @@
from distutils.version import LooseVersion
from functools import partial
import operator
+from typing import Callable, Iterable, Optional, Union
import numpy as np
@@ -55,7 +56,7 @@ class UndefinedVariableError(NameError):
NameError subclass for local variables.
"""
- def __init__(self, name, is_local: bool):
+ def __init__(self, name: str, is_local: Optional[bool] = None):
base_msg = f"{repr(name)} is not defined"
if is_local:
msg = f"local variable {base_msg}"
@@ -199,10 +200,10 @@ class Op:
op: str
- def __init__(self, op: str, operands, *args, **kwargs):
+ def __init__(self, op: str, operands: Iterable[Union[Term, "Op"]], encoding=None):
self.op = _bool_op_map.get(op, op)
self.operands = operands
- self.encoding = kwargs.get("encoding", None)
+ self.encoding = encoding
def __iter__(self):
return iter(self.operands)
@@ -353,11 +354,11 @@ class BinOp(Op):
Parameters
----------
op : str
- left : Term or Op
- right : Term or Op
+ lhs : Term or Op
+ rhs : Term or Op
"""
- def __init__(self, op: str, lhs, rhs, **kwargs):
+ def __init__(self, op: str, lhs, rhs):
super().__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
@@ -388,7 +389,6 @@ def __call__(self, env):
object
The result of an evaluated expression.
"""
-
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
@@ -416,6 +416,7 @@ def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
res = self(env)
else:
# recurse over the left/right nodes
+
left = self.lhs.evaluate(
env,
engine=engine,
@@ -423,6 +424,7 @@ def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
term_type=term_type,
eval_in_python=eval_in_python,
)
+
right = self.rhs.evaluate(
env,
engine=engine,
@@ -447,6 +449,7 @@ def convert_values(self):
"""
def stringify(value):
+ encoder: Callable
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
@@ -501,8 +504,8 @@ class Div(BinOp):
The Terms or Ops in the ``/`` expression.
"""
- def __init__(self, lhs, rhs, **kwargs):
- super().__init__("/", lhs, rhs, **kwargs)
+ def __init__(self, lhs, rhs):
+ super().__init__("/", lhs, rhs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError(
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index afdd8a01ee003..3776c6f816d96 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -6,7 +6,7 @@
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
-module is imported, register them here rather then in the module.
+module is imported, register them here rather than in the module.
"""
import pandas._config.config as cf
@@ -479,6 +479,7 @@ def use_inf_as_na_cb(key):
_xlsm_options = ["xlrd", "openpyxl"]
_xlsx_options = ["xlrd", "openpyxl"]
_ods_options = ["odf"]
+_xlsb_options = ["pyxlsb"]
with cf.config_prefix("io.excel.xls"):
@@ -515,6 +516,13 @@ def use_inf_as_na_cb(key):
validator=str,
)
+with cf.config_prefix("io.excel.xlsb"):
+ cf.register_option(
+ "reader",
+ "auto",
+ reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
+ validator=str,
+ )
# Set up the io.excel specific writer configuration.
writer_engine_doc = """
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 203ef3ec75c8f..f947a1fda49f1 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -334,7 +334,7 @@ def array(
return result
-def extract_array(obj, extract_numpy=False):
+def extract_array(obj, extract_numpy: bool = False):
"""
Extract the ndarray or ExtensionArray from a Series or Index.
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 1b4e7062b38e5..eddf46ee362d6 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -235,8 +235,9 @@ def construct_from_string(cls, string: str):
... " "'{string}'")
"""
if not isinstance(string, str):
- raise TypeError(f"Expects a string, got {type(string).__name__}")
-
+ raise TypeError(
+ f"'construct_from_string' expects a string, got {type(string)}"
+ )
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 1dbdb8dbba48b..52c569793e499 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -7,6 +7,7 @@
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT
from pandas._libs.tslibs.timezones import tz_compare
+from pandas._typing import Dtype
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
@@ -34,6 +35,7 @@
is_float_dtype,
is_integer,
is_integer_dtype,
+ is_numeric_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
@@ -670,7 +672,7 @@ def infer_dtype_from_array(arr, pandas_dtype: bool = False):
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
- if inferred in ["string", "bytes", "unicode", "mixed", "mixed-integer"]:
+ if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.object_, arr)
arr = np.asarray(arr)
@@ -1018,6 +1020,80 @@ def soft_convert_objects(
return values
+def convert_dtypes(
+ input_array,
+ convert_string: bool = True,
+ convert_integer: bool = True,
+ convert_boolean: bool = True,
+) -> Dtype:
+ """
+ Convert objects to best possible type, and optionally,
+ to types supporting ``pd.NA``.
+
+ Parameters
+ ----------
+ input_array : ExtensionArray or PandasArray
+ convert_string : bool, default True
+ Whether object dtypes should be converted to ``StringDtype()``.
+ convert_integer : bool, default True
+ Whether, if possible, conversion can be done to integer extension types.
+ convert_boolean : bool, defaults True
+ Whether object dtypes should be converted to ``BooleanDtypes()``.
+
+ Returns
+ -------
+ dtype
+ new dtype
+ """
+
+ if convert_string or convert_integer or convert_boolean:
+ try:
+ inferred_dtype = lib.infer_dtype(input_array)
+ except ValueError:
+ # Required to catch due to Period. Can remove once GH 23553 is fixed
+ inferred_dtype = input_array.dtype
+
+ if not convert_string and is_string_dtype(inferred_dtype):
+ inferred_dtype = input_array.dtype
+
+ if convert_integer:
+ target_int_dtype = "Int64"
+
+ if isinstance(inferred_dtype, str) and (
+ inferred_dtype == "mixed-integer"
+ or inferred_dtype == "mixed-integer-float"
+ ):
+ inferred_dtype = target_int_dtype
+ if is_integer_dtype(input_array.dtype) and not is_extension_array_dtype(
+ input_array.dtype
+ ):
+ from pandas.core.arrays.integer import _dtypes
+
+ inferred_dtype = _dtypes.get(input_array.dtype.name, target_int_dtype)
+ if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
+ input_array.dtype
+ ):
+ inferred_dtype = target_int_dtype
+
+ else:
+ if is_integer_dtype(inferred_dtype):
+ inferred_dtype = input_array.dtype
+
+ if convert_boolean:
+ if is_bool_dtype(input_array.dtype) and not is_extension_array_dtype(
+ input_array.dtype
+ ):
+ inferred_dtype = "boolean"
+ else:
+ if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
+ inferred_dtype = input_array.dtype
+
+ else:
+ inferred_dtype = input_array.dtype
+
+ return inferred_dtype
+
+
def maybe_castable(arr) -> bool:
# return False to force a non-fastpath
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 5a007f28d63cb..eb9b880cd10d9 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -6,7 +6,7 @@
from pandas._libs import algos, lib
from pandas._libs.tslibs import conversion
-from pandas._typing import ArrayLike
+from pandas._typing import ArrayLike, DtypeObj
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
@@ -194,12 +194,11 @@ def ensure_python_int(value: Union[int, np.integer]) -> int:
"""
if not is_scalar(value):
raise TypeError(f"Value needs to be a scalar value, was type {type(value)}")
- msg = "Wrong type {} for value {}"
try:
new_value = int(value)
assert new_value == value
except (TypeError, ValueError, AssertionError):
- raise TypeError(msg.format(type(value), value))
+ raise TypeError(f"Wrong type {type(value)} for value {value}")
return new_value
@@ -1669,7 +1668,7 @@ def _is_dtype(arr_or_dtype, condition) -> bool:
return condition(dtype)
-def _get_dtype(arr_or_dtype):
+def _get_dtype(arr_or_dtype) -> DtypeObj:
"""
Get the dtype instance associated with an array
or dtype object.
@@ -1841,7 +1840,7 @@ def _validate_date_like_dtype(dtype) -> None:
)
-def pandas_dtype(dtype):
+def pandas_dtype(dtype) -> DtypeObj:
"""
Convert input into a pandas only dtype object or a numpy dtype object.
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 466ed815e8e5a..d00b46700981c 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -352,7 +352,9 @@ def construct_from_string(cls, string: str_type) -> "CategoricalDtype":
If a CategoricalDtype cannot be constructed from the input.
"""
if not isinstance(string, str):
- raise TypeError(f"Expects a string, got {type(string)}")
+ raise TypeError(
+ f"'construct_from_string' expects a string, got {type(string)}"
+ )
if string != cls.name:
raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
@@ -435,12 +437,11 @@ def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def __repr__(self) -> str_type:
- tpl = "CategoricalDtype(categories={data}ordered={ordered})"
if self.categories is None:
data = "None, "
else:
data = self.categories._format_data(name=type(self).__name__)
- return tpl.format(data=data, ordered=self.ordered)
+ return f"CategoricalDtype(categories={data}ordered={self.ordered})"
@staticmethod
def _hash_categories(categories, ordered: Ordered = True) -> int:
@@ -729,22 +730,24 @@ def construct_from_string(cls, string: str_type):
>>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
datetime64[ns, UTC]
"""
- if isinstance(string, str):
- msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
- match = cls._match.match(string)
- if match:
- d = match.groupdict()
- try:
- return cls(unit=d["unit"], tz=d["tz"])
- except (KeyError, TypeError, ValueError) as err:
- # KeyError if maybe_get_tz tries and fails to get a
- # pytz timezone (actually pytz.UnknownTimeZoneError).
- # TypeError if we pass a nonsense tz;
- # ValueError if we pass a unit other than "ns"
- raise TypeError(msg) from err
- raise TypeError(msg)
+ if not isinstance(string, str):
+ raise TypeError(
+ f"'construct_from_string' expects a string, got {type(string)}"
+ )
- raise TypeError("Cannot construct a 'DatetimeTZDtype'")
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
+ match = cls._match.match(string)
+ if match:
+ d = match.groupdict()
+ try:
+ return cls(unit=d["unit"], tz=d["tz"])
+ except (KeyError, TypeError, ValueError) as err:
+ # KeyError if maybe_get_tz tries and fails to get a
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
+ # TypeError if we pass a nonsense tz;
+ # ValueError if we pass a unit other than "ns"
+ raise TypeError(msg) from err
+ raise TypeError(msg)
def __str__(self) -> str_type:
return f"datetime64[{self.unit}, {self.tz}]"
@@ -1076,7 +1079,9 @@ def construct_from_string(cls, string):
if its not possible
"""
if not isinstance(string, str):
- raise TypeError(f"a string needs to be passed, got type {type(string)}")
+ raise TypeError(
+ f"'construct_from_string' expects a string, got {type(string)}"
+ )
if string.lower() == "interval" or cls._match.search(string) is not None:
return cls(string)
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 4c3f8b7374465..435d80b2c4dfb 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -56,9 +56,7 @@ def _check(cls, inst) -> bool:
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
-ABCSparseArray = create_pandas_abc_type(
- "ABCSparseArray", "_subtyp", ("sparse_array", "sparse_series")
-)
+ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp", ("sparse_array",))
ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ", ("categorical"))
ABCDatetimeArray = create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray"))
ABCTimedeltaArray = create_pandas_abc_type(
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 9e9278052e35d..37bca76802843 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -25,6 +25,8 @@
is_list_like = lib.is_list_like
+is_iterator = lib.is_iterator
+
def is_number(obj) -> bool:
"""
@@ -93,40 +95,6 @@ def _iterable_not_string(obj) -> bool:
return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
-def is_iterator(obj) -> bool:
- """
- Check if the object is an iterator.
-
- For example, lists are considered iterators
- but not strings or datetime objects.
-
- Parameters
- ----------
- obj : The object to check
-
- Returns
- -------
- is_iter : bool
- Whether `obj` is an iterator.
-
- Examples
- --------
- >>> is_iterator([1, 2, 3])
- True
- >>> is_iterator(datetime(2017, 1, 1))
- False
- >>> is_iterator("foo")
- False
- >>> is_iterator(1)
- False
- """
-
- if not hasattr(obj, "__iter__"):
- return False
-
- return hasattr(obj, "__next__")
-
-
def is_file_like(obj) -> bool:
"""
Check if the object is a file-like object.
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index fb579f2f58a57..0bc754b3e8fb3 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -8,6 +8,7 @@
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
+from pandas._typing import DtypeObj
from pandas.core.dtypes.common import (
_NS_DTYPE,
@@ -585,7 +586,7 @@ def remove_na_arraylike(arr):
return arr[notna(lib.values_from_object(arr))]
-def is_valid_nat_for_dtype(obj, dtype) -> bool:
+def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
"""
isna check that excludes incompatible dtypes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5ad133f9e21a4..70e440b49ae6c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8,8 +8,10 @@
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
+
import collections
from collections import abc
+import datetime
from io import StringIO
import itertools
import sys
@@ -18,6 +20,7 @@
IO,
TYPE_CHECKING,
Any,
+ Dict,
FrozenSet,
Hashable,
Iterable,
@@ -37,8 +40,8 @@
from pandas._config import get_option
-from pandas._libs import algos as libalgos, lib
-from pandas._typing import Axes, Axis, Dtype, FilePathOrBuffer, Level, Renamer
+from pandas._libs import algos as libalgos, lib, properties
+from pandas._typing import Axes, Axis, Dtype, FilePathOrBuffer, Label, Level, Renamer
from pandas.compat import PY37
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
@@ -90,8 +93,10 @@
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
+ ABCDatetimeIndex,
ABCIndexClass,
ABCMultiIndex,
+ ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
@@ -102,7 +107,6 @@
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
-from pandas.core.groupby import generic as groupby_generic
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.datetimes import DatetimeIndex
@@ -129,6 +133,7 @@
import pandas.plotting
if TYPE_CHECKING:
+ from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
@@ -258,7 +263,6 @@
Examples
--------
-
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
@@ -394,6 +398,7 @@ class DataFrame(NDFrame):
2 7 8 9
"""
+ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
@property
@@ -491,12 +496,12 @@ def __init__(
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
- except (ValueError, TypeError) as e:
+ except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
- f"incompatible data and dtype: {e}"
+ f"incompatible data and dtype: {err}"
)
- raise exc from e
+ raise exc from err
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array(
@@ -602,7 +607,7 @@ def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
- In case off non-interactive session, no boundaries apply.
+ In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipnb+HTML output can behave the way
users expect. display.max_columns remains in effect.
@@ -794,7 +799,6 @@ def to_string(
1 2 5
2 3 6
"""
-
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
@@ -870,8 +874,8 @@ def style(self) -> "Styler":
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
- ... print('label:', label)
- ... print('content:', content, sep='\n')
+ ... print(f'label: {label}')
+ ... print(f'content: {content}', sep='\n')
...
label: species
content:
@@ -1488,9 +1492,9 @@ def to_gbq(
when getting user credentials.
.. _local webserver flow:
- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
@@ -1583,7 +1587,6 @@ def from_records(
-------
DataFrame
"""
-
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
@@ -1764,7 +1767,6 @@ def to_records(
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
-
if index:
if isinstance(self.index, ABCMultiIndex):
# array of tuples to numpy cols. copy copy copy
@@ -1851,16 +1853,16 @@ def _from_arrays(cls, arrays, columns, index, dtype=None) -> "DataFrame":
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
- path,
- convert_dates=None,
- write_index=True,
- byteorder=None,
- time_stamp=None,
- data_label=None,
- variable_labels=None,
- version=114,
- convert_strl=None,
- ):
+ path: FilePathOrBuffer,
+ convert_dates: Optional[Dict[Label, str]] = None,
+ write_index: bool = True,
+ byteorder: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ data_label: Optional[str] = None,
+ variable_labels: Optional[Dict[Label, str]] = None,
+ version: Optional[int] = 114,
+ convert_strl: Optional[Sequence[Label]] = None,
+ ) -> None:
"""
Export DataFrame object to Stata dta format.
@@ -1898,14 +1900,22 @@ def to_stata(
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
- version : {114, 117}, default 114
- Version to use in the output dta file. Version 114 can be used
- read by Stata 10 and later. Version 117 can be read by Stata 13
- or later. Version 114 limits string variables to 244 characters or
- fewer while 117 allows strings with lengths up to 2,000,000
- characters.
+ version : {114, 117, 118, 119, None}, default 114
+ Version to use in the output dta file. Set to None to let pandas
+ decide between 118 or 119 formats depending on the number of
+ columns in the frame. Version 114 can be read by Stata 10 and
+ later. Version 117 can be read by Stata 13 or later. Version 118
+ is supported in Stata 14 and later. Version 119 is supported in
+ Stata 15 and later. Version 114 limits string variables to 244
+ characters or fewer while versions 117 and later allow strings
+ with lengths up to 2,000,000 characters. Versions 118 and 119
+ support Unicode characters, and version 119 supports more than
+ 32,767 variables.
.. versionadded:: 0.23.0
+ .. versionchanged:: 1.0.0
+
+ Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
@@ -1939,22 +1949,29 @@ def to_stata(
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
- kwargs = {}
- if version not in (114, 117, 118):
- raise ValueError("Only formats 114, 117 and 118 are supported.")
+ if version not in (114, 117, 118, 119, None):
+ raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
- else:
- if version == 117:
- from pandas.io.stata import StataWriter117 as statawriter
- else:
- from pandas.io.stata import StataWriter118 as statawriter
-
+ elif version == 117:
+ # mypy: Name 'statawriter' already defined (possibly by an import)
+ from pandas.io.stata import StataWriter117 as statawriter # type: ignore
+ else: # versions 118 and 119
+ # mypy: Name 'statawriter' already defined (possibly by an import)
+ from pandas.io.stata import StataWriterUTF8 as statawriter # type:ignore
+
+ kwargs: Dict[str, Any] = {}
+ if version is None or version >= 117:
+ # strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
+ if version is None or version >= 118:
+ # Specifying the version is only supported for UTF8 (118 or 119)
+ kwargs["version"] = version
- writer = statawriter(
+ # mypy: Too many arguments for "StataWriter"
+ writer = statawriter( # type: ignore
path,
self,
convert_dates=convert_dates,
@@ -2173,7 +2190,6 @@ def to_html(
--------
to_string : Convert DataFrame to a string.
"""
-
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
@@ -2347,7 +2363,6 @@ def info(
dtypes: object(3)
memory usage: 188.8 MB
"""
-
if buf is None: # pragma: no cover
buf = sys.stdout
@@ -2431,7 +2446,7 @@ def _verbose_repr():
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
- line_no = _put_str(" {num}".format(num=i), space_num)
+ line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
count = counts.iloc[i]
@@ -2737,14 +2752,7 @@ def _ixs(self, i: int, axis: int = 0):
else:
label = self.columns[i]
- # if the values returned are not the same length
- # as the index (iow a not found value), iget returns
- # a 0-len ndarray. This is effectively catching
- # a numpy error (as numpy should really raise)
values = self._data.iget(i)
-
- if len(self.index) and not len(values):
- values = np.array([np.nan] * len(self.index), dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
@@ -2797,7 +2805,7 @@ def __getitem__(self, key):
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
- data = self.take(indexer, axis=1)
+ data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
@@ -2830,7 +2838,7 @@ def _getitem_bool_array(self, key):
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
- return self.take(indexer, axis=0)
+ return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
@@ -2892,14 +2900,17 @@ def _get_value(self, index, col, takeable: bool = False):
engine = self.index._engine
try:
- return engine.get_value(series._values, index)
+ if isinstance(series._values, np.ndarray):
+ # i.e. not EA, we can use engine
+ return engine.get_value(series._values, index)
+ else:
+ loc = series.index.get_loc(index)
+ return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
- except (TypeError, ValueError):
- pass
# we cannot handle direct indexing
# use positional
@@ -2980,7 +2991,6 @@ def _set_item(self, key, value):
Series/TimeSeries will be conformed to the DataFrames index to
ensure homogeneity.
"""
-
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
@@ -3040,8 +3050,7 @@ def _ensure_valid_index(self, value):
except (ValueError, NotImplementedError, TypeError):
raise ValueError(
"Cannot set a frame with no defined index "
- "and a value that cannot be converted to a "
- "Series"
+ "and a value that cannot be converted to a Series"
)
self._data = self._data.reindex_axis(
@@ -3359,7 +3368,7 @@ def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
- <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
+ <https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
@@ -3409,7 +3418,6 @@ def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
4 True 1.0
5 False 2.0
"""
-
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
@@ -3613,12 +3621,12 @@ def reindexer(value):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
- value = sanitize_index(value, self.index, copy=False)
+ value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
- value = sanitize_index(value, self.index, copy=False)
+ value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
@@ -3680,11 +3688,7 @@ def lookup(self, row_labels, col_labels) -> np.ndarray:
Returns
-------
numpy.ndarray
-
- Examples
- --------
- values : ndarray
- The found values
+ The found values.
"""
n = len(row_labels)
if n != len(col_labels):
@@ -3775,7 +3779,6 @@ def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame":
"""
We are guaranteed non-Nones in the axes.
"""
-
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
@@ -3819,6 +3822,46 @@ def align(
broadcast_axis=broadcast_axis,
)
+ @Appender(
+ """
+ >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+
+ Change the row labels.
+
+ >>> df.set_axis(['a', 'b', 'c'], axis='index')
+ A B
+ a 1 4
+ b 2 5
+ c 3 6
+
+ Change the column labels.
+
+ >>> df.set_axis(['I', 'II'], axis='columns')
+ I II
+ 0 1 4
+ 1 2 5
+ 2 3 6
+
+ Now, update the labels inplace.
+
+ >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
+ >>> df
+ i ii
+ 0 1 4
+ 1 2 5
+ 2 3 6
+ """
+ )
+ @Substitution(
+ **_shared_doc_kwargs,
+ extended_summary_sub=" column or",
+ axis_description_sub=", and 1 identifies the columns",
+ see_also_sub=" or columns",
+ )
+ @Appender(NDFrame.set_axis.__doc__)
+ def set_axis(self, labels, axis=0, inplace=False):
+ return super().set_axis(labels, axis=axis, inplace=inplace)
+
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
@@ -4056,7 +4099,6 @@ def rename(
Examples
--------
-
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
@@ -4930,19 +4972,52 @@ def sort_values(
else:
return self._constructor(new_data).__finalize__(self)
- @Substitution(**_shared_doc_kwargs)
- @Appender(NDFrame.sort_index.__doc__)
def sort_index(
self,
axis=0,
level=None,
- ascending=True,
- inplace=False,
- kind="quicksort",
- na_position="last",
- sort_remaining=True,
+ ascending: bool = True,
+ inplace: bool = False,
+ kind: str = "quicksort",
+ na_position: str = "last",
+ sort_remaining: bool = True,
ignore_index: bool = False,
):
+ """
+ Sort object by labels (along an axis).
+
+ Parameters
+ ----------
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ The axis along which to sort. The value 0 identifies the rows,
+ and 1 identifies the columns.
+ level : int or level name or list of ints or list of level names
+ If not None, sort on values in specified index level(s).
+ ascending : bool, default True
+ Sort ascending vs. descending.
+ inplace : bool, default False
+ If True, perform operation in-place.
+ kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
+ Choice of sorting algorithm. See also ndarray.np.sort for more
+ information. `mergesort` is the only stable algorithm. For
+ DataFrames, this option is only applied when sorting on a single
+ column or label.
+ na_position : {'first', 'last'}, default 'last'
+ Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
+ Not implemented for MultiIndex.
+ sort_remaining : bool, default True
+ If True and sorting by level and index is multilevel, sort by other
+ levels too (in order) after sorting by specified level.
+ ignore_index : bool, default False
+ If True, the resulting axis will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.0.0
+
+ Returns
+ -------
+ sorted_obj : DataFrame or None
+ DataFrame with sorted index if inplace=False, None otherwise.
+ """
# TODO: this can be combined with Series.sort_index impl as
# almost identical
@@ -5226,9 +5301,15 @@ def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
result = self.copy()
axis = self._get_axis_number(axis)
+
+ if not isinstance(result._get_axis(axis), ABCMultiIndex): # pragma: no cover
+ raise TypeError("Can only swap levels on a hierarchical axis.")
+
if axis == 0:
+ assert isinstance(result.index, ABCMultiIndex)
result.index = result.index.swaplevel(i, j)
else:
+ assert isinstance(result.columns, ABCMultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
@@ -5255,15 +5336,17 @@ def reorder_levels(self, order, axis=0) -> "DataFrame":
result = self.copy()
if axis == 0:
+ assert isinstance(result.index, ABCMultiIndex)
result.index = result.index.reorder_levels(order)
else:
+ assert isinstance(result.columns, ABCMultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
- def _combine_frame(self, other, func, fill_value=None, level=None):
+ def _combine_frame(self, other: "DataFrame", func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
@@ -5290,7 +5373,7 @@ def _arith_op(left, right):
return new_data
- def _combine_match_index(self, other, func):
+ def _combine_match_index(self, other: Series, func):
# at this point we have `self.index.equals(other.index)`
if ops.should_series_dispatch(self, other, func):
@@ -5298,8 +5381,10 @@ def _combine_match_index(self, other, func):
new_data = ops.dispatch_to_series(self, other, func)
else:
# fastpath --> operate directly on values
+ other_vals = other.values.reshape(-1, 1)
with np.errstate(all="ignore"):
- new_data = func(self.values.T, other.values).T
+ new_data = func(self.values, other_vals)
+ new_data = dispatch_fill_zeros(func, self.values, other_vals, new_data)
return new_data
def _construct_result(self, result) -> "DataFrame":
@@ -5513,7 +5598,6 @@ def combine_first(self, other: "DataFrame") -> "DataFrame":
Examples
--------
-
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
@@ -5777,13 +5861,14 @@ def groupby(
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
- ) -> "groupby_generic.DataFrameGroupBy":
+ ) -> "DataFrameGroupBy":
+ from pandas.core.groupby.generic import DataFrameGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- return groupby_generic.DataFrameGroupBy(
+ return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
@@ -6291,7 +6376,6 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
3 3 1
3 4 1
"""
-
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
@@ -6521,6 +6605,11 @@ def diff(self, periods=1, axis=0) -> "DataFrame":
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
+ Notes
+ -----
+ For boolean dtypes, this uses :meth:`operator.xor` rather than
+ :meth:`operator.sub`.
+
Examples
--------
Difference with previous row
@@ -6776,7 +6865,6 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
Examples
--------
-
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
@@ -6971,7 +7059,6 @@ def append(
Examples
--------
-
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
@@ -7023,6 +7110,8 @@ def append(
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
+ if not ignore_index:
+ raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
@@ -7532,7 +7621,7 @@ def cov(self, min_periods=None) -> "DataFrame":
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
- <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
+ <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
@@ -7942,7 +8031,7 @@ def _get_data(axis_matters):
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
- result = Series(result, index=labels)
+ result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
@@ -8281,8 +8370,10 @@ def to_timestamp(self, freq=None, how="start", axis=0, copy=True) -> "DataFrame"
axis = self._get_axis_number(axis)
if axis == 0:
+ assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex))
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
+ assert isinstance(self.columns, (ABCDatetimeIndex, ABCPeriodIndex))
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
@@ -8315,8 +8406,10 @@ def to_period(self, freq=None, axis=0, copy=True) -> "DataFrame":
axis = self._get_axis_number(axis)
if axis == 0:
+ assert isinstance(self.index, ABCDatetimeIndex)
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
+ assert isinstance(self.columns, ABCDatetimeIndex)
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
@@ -8351,7 +8444,6 @@ def isin(self, values) -> "DataFrame":
Examples
--------
-
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
@@ -8412,7 +8504,7 @@ def isin(self, values) -> "DataFrame":
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
- f"you passed a {repr(type(values).__name__)}"
+ f"you passed a '{type(values).__name__}'"
)
return DataFrame(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
@@ -8420,6 +8512,15 @@ def isin(self, values) -> "DataFrame":
self.columns,
)
+ # ----------------------------------------------------------------------
+ # Add index and columns
+ index: "Index" = properties.AxisProperty(
+ axis=1, doc="The index (row labels) of the DataFrame."
+ )
+ columns: "Index" = properties.AxisProperty(
+ axis=0, doc="The column labels of the DataFrame."
+ )
+
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0116207675889..f8ee47de94edd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8,6 +8,7 @@
import re
from textwrap import dedent
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Dict,
@@ -29,13 +30,14 @@
from pandas._config import config
-from pandas._libs import Timestamp, iNaT, lib, properties
+from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
Dtype,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
+ Label,
Level,
Renamer,
)
@@ -101,6 +103,9 @@
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
+if TYPE_CHECKING:
+ from pandas.core.resample import Resampler
+
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
@@ -177,7 +182,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset(["get_values", "ix"])
+ _deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
@@ -261,8 +266,8 @@ def _validate_dtype(self, dtype):
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
- "compound dtypes are not implemented"
- f" in the {type(self).__name__} constructor"
+ "compound dtypes are not implemented "
+ f"in the {type(self).__name__} constructor"
)
return dtype
@@ -328,31 +333,12 @@ def _setup_axes(cls, axes: List[str], docs: Dict[str, str]) -> None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
- # setup the actual axis
- def set_axis(a, i):
- setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
- cls._internal_names_set.add(a)
-
- if axes_are_reversed:
- for i, a in cls._AXIS_NAMES.items():
- set_axis(a, 1 - i)
- else:
- for i, a in cls._AXIS_NAMES.items():
- set_axis(a, i)
-
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
- @staticmethod
- def _construct_axes_dict_from(self, axes, **kwargs):
- """Return an axes dictionary for the passed axes."""
- d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
- d.update(kwargs)
- return d
-
def _construct_axes_from_arguments(
self, args, kwargs, require_all: bool = False, sentinel=None
):
@@ -381,18 +367,6 @@ def _construct_axes_from_arguments(
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
- @classmethod
- def _from_axes(cls: Type[FrameOrSeries], data, axes, **kwargs) -> FrameOrSeries:
- # for construction from BlockManager
- if isinstance(data, BlockManager):
- return cls(data, **kwargs)
- else:
- if cls._AXIS_REVERSED:
- axes = axes[::-1]
- d = cls._construct_axes_dict_from(cls, axes, copy=False)
- d.update(kwargs)
- return cls(data, **d)
-
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
@@ -573,7 +547,7 @@ def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
- Indexes for column or row labels can be changed by assigning
+ Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
@@ -588,9 +562,8 @@ def set_axis(self, labels, axis=0, inplace=False):
labels : list-like, Index
The values for the new index.
- axis : {0 or 'index', 1 or 'columns'}, default 0
- The axis to update. The value 0 identifies the rows, and 1
- identifies the columns.
+ axis : %(axes_single_arg)s, default 0
+ The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
@@ -598,57 +571,14 @@ def set_axis(self, labels, axis=0, inplace=False):
Returns
-------
renamed : %(klass)s or None
- An object of same type as caller if inplace=False, None otherwise.
+ An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
- DataFrame.rename_axis : Alter the name of the index or columns.
+ %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
Examples
--------
- **Series**
-
- >>> s = pd.Series([1, 2, 3])
- >>> s
- 0 1
- 1 2
- 2 3
- dtype: int64
-
- >>> s.set_axis(['a', 'b', 'c'], axis=0)
- a 1
- b 2
- c 3
- dtype: int64
-
- **DataFrame**
-
- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
- Change the row labels.
-
- >>> df.set_axis(['a', 'b', 'c'], axis='index')
- A B
- a 1 4
- b 2 5
- c 3 6
-
- Change the column labels.
-
- >>> df.set_axis(['I', 'II'], axis='columns')
- I II
- 0 1 4
- 1 2 5
- 2 3 6
-
- Now, update the labels inplace.
-
- >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
- >>> df
- i ii
- 0 1 4
- 1 2 5
- 2 3 6
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
@@ -907,25 +837,6 @@ def squeeze(self, axis=None):
)
]
- def swaplevel(self: FrameOrSeries, i=-2, j=-1, axis=0) -> FrameOrSeries:
- """
- Swap levels i and j in a MultiIndex on a particular axis
-
- Parameters
- ----------
- i, j : int, str (can be mixed)
- Level of index to be swapped. Can pass level name as string.
-
- Returns
- -------
- swapped : same type as caller (new object)
- """
- axis = self._get_axis_number(axis)
- result = self.copy()
- labels = result._data.axes[axis]
- result._data.set_axis(axis, labels.swaplevel(i, j))
- return result
-
# ----------------------------------------------------------------------
# Rename
@@ -1470,8 +1381,9 @@ def __invert__(self):
# inv fails with 0 len
return self
- arr = operator.inv(com.values_from_object(self))
- return self.__array_wrap__(arr)
+ new_data = self._data.apply(operator.invert)
+ result = self._constructor(new_data).__finalize__(self)
+ return result
def __nonzero__(self):
raise ValueError(
@@ -1697,8 +1609,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
multi_message = (
"\n"
"For a multi-index, the label must be a "
- "tuple with elements corresponding to "
- "each level."
+ "tuple with elements corresponding to each level."
)
else:
multi_message = ""
@@ -1962,9 +1873,9 @@ def __setstate__(self, state):
object.__setattr__(self, k, v)
else:
- self._unpickle_series_compat(state)
+ raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
- self._unpickle_series_compat(state)
+ raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
@@ -2591,7 +2502,7 @@ def to_sql(
References
----------
- .. [1] http://docs.sqlalchemy.org
+ .. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
@@ -2800,7 +2711,7 @@ def to_xarray(self):
Notes
-----
- See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
+ See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
@@ -3044,10 +2955,10 @@ def to_csv(
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
- columns: Optional[Sequence[Optional[Hashable]]] = None,
+ columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
- index_label: Optional[Union[bool_t, str, Sequence[Optional[Hashable]]]] = None,
+ index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
@@ -3166,10 +3077,10 @@ def to_csv(
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
- # create 'out.zip' containing 'out.csv'
+ Create 'out.zip' containing 'out.csv'
+
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
-
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
@@ -3207,19 +3118,6 @@ def to_csv(
return None
- # ----------------------------------------------------------------------
- # Fancy Indexing
-
- @classmethod
- def _create_indexer(cls, name: str, indexer) -> None:
- """Create an indexer like _name in the class.
-
- Kept for compatibility with geopandas. To be removed in the future. See GH27258
- """
- if getattr(cls, name, None) is None:
- _indexer = functools.partial(indexer, name)
- setattr(cls, name, property(_indexer, doc=indexer.__doc__))
-
# ----------------------------------------------------------------------
# Lookup Caching
@@ -3313,8 +3211,11 @@ def take(
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
- is_copy : bool, default True
- Whether to return a copy of the original object or not.
+ is_copy : bool
+ Before pandas 1.0, ``is_copy=False`` can be specified to ensure
+ that the return value is an actual copy. Starting with pandas 1.0,
+ ``take`` always returns a copy, and the keyword is therefore
+ deprecated.
.. deprecated:: 1.0.0
**kwargs
@@ -3378,12 +3279,10 @@ class max_speed
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
- "take will always return a copy in the future.",
+ "'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
- else:
- is_copy = True
nv.validate_take(tuple(), kwargs)
@@ -3392,13 +3291,22 @@ class max_speed
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
- result = self._constructor(new_data).__finalize__(self)
+ return self._constructor(new_data).__finalize__(self)
- # Maybe set copy if we didn't actually change the index.
- if is_copy:
- if not result._get_axis(axis).equals(self._get_axis(axis)):
- result._set_is_copy(self)
+ def _take_with_is_copy(
+ self: FrameOrSeries, indices, axis=0, **kwargs
+ ) -> FrameOrSeries:
+ """
+ Internal version of the `take` method that sets the `_is_copy`
+ attribute to keep track of the parent dataframe (using in indexing
+ for the SettingWithCopyWarning).
+ See the docstring of `take` for full explanation of the parameters.
+ """
+ result = self.take(indices=indices, axis=axis, **kwargs)
+ # Maybe set copy if we didn't actually change the index.
+ if not result._get_axis(axis).equals(self._get_axis(axis)):
+ result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
@@ -3528,9 +3436,9 @@ class animal locomotion
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
- return self.take(inds, axis=axis)
+ return self._take_with_is_copy(inds, axis=axis)
else:
- return self.take(loc, axis=axis)
+ return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
@@ -3587,7 +3495,7 @@ def _iget_item_cache(self, item):
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
- lower = self.take(item, axis=self._info_axis_number)
+ lower = self._take_with_is_copy(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
@@ -3613,14 +3521,12 @@ def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
- def _set_is_copy(self, ref=None, copy: bool_t = True) -> None:
+ def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
- if ref is not None:
- self._is_copy = weakref.ref(ref)
- else:
- self._is_copy = None
+ assert ref is not None
+ self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
@@ -4121,7 +4027,6 @@ def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
def sort_values(
self,
- by=None,
axis=0,
ascending=True,
inplace: bool_t = False,
@@ -4222,69 +4127,6 @@ def sort_values(
"""
raise AbstractMethodError(self)
- def sort_index(
- self,
- axis=0,
- level=None,
- ascending: bool_t = True,
- inplace: bool_t = False,
- kind: str = "quicksort",
- na_position: str = "last",
- sort_remaining: bool_t = True,
- ignore_index: bool_t = False,
- ):
- """
- Sort object by labels (along an axis).
-
- Parameters
- ----------
- axis : {0 or 'index', 1 or 'columns'}, default 0
- The axis along which to sort. The value 0 identifies the rows,
- and 1 identifies the columns.
- level : int or level name or list of ints or list of level names
- If not None, sort on values in specified index level(s).
- ascending : bool, default True
- Sort ascending vs. descending.
- inplace : bool, default False
- If True, perform operation in-place.
- kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
- Choice of sorting algorithm. See also ndarray.np.sort for more
- information. `mergesort` is the only stable algorithm. For
- DataFrames, this option is only applied when sorting on a single
- column or label.
- na_position : {'first', 'last'}, default 'last'
- Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
- Not implemented for MultiIndex.
- sort_remaining : bool, default True
- If True and sorting by level and index is multilevel, sort by other
- levels too (in order) after sorting by specified level.
- ignore_index : bool, default False
- If True, the resulting axis will be labeled 0, 1, …, n - 1.
-
- .. versionadded:: 1.0.0
-
- Returns
- -------
- sorted_obj : DataFrame or None
- DataFrame with sorted index if inplace=False, None otherwise.
- """
- inplace = validate_bool_kwarg(inplace, "inplace")
- axis = self._get_axis_number(axis)
- axis_name = self._get_axis_name(axis)
- labels = self._get_axis(axis)
-
- if level is not None:
- raise NotImplementedError("level is not implemented")
- if inplace:
- raise NotImplementedError("inplace is not implemented")
-
- sort_index = labels.argsort()
- if not ascending:
- sort_index = sort_index[::-1]
-
- new_axis = labels.take(sort_index)
- return self.reindex(**{axis_name: new_axis})
-
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
@@ -5240,6 +5082,7 @@ def __finalize__(
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
+ assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
@@ -5367,11 +5210,6 @@ def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
- @property
- def _is_datelike_mixed_type(self):
- f = lambda: self._data.is_datelike_mixed_type
- return self._protect_consolidate(f)
-
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
@@ -5480,11 +5318,6 @@ def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
- @property
- def _get_values(self) -> np.ndarray:
- # compat
- return self.values
-
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
@@ -5879,6 +5712,7 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
+ convert_dtypes : Convert argument to best possible dtype.
Examples
--------
@@ -5907,6 +5741,142 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
)
).__finalize__(self)
+ def convert_dtypes(
+ self: FrameOrSeries,
+ infer_objects: bool_t = True,
+ convert_string: bool_t = True,
+ convert_integer: bool_t = True,
+ convert_boolean: bool_t = True,
+ ) -> FrameOrSeries:
+ """
+ Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
+
+ .. versionadded:: 1.0.0
+
+ Parameters
+ ----------
+ infer_objects : bool, default True
+ Whether object dtypes should be converted to the best possible types.
+ convert_string : bool, default True
+ Whether object dtypes should be converted to ``StringDtype()``.
+ convert_integer : bool, default True
+ Whether, if possible, conversion can be done to integer extension types.
+        convert_boolean : bool, default True
+ Whether object dtypes should be converted to ``BooleanDtypes()``.
+
+ Returns
+ -------
+ Series or DataFrame
+ Copy of input object with new dtype.
+
+ See Also
+ --------
+ infer_objects : Infer dtypes of objects.
+ to_datetime : Convert argument to datetime.
+ to_timedelta : Convert argument to timedelta.
+ to_numeric : Convert argument to a numeric type.
+
+ Notes
+ -----
+
+ By default, ``convert_dtypes`` will attempt to convert a Series (or each
+ Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
+ ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
+ possible to turn off individual conversions to ``StringDtype``, the integer
+ extension types or ``BooleanDtype``, respectively.
+
+ For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
+ rules as during normal Series/DataFrame construction. Then, if possible,
+ convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
+ type, otherwise leave as ``object``.
+
+ If the dtype is integer, convert to an appropriate integer extension type.
+
+ If the dtype is numeric, and consists of all integers, convert to an
+ appropriate integer extension type.
+
+ In the future, as new dtypes are added that support ``pd.NA``, the results
+ of this method will change to support those new dtypes.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame(
+ ... {
+ ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
+ ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
+ ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
+ ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
+ ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
+ ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
+ ... }
+ ... )
+
+ Start with a DataFrame with default dtypes.
+
+ >>> df
+ a b c d e f
+ 0 1 x True h 10.0 NaN
+ 1 2 y False i NaN 100.5
+ 2 3 z NaN NaN 20.0 200.0
+
+ >>> df.dtypes
+ a int32
+ b object
+ c object
+ d object
+ e float64
+ f float64
+ dtype: object
+
+ Convert the DataFrame to use best possible dtypes.
+
+ >>> dfn = df.convert_dtypes()
+ >>> dfn
+ a b c d e f
+ 0 1 x True h 10 NaN
+ 1 2 y False i <NA> 100.5
+ 2 3 z <NA> <NA> 20 200.0
+
+ >>> dfn.dtypes
+ a Int32
+ b string
+ c boolean
+ d string
+ e Int64
+ f float64
+ dtype: object
+
+ Start with a Series of strings and missing data represented by ``np.nan``.
+
+ >>> s = pd.Series(["a", "b", np.nan])
+ >>> s
+ 0 a
+ 1 b
+ 2 NaN
+ dtype: object
+
+ Obtain a Series with dtype ``StringDtype``.
+
+ >>> s.convert_dtypes()
+ 0 a
+ 1 b
+ 2 <NA>
+ dtype: string
+ """
+ if self.ndim == 1:
+ return self._convert_dtypes(
+ infer_objects, convert_string, convert_integer, convert_boolean
+ )
+ else:
+ results = [
+ col._convert_dtypes(
+ infer_objects, convert_string, convert_integer, convert_boolean
+ )
+ for col_name, col in self.items()
+ ]
+ result = pd.concat(results, axis=1, copy=False)
+ return result
+
# ----------------------------------------------------------------------
# Filling NA's
@@ -6682,9 +6652,9 @@ def replace(
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
- <http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
+ <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
- <http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
+ <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
@@ -7043,8 +7013,7 @@ def asof(self, where, subset=None):
# mask the missing
missing = locs == -1
- d = self.take(locs)
- data = d.copy()
+ data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
@@ -7237,7 +7206,7 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
- threshold = _align_method_FRAME(self, threshold, axis)
+ threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
@@ -7538,7 +7507,7 @@ def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
- Select values at particular time of day (e.g. 9:30AM).
+ Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
@@ -7590,7 +7559,7 @@ def at_time(
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
- return self.take(indexer, axis=axis)
+ return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
@@ -7672,7 +7641,7 @@ def between_time(
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
- return self.take(indexer, axis=axis)
+ return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
@@ -7686,7 +7655,7 @@ def resample(
base: int = 0,
on=None,
level=None,
- ):
+ ) -> "Resampler":
"""
Resample time-series data.
@@ -7951,10 +7920,10 @@ def resample(
2000-01-04 36 90
"""
- from pandas.core.resample import resample
+ from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
- return resample(
+ return get_resampler(
self,
freq=rule,
label=label,
@@ -8991,11 +8960,10 @@ def tshift(
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
- msg = (
- f"Given freq {freq.rule_code} does not match"
- f" PeriodIndex freq {orig_freq.rule_code}"
+ raise ValueError(
+ f"Given freq {freq.rule_code} does not match "
+ f"PeriodIndex freq {orig_freq.rule_code}"
)
- raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
@@ -9735,26 +9703,8 @@ def describe_categorical_1d(data):
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
-
- if is_datetime64_any_dtype(data):
- tz = data.dt.tz
- asint = data.dropna().values.view("i8")
- top = Timestamp(top)
- if top.tzinfo is not None and tz is not None:
- # Don't tz_localize(None) if key is already tz-aware
- top = top.tz_convert(tz)
- else:
- top = top.tz_localize(tz)
- names += ["top", "freq", "first", "last"]
- result += [
- top,
- freq,
- Timestamp(asint.min(), tz=tz),
- Timestamp(asint.max(), tz=tz),
- ]
- else:
- names += ["top", "freq"]
- result += [top, freq]
+ names += ["top", "freq"]
+ result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
@@ -9765,11 +9715,23 @@ def describe_categorical_1d(data):
return pd.Series(result, index=names, name=data.name, dtype=dtype)
+ def describe_timestamp_1d(data):
+ # GH-30164
+ stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
+ d = (
+ [data.count(), data.mean(), data.min()]
+ + data.quantile(percentiles).tolist()
+ + [data.max()]
+ )
+ return pd.Series(d, index=stat_index, name=data.name)
+
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
+ elif is_datetime64_any_dtype(data):
+ return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index c49677fa27a31..27dd6e953c219 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -5,7 +5,7 @@
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
-from collections import abc, defaultdict, namedtuple
+from collections import abc, namedtuple
import copy
from functools import partial
from textwrap import dedent
@@ -42,10 +42,8 @@
ensure_int64,
ensure_platform_int,
is_bool,
- is_dict_like,
is_integer_dtype,
is_interval_dtype,
- is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
@@ -53,6 +51,11 @@
)
from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna
+from pandas.core.aggregation import (
+ is_multi_agg_with_relabel,
+ maybe_mangle_lambdas,
+ normalize_keyword_aggregation,
+)
import pandas.core.algorithms as algorithms
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
@@ -249,7 +252,7 @@ def aggregate(self, func=None, *args, **kwargs):
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
- func = _maybe_mangle_lambdas(func)
+ func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
ret.columns = columns
@@ -918,9 +921,9 @@ class DataFrameGroupBy(GroupBy):
@Appender(_shared_docs["aggregate"])
def aggregate(self, func=None, *args, **kwargs):
- relabeling = func is None and _is_multi_agg_with_relabel(**kwargs)
+ relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
if relabeling:
- func, columns, order = _normalize_keyword_aggregation(kwargs)
+ func, columns, order = normalize_keyword_aggregation(kwargs)
kwargs = {}
elif isinstance(func, list) and len(func) > len(set(func)):
@@ -935,7 +938,7 @@ def aggregate(self, func=None, *args, **kwargs):
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
- func = _maybe_mangle_lambdas(func)
+ func = maybe_mangle_lambdas(func)
result, how = self._aggregate(func, *args, **kwargs)
if how is None:
@@ -1413,22 +1416,20 @@ def transform(self, func, *args, **kwargs):
# cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
- # If func is a reduction, we need to broadcast the
- # result to the whole group. Compute func result
- # and deal with possible broadcasting below.
- result = getattr(self, func)(*args, **kwargs)
-
- # a reduction transform
- if not isinstance(result, DataFrame):
- return self._transform_general(func, *args, **kwargs)
+ # GH 30918
+ # Use _transform_fast only when we know func is an aggregation
+ if func in base.reduction_kernels:
+ # If func is a reduction, we need to broadcast the
+ # result to the whole group. Compute func result
+ # and deal with possible broadcasting below.
+ result = getattr(self, func)(*args, **kwargs)
- obj = self._obj_with_exclusions
-
- # nuisance columns
- if not result.columns.equals(obj.columns):
- return self._transform_general(func, *args, **kwargs)
+ if isinstance(result, DataFrame) and result.columns.equals(
+ self._obj_with_exclusions.columns
+ ):
+ return self._transform_fast(result, func)
- return self._transform_fast(result, func)
+ return self._transform_general(func, *args, **kwargs)
def _transform_fast(self, result: DataFrame, func_nm: str) -> DataFrame:
"""
@@ -1860,190 +1861,6 @@ def groupby_series(obj, col=None):
boxplot = boxplot_frame_groupby
-def _is_multi_agg_with_relabel(**kwargs) -> bool:
- """
- Check whether kwargs passed to .agg look like multi-agg with relabeling.
-
- Parameters
- ----------
- **kwargs : dict
-
- Returns
- -------
- bool
-
- Examples
- --------
- >>> _is_multi_agg_with_relabel(a='max')
- False
- >>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
- ... a_min=('a', 'min'))
- True
- >>> _is_multi_agg_with_relabel()
- False
- """
- return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
- len(kwargs) > 0
- )
-
-
-def _normalize_keyword_aggregation(kwargs):
- """
- Normalize user-provided "named aggregation" kwargs.
-
- Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
- to the old Dict[str, List[scalar]]].
-
- Parameters
- ----------
- kwargs : dict
-
- Returns
- -------
- aggspec : dict
- The transformed kwargs.
- columns : List[str]
- The user-provided keys.
- col_idx_order : List[int]
- List of columns indices.
-
- Examples
- --------
- >>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
- ({'input': ['sum']}, ('output',), [('input', 'sum')])
- """
- # Normalize the aggregation functions as Mapping[column, List[func]],
- # process normally, then fixup the names.
- # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
- # May be hitting https://github.com/python/mypy/issues/5958
- # saying it doesn't have an attribute __name__
- aggspec = defaultdict(list)
- order = []
- columns, pairs = list(zip(*kwargs.items()))
-
- for name, (column, aggfunc) in zip(columns, pairs):
- aggspec[column].append(aggfunc)
- order.append((column, com.get_callable_name(aggfunc) or aggfunc))
-
- # uniquify aggfunc name if duplicated in order list
- uniquified_order = _make_unique(order)
-
- # GH 25719, due to aggspec will change the order of assigned columns in aggregation
- # uniquified_aggspec will store uniquified order list and will compare it with order
- # based on index
- aggspec_order = [
- (column, com.get_callable_name(aggfunc) or aggfunc)
- for column, aggfuncs in aggspec.items()
- for aggfunc in aggfuncs
- ]
- uniquified_aggspec = _make_unique(aggspec_order)
-
- # get the new indice of columns by comparison
- col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
- return aggspec, columns, col_idx_order
-
-
-def _make_unique(seq):
- """Uniquify aggfunc name of the pairs in the order list
-
- Examples:
- --------
- >>> _make_unique([('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')])
- [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
- """
- return [
- (pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
- if seq.count(pair) > 1
- else pair
- for i, pair in enumerate(seq)
- ]
-
-
-# TODO: Can't use, because mypy doesn't like us setting __name__
-# error: "partial[Any]" has no attribute "__name__"
-# the type is:
-# typing.Sequence[Callable[..., ScalarResult]]
-# -> typing.Sequence[Callable[..., ScalarResult]]:
-
-
-def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
- """
- Possibly mangle a list of aggfuncs.
-
- Parameters
- ----------
- aggfuncs : Sequence
-
- Returns
- -------
- mangled: list-like
- A new AggSpec sequence, where lambdas have been converted
- to have unique names.
-
- Notes
- -----
- If just one aggfunc is passed, the name will not be mangled.
- """
- if len(aggfuncs) <= 1:
- # don't mangle for .agg([lambda x: .])
- return aggfuncs
- i = 0
- mangled_aggfuncs = []
- for aggfunc in aggfuncs:
- if com.get_callable_name(aggfunc) == "<lambda>":
- aggfunc = partial(aggfunc)
- aggfunc.__name__ = f"<lambda_{i}>"
- i += 1
- mangled_aggfuncs.append(aggfunc)
-
- return mangled_aggfuncs
-
-
-def _maybe_mangle_lambdas(agg_spec: Any) -> Any:
- """
- Make new lambdas with unique names.
-
- Parameters
- ----------
- agg_spec : Any
- An argument to GroupBy.agg.
- Non-dict-like `agg_spec` are pass through as is.
- For dict-like `agg_spec` a new spec is returned
- with name-mangled lambdas.
-
- Returns
- -------
- mangled : Any
- Same type as the input.
-
- Examples
- --------
- >>> _maybe_mangle_lambdas('sum')
- 'sum'
-
- >>> _maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
- [<function __main__.<lambda_0>,
- <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
- """
- is_dict = is_dict_like(agg_spec)
- if not (is_dict or is_list_like(agg_spec)):
- return agg_spec
- mangled_aggspec = type(agg_spec)() # dict or OrderdDict
-
- if is_dict:
- for key, aggfuncs in agg_spec.items():
- if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
- mangled_aggfuncs = _managle_lambda_list(aggfuncs)
- else:
- mangled_aggfuncs = aggfuncs
-
- mangled_aggspec[key] = mangled_aggfuncs
- else:
- mangled_aggspec = _managle_lambda_list(agg_spec)
-
- return mangled_aggspec
-
-
def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
"""
If we have date/time like in the original, then coerce dates
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 233bdd11b372b..71e7aafbca27d 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -72,7 +72,7 @@ class providing the base-class of operations.
_apply_docs = dict(
template="""
- Apply function `func` group-wise and combine the results together.
+ Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
@@ -685,7 +685,7 @@ def get_group(self, name, obj=None):
if not len(inds):
raise KeyError(name)
- return obj.take(inds, axis=self.axis)
+ return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
@@ -813,9 +813,10 @@ def _try_cast(self, result, obj, numeric_only: bool = False):
# datetime64tz is handled correctly in agg_series,
# so is excluded here.
- # return the same type (Series) as our caller
- cls = dtype.construct_array_type()
- result = try_cast_to_ea(cls, result, dtype=dtype)
+ if len(result) and isinstance(result[0], dtype.type):
+ cls = dtype.construct_array_type()
+ result = try_cast_to_ea(cls, result, dtype=dtype)
+
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
@@ -969,22 +970,17 @@ def reset_identity(values):
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
- if isinstance(result, Series):
- result = result.reindex(ax)
+ # this is a very unfortunate situation
+ # we can't use reindex to restore the original order
+ # when the ax has duplicates
+ # so we resort to this
+ # GH 14776, 30667
+ if ax.has_duplicates:
+ indexer, _ = result.index.get_indexer_non_unique(ax.values)
+ indexer = algorithms.unique1d(indexer)
+ result = result.take(indexer, axis=self.axis)
else:
-
- # this is a very unfortunate situation
- # we have a multi-index that is NOT lexsorted
- # and we have a result which is duplicated
- # we can't reindex, so we resort to this
- # GH 14776
- if isinstance(ax, MultiIndex) and not ax.is_unique:
- indexer = algorithms.unique1d(
- result.index.get_indexer_for(ax.values)
- )
- result = result.take(indexer, axis=self.axis)
- else:
- result = result.reindex(ax, axis=self.axis)
+ result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
@@ -1364,17 +1360,17 @@ def groupby_function(
@Substitution(name="groupby", f=name)
@Appender(_common_see_also)
@Appender(_local_template)
- def f(self, **kwargs):
- if "numeric_only" not in kwargs:
- kwargs["numeric_only"] = numeric_only
- if "min_count" not in kwargs:
- kwargs["min_count"] = min_count
-
+ def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
- return self._cython_agg_general(alias, alt=npfunc, **kwargs)
+ return self._cython_agg_general(
+ how=alias,
+ alt=npfunc,
+ numeric_only=numeric_only,
+ min_count=min_count,
+ )
except DataError:
pass
except NotImplementedError as err:
@@ -1389,9 +1385,9 @@ def f(self, **kwargs):
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
- set_function_name(f, name, cls)
+ set_function_name(func, name, cls)
- return f
+ return func
def first_compat(x, axis=0):
def first(x):
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 0b89e702c9867..f0c6eedf5cee4 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -27,6 +27,7 @@
from pandas.core.groupby import ops
from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
+from pandas.core.indexes.base import InvalidIndexError
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
@@ -565,7 +566,7 @@ def is_in_axis(key) -> bool:
items = obj._data.items
try:
items.get_loc(key)
- except (KeyError, TypeError):
+ except (KeyError, TypeError, InvalidIndexError):
# TypeError shows up here if we pass e.g. Int64Index
return False
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 37067a1897a52..77c54ec736aaa 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -164,8 +164,8 @@ def apply(self, f, data: FrameOrSeries, axis: int = 0):
com.get_callable_name(f) not in base.plotting_methods
and isinstance(splitter, FrameSplitter)
and axis == 0
- # apply_frame_axis0 doesn't allow MultiIndex
- and not isinstance(sdata.index, MultiIndex)
+ # fast_apply/libreduction doesn't allow non-numpy backed indexes
+ and not sdata.index._has_complex_internals
):
try:
result_values, mutated = splitter.fast_apply(f, group_keys)
@@ -350,7 +350,7 @@ def get_group_levels(self):
def _is_builtin_func(self, arg):
"""
- if we define an builtin function for this argument, return it,
+ if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return SelectionMixin._builtin_table.get(arg, arg)
@@ -543,6 +543,17 @@ def _cython_operation(
if mask.any():
result = result.astype("float64")
result[mask] = np.nan
+ elif (
+ how == "add"
+ and is_integer_dtype(orig_values.dtype)
+ and is_extension_array_dtype(orig_values.dtype)
+ ):
+ # We need this to ensure that Series[Int64Dtype].resample().sum()
+ # remains int64 dtype.
+ # Two options for avoiding this special case
+ # 1. mask-aware ops and avoid casting to float with NaN above
+ # 2. specify the result dtype when calling this method
+ result = result.astype("int64")
if kind == "aggregate" and self._filter_empty_groups and not counts.all():
assert result.ndim != 2
@@ -616,8 +627,8 @@ def agg_series(self, obj: Series, func):
# TODO: can we get a performant workaround for EAs backed by ndarray?
return self._aggregate_series_pure_python(obj, func)
- elif isinstance(obj.index, MultiIndex):
- # MultiIndex; Pre-empt TypeError in _aggregate_series_fast
+ elif obj.index._has_complex_internals:
+ # Pre-empt TypeError in _aggregate_series_fast
return self._aggregate_series_pure_python(obj, func)
try:
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a9c8e6731a17e..8cff53d7a8b74 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -27,4 +27,5 @@
"pandas.core.index is deprecated and will be removed in a future version. "
"The public classes are available in the top-level namespace.",
FutureWarning,
+ stacklevel=2,
)
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index 4d45769d2fea9..fe475527f4596 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -1,11 +1,18 @@
"""
Low-dependency indexing utilities.
"""
+import warnings
+
import numpy as np
-from pandas._typing import AnyArrayLike
+from pandas._typing import Any, AnyArrayLike
-from pandas.core.dtypes.common import is_list_like
+from pandas.core.dtypes.common import (
+ is_array_like,
+ is_bool_dtype,
+ is_integer_dtype,
+ is_list_like,
+)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
@@ -244,33 +251,65 @@ def length_of_indexer(indexer, target=None) -> int:
raise AssertionError("cannot find the length of the indexer")
-def check_bool_array_indexer(array: AnyArrayLike, mask: AnyArrayLike) -> np.ndarray:
+def deprecate_ndim_indexing(result):
+ """
+ Helper function to raise the deprecation warning for multi-dimensional
+ indexing on 1D Series/Index.
+
+ GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
+ and keep an index, so we currently return ndarray, which is deprecated
+ (Deprecation GH#30588).
"""
- Check if `mask` is a valid boolean indexer for `array`.
+ if np.ndim(result) > 1:
+ warnings.warn(
+ "Support for multi-dimensional indexing (e.g. `index[:, None]`) "
+ "on an Index is deprecated and will be removed in a future "
+ "version. Convert to a numpy array before indexing instead.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+
+# -----------------------------------------------------------
+# Public indexer validation
- `array` and `mask` are checked to have the same length, and the
- dtype is validated.
+
+def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
+ """
+ Check if `indexer` is a valid array indexer for `array`.
+
+ For a boolean mask, `array` and `indexer` are checked to have the same
+ length. The dtype is validated, and if it is an integer or boolean
+ ExtensionArray, it is checked if there are missing values present, and
+ it is converted to the appropriate numpy array. Other dtypes will raise
+ an error.
+
+ Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
+ through as is.
.. versionadded:: 1.0.0
Parameters
----------
- array : array
- The array that's being masked.
- mask : array
- The boolean array that's masking.
+ array : array-like
+ The array that is being indexed (only used for the length).
+ indexer : array-like or list-like
+ The array-like that's used to index. List-like input that is not yet
+ a numpy array or an ExtensionArray is converted to one. Other input
+        types are passed through as is.
Returns
-------
numpy.ndarray
- The validated boolean mask.
+ The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
- When `mask` cannot be converted to a bool-dtype ndarray.
+ When `indexer` cannot be converted to a numpy ndarray to index
+ (e.g. presence of missing values).
See Also
--------
@@ -278,32 +317,100 @@ def check_bool_array_indexer(array: AnyArrayLike, mask: AnyArrayLike) -> np.ndar
Examples
--------
- A boolean ndarray is returned when the arguments are all valid.
+ When checking a boolean mask, a boolean ndarray is returned when the
+ arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
- >>> pd.api.extensions.check_bool_array_indexer(arr, mask)
+ >>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
- >>> pd.api.extensions.check_bool_array_indexer(arr, mask)
+ >>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
- IndexError: Item wrong length 3 instead of 2.
+ IndexError: Boolean index has wrong length: 3 instead of 2.
A ValueError is raised when the mask cannot be converted to
a bool-dtype ndarray.
>>> mask = pd.array([True, pd.NA])
- >>> pd.api.extensions.check_bool_array_indexer(arr, mask)
+ >>> pd.api.indexers.check_array_indexer(arr, mask)
+ Traceback (most recent call last):
+ ...
+ ValueError: Cannot mask with a boolean indexer containing NA values
+
+ A numpy boolean mask will get passed through (if the length is correct):
+
+ >>> mask = np.array([True, False])
+ >>> pd.api.indexers.check_array_indexer(arr, mask)
+ array([ True, False])
+
+ Similarly for integer indexers, an integer ndarray is returned when it is
+ a valid indexer, otherwise an error is (for integer indexers, a matching
+ length is not required):
+
+ >>> indexer = pd.array([0, 2], dtype="Int64")
+ >>> arr = pd.array([1, 2, 3])
+ >>> pd.api.indexers.check_array_indexer(arr, indexer)
+ array([0, 2])
+
+ >>> indexer = pd.array([0, pd.NA], dtype="Int64")
+ >>> pd.api.indexers.check_array_indexer(arr, indexer)
+ Traceback (most recent call last):
+ ...
+ ValueError: Cannot index with an integer indexer containing NA values
+
+ For non-integer/boolean dtypes, an appropriate error is raised:
+
+ >>> indexer = np.array([0., 2.], dtype="float64")
+ >>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
- ValueError: cannot convert to bool numpy array in presence of missing values
+ IndexError: arrays used as indices must be of integer or boolean type
"""
- result = np.asarray(mask, dtype=bool)
- # GH26658
- if len(result) != len(array):
- raise IndexError(f"Item wrong length {len(result)} instead of {len(array)}.")
- return result
+ from pandas.core.construction import array as pd_array
+
+    # whatever is not an array-like is returned as-is (possible valid array
+ # indexers that are not array-like: integer, slice, Ellipsis, None)
+ # In this context, tuples are not considered as array-like, as they have
+ # a specific meaning in indexing (multi-dimensional indexing)
+    if is_list_like(indexer):
+        if isinstance(indexer, tuple):
+            return indexer
+    else:
+        return indexer
+
+ # convert list-likes to array
+ if not is_array_like(indexer):
+ indexer = pd_array(indexer)
+ if len(indexer) == 0:
+ # empty list is converted to float array by pd.array
+ indexer = np.array([], dtype=np.intp)
+
+ dtype = indexer.dtype
+ if is_bool_dtype(dtype):
+ try:
+ indexer = np.asarray(indexer, dtype=bool)
+ except ValueError:
+ raise ValueError("Cannot mask with a boolean indexer containing NA values")
+
+ # GH26658
+ if len(indexer) != len(array):
+ raise IndexError(
+ f"Boolean index has wrong length: "
+ f"{len(indexer)} instead of {len(array)}"
+ )
+ elif is_integer_dtype(dtype):
+ try:
+ indexer = np.asarray(indexer, dtype=np.intp)
+ except ValueError:
+ raise ValueError(
+ "Cannot index with an integer indexer containing NA values"
+ )
+ else:
+ raise IndexError("arrays used as indices must be of integer or boolean type")
+
+ return indexer
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 4072d06b9427c..0a23d38ace37e 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -63,7 +63,7 @@
def get_objs_combined_axis(
- objs, intersect: bool = False, axis=0, sort: bool = True
+ objs, intersect: bool = False, axis=0, sort: bool = True, copy: bool = False
) -> Index:
"""
Extract combined index: return intersection or union (depending on the
@@ -81,13 +81,15 @@ def get_objs_combined_axis(
The axis to extract indexes from.
sort : bool, default True
Whether the result index should come out sorted or not.
+ copy : bool, default False
+ If True, return a copy of the combined index.
Returns
-------
Index
"""
obs_idxes = [obj._get_axis(axis) for obj in objs]
- return _get_combined_index(obs_idxes, intersect=intersect, sort=sort)
+ return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
def _get_distinct_objs(objs: List[Index]) -> List[Index]:
@@ -105,7 +107,10 @@ def _get_distinct_objs(objs: List[Index]) -> List[Index]:
def _get_combined_index(
- indexes: List[Index], intersect: bool = False, sort: bool = False
+ indexes: List[Index],
+ intersect: bool = False,
+ sort: bool = False,
+ copy: bool = False,
) -> Index:
"""
Return the union or intersection of indexes.
@@ -119,6 +124,8 @@ def _get_combined_index(
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
+ copy : bool, default False
+ If True, return a copy of the combined index.
Returns
-------
@@ -143,6 +150,11 @@ def _get_combined_index(
index = index.sort_values()
except TypeError:
pass
+
+ # GH 29879
+ if copy:
+ index = index.copy()
+
return index
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ca929b188dc33..f5f793c507480 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1,7 +1,7 @@
from datetime import datetime
import operator
from textwrap import dedent
-from typing import Dict, FrozenSet, Hashable, Optional, Union
+from typing import Any, FrozenSet, Hashable, Optional, Union
import warnings
import numpy as np
@@ -12,6 +12,7 @@
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
+from pandas._typing import Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -50,13 +51,12 @@
from pandas.core.dtypes.generic import (
ABCCategorical,
ABCDataFrame,
- ABCDatetimeArray,
ABCDatetimeIndex,
- ABCIndexClass,
ABCIntervalIndex,
ABCMultiIndex,
ABCPandasArray,
ABCPeriodIndex,
+ ABCRangeIndex,
ABCSeries,
ABCTimedeltaIndex,
)
@@ -68,8 +68,7 @@
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
-from pandas.core.construction import extract_array
-from pandas.core.indexers import maybe_convert_indices
+from pandas.core.indexers import deprecate_ndim_indexing, maybe_convert_indices
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
@@ -77,6 +76,7 @@
from pandas.core.strings import StringMethods
from pandas.io.formats.printing import (
+ PrettyDict,
default_pprint,
format_object_attrs,
format_object_summary,
@@ -96,6 +96,7 @@
duplicated="np.ndarray",
)
_index_shared_docs = dict()
+str_t = str
def _make_comparison_op(op, cls):
@@ -244,7 +245,7 @@ def _outer_indexer(self, left, right):
_typ = "index"
_data: Union[ExtensionArray, np.ndarray]
_id = None
- _name: Optional[Hashable] = None
+ _name: Label = None
# MultiIndex.levels previously allowed setting the index name. We
# don't allow this anymore, and raise if it happens rather than
# failing silently.
@@ -278,10 +279,6 @@ def __new__(
) -> "Index":
from pandas.core.indexes.range import RangeIndex
- from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex
- from pandas.core.indexes.numeric import Float64Index, Int64Index, UInt64Index
- from pandas.core.indexes.interval import IntervalIndex
- from pandas.core.indexes.category import CategoricalIndex
name = maybe_extract_name(name, data, cls)
@@ -297,10 +294,16 @@ def __new__(
# categorical
elif is_categorical_dtype(data) or is_categorical_dtype(dtype):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas.core.indexes.category import CategoricalIndex
+
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)
# interval
elif is_interval_dtype(data) or is_interval_dtype(dtype):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas.core.indexes.interval import IntervalIndex
+
closed = kwargs.pop("closed", None)
if is_dtype_equal(_o_dtype, dtype):
return IntervalIndex(
@@ -315,6 +318,9 @@ def __new__(
or is_datetime64_any_dtype(dtype)
or "tz" in kwargs
):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas import DatetimeIndex
+
if is_dtype_equal(_o_dtype, dtype):
# GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
# will raise in the where `data` is already tz-aware. So
@@ -329,6 +335,9 @@ def __new__(
return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas import TimedeltaIndex
+
if is_dtype_equal(_o_dtype, dtype):
# Note we can pass copy=False because the .astype below
# will always make a copy
@@ -339,6 +348,9 @@ def __new__(
return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
elif is_period_dtype(data) or is_period_dtype(dtype):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas import PeriodIndex
+
if is_dtype_equal(_o_dtype, dtype):
return PeriodIndex(data, copy=False, name=name, **kwargs).astype(object)
return PeriodIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)
@@ -358,6 +370,13 @@ def __new__(
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
+ # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
+ from pandas.core.indexes.numeric import (
+ Float64Index,
+ Int64Index,
+ UInt64Index,
+ )
+
if dtype is not None:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
@@ -396,7 +415,7 @@ def __new__(
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
- return cls._simple_new(subarr, name, **kwargs)
+ return cls._simple_new(subarr, name)
elif hasattr(data, "__array__"):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)
@@ -459,11 +478,7 @@ def _simple_new(cls, values, name=None, dtype=None):
Must be careful not to recurse.
"""
- if isinstance(values, (ABCSeries, ABCIndexClass)):
- # Index._data must always be an ndarray.
- # This is no-copy for when _values is an ndarray,
- # which should be always at this point.
- values = np.asarray(values._values)
+ assert isinstance(values, np.ndarray), type(values)
result = object.__new__(cls)
result._data = values
@@ -509,17 +524,9 @@ def _get_attributes_dict(self):
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
+
attributes = self._get_attributes_dict()
attributes.update(kwargs)
- if not len(values) and "dtype" not in kwargs:
- attributes["dtype"] = self.dtype
-
- # _simple_new expects an the type of self._data
- values = getattr(values, "_values", values)
- if isinstance(values, ABCDatetimeArray):
- # `self.values` returns `self` for tz-aware, so we need to unwrap
- # more specifically
- values = values.asi8
return self._simple_new(values, **attributes)
@@ -540,6 +547,7 @@ def _shallow_copy_with_infer(self, values, **kwargs):
attributes.update(kwargs)
attributes["copy"] = False
if not len(values) and "dtype" not in kwargs:
+ # TODO: what if hasattr(values, "dtype")?
attributes["dtype"] = self.dtype
if self._infer_as_myclass:
try:
@@ -867,7 +875,7 @@ def __deepcopy__(self, memo=None):
# --------------------------------------------------------------------
# Rendering Methods
- def __repr__(self):
+ def __repr__(self) -> str_t:
"""
Return a string representation for this object.
"""
@@ -886,7 +894,7 @@ def __repr__(self):
return res
- def _format_space(self):
+ def _format_space(self) -> str_t:
# using space here controls if the attributes
# are line separated or not (the default)
@@ -903,18 +911,19 @@ def _formatter_func(self):
"""
return default_pprint
- def _format_data(self, name=None):
+ def _format_data(self, name=None) -> str_t:
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
- is_justify = not (
- self.inferred_type in ("string", "unicode")
- or (
- self.inferred_type == "categorical" and is_object_dtype(self.categories)
- )
- )
+ is_justify = True
+
+ if self.inferred_type == "string":
+ is_justify = False
+ elif self.inferred_type == "categorical":
+ if is_object_dtype(self.categories): # type: ignore
+ is_justify = False
return format_object_summary(
self, self._formatter_func, is_justify=is_justify, name=name
@@ -930,7 +939,7 @@ def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
- def format(self, name=False, formatter=None, **kwargs):
+ def format(self, name: bool = False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
@@ -1016,7 +1025,7 @@ def _format_native_types(self, na_rep="", quoting=None, **kwargs):
values[mask] = na_rep
return values
- def _summary(self, name=None):
+ def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
@@ -1096,7 +1105,7 @@ def to_series(self, index=None, name=None):
return Series(self.values.copy(), index=index, name=name)
- def to_frame(self, index=True, name=None):
+ def to_frame(self, index: bool = True, name=None):
"""
Create a DataFrame with a column containing the Index.
@@ -1163,6 +1172,9 @@ def to_frame(self, index=True, name=None):
@property
def name(self):
+ """
+ Return Index or MultiIndex name.
+ """
return self._name
@name.setter
@@ -1176,7 +1188,7 @@ def name(self, value):
maybe_extract_name(value, None, type(self))
self._name = value
- def _validate_names(self, name=None, names=None, deep=False):
+ def _validate_names(self, name=None, names=None, deep: bool = False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
@@ -1229,7 +1241,7 @@ def _set_names(self, values, level=None):
names = property(fset=_set_names, fget=_get_names)
- def set_names(self, names, level=None, inplace=False):
+ def set_names(self, names, level=None, inplace: bool = False):
"""
Set Index or MultiIndex name.
@@ -1398,7 +1410,7 @@ def _validate_index_level(self, level):
f"Requested level ({level}) does not match index name ({self.name})"
)
- def _get_level_number(self, level):
+ def _get_level_number(self, level) -> int:
self._validate_index_level(level)
return 0
@@ -1568,7 +1580,7 @@ def is_monotonic(self) -> bool:
return self.is_monotonic_increasing
@property
- def is_monotonic_increasing(self):
+ def is_monotonic_increasing(self) -> bool:
"""
Return if the index is monotonic increasing (only equal or
increasing) values.
@@ -1644,21 +1656,230 @@ def is_unique(self) -> bool:
@property
def has_duplicates(self) -> bool:
+ """
+ Check if the Index has duplicate values.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index has duplicate values.
+
+ Examples
+ --------
+ >>> idx = pd.Index([1, 5, 7, 7])
+ >>> idx.has_duplicates
+ True
+
+ >>> idx = pd.Index([1, 5, 7])
+ >>> idx.has_duplicates
+ False
+
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
+ ... "Watermelon"]).astype("category")
+ >>> idx.has_duplicates
+ True
+
+ >>> idx = pd.Index(["Orange", "Apple",
+ ... "Watermelon"]).astype("category")
+ >>> idx.has_duplicates
+ False
+ """
return not self.is_unique
def is_boolean(self) -> bool:
+ """
+ Check if the Index only consists of booleans.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index only consists of booleans.
+
+ See Also
+ --------
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index([True, False, True])
+ >>> idx.is_boolean()
+ True
+
+ >>> idx = pd.Index(["True", "False", "True"])
+ >>> idx.is_boolean()
+ False
+
+ >>> idx = pd.Index([True, False, "True"])
+ >>> idx.is_boolean()
+ False
+ """
return self.inferred_type in ["boolean"]
def is_integer(self) -> bool:
+ """
+ Check if the Index only consists of integers.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index only consists of integers.
+
+ See Also
+ --------
+ is_boolean : Check if the Index only consists of booleans.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index([1, 2, 3, 4])
+ >>> idx.is_integer()
+ True
+
+ >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+ >>> idx.is_integer()
+ False
+
+ >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
+ >>> idx.is_integer()
+ False
+ """
return self.inferred_type in ["integer"]
def is_floating(self) -> bool:
+ """
+ Check if the Index is a floating type.
+
+ The Index may consist of only floats, NaNs, or a mix of floats,
+ integers, or NaNs.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index only consists of floats, NaNs, or
+ a mix of floats, integers, or NaNs.
+
+ See Also
+ --------
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+ >>> idx.is_floating()
+ True
+
+ >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
+ >>> idx.is_floating()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4, np.nan])
+ >>> idx.is_floating()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4])
+ >>> idx.is_floating()
+ False
+ """
return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
def is_numeric(self) -> bool:
+ """
+ Check if the Index only consists of numeric data.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index only consists of numeric data.
+
+ See Also
+ --------
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+ >>> idx.is_numeric()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4.0])
+ >>> idx.is_numeric()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4])
+ >>> idx.is_numeric()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
+ >>> idx.is_numeric()
+ True
+
+ >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
+ >>> idx.is_numeric()
+ False
+ """
return self.inferred_type in ["integer", "floating"]
def is_object(self) -> bool:
+ """
+ Check if the Index is of the object dtype.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index is of the object dtype.
+
+ See Also
+ --------
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
+ >>> idx.is_object()
+ True
+
+ >>> idx = pd.Index(["Apple", "Mango", 2.0])
+ >>> idx.is_object()
+ True
+
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
+ ... "Watermelon"]).astype("category")
+ >>> idx.is_object()
+ False
+
+ >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+ >>> idx.is_object()
+ False
+ """
return is_object_dtype(self.dtype)
def is_categorical(self) -> bool:
@@ -1667,12 +1888,19 @@ def is_categorical(self) -> bool:
Returns
-------
- boolean
+ bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_interval : Check if the Index holds Interval objects.
+ is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
@@ -1698,19 +1926,77 @@ def is_categorical(self) -> bool:
return self.inferred_type in ["categorical"]
def is_interval(self) -> bool:
+ """
+ Check if the Index holds Interval objects.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index holds Interval objects.
+
+ See Also
+ --------
+ IntervalIndex : Index for Interval objects.
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_mixed : Check if the Index holds data with mixed data types.
+
+ Examples
+ --------
+ >>> idx = pd.Index([pd.Interval(left=0, right=5),
+ ... pd.Interval(left=5, right=10)])
+ >>> idx.is_interval()
+ True
+
+ >>> idx = pd.Index([1, 3, 5, 7])
+ >>> idx.is_interval()
+ False
+ """
return self.inferred_type in ["interval"]
def is_mixed(self) -> bool:
+ """
+ Check if the Index holds data with mixed data types.
+
+ Returns
+ -------
+ bool
+ Whether or not the Index holds data with mixed data types.
+
+ See Also
+ --------
+ is_boolean : Check if the Index only consists of booleans.
+ is_integer : Check if the Index only consists of integers.
+ is_floating : Check if the Index is a floating type.
+ is_numeric : Check if the Index only consists of numeric data.
+ is_object : Check if the Index is of the object dtype.
+ is_categorical : Check if the Index holds categorical data.
+ is_interval : Check if the Index holds Interval objects.
+
+ Examples
+ --------
+ >>> idx = pd.Index(['a', np.nan, 'b'])
+ >>> idx.is_mixed()
+ True
+
+ >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])
+ >>> idx.is_mixed()
+ False
+ """
return self.inferred_type in ["mixed"]
- def holds_integer(self):
+ def holds_integer(self) -> bool:
"""
Whether the type is an integer type.
"""
return self.inferred_type in ["integer", "mixed-integer"]
@cache_readonly
- def inferred_type(self):
+ def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
"""
@@ -1718,6 +2004,9 @@ def inferred_type(self):
@cache_readonly
def is_all_dates(self) -> bool:
+ """
+ Whether or not the index values only consist of dates.
+ """
return is_datetime_array(ensure_object(self.values))
# --------------------------------------------------------------------
@@ -1755,7 +2044,7 @@ def _nan_idxs(self):
return np.array([], dtype=np.int64)
@cache_readonly
- def hasnans(self):
+ def hasnans(self) -> bool:
"""
Return if I have any nans; enables various perf speedups.
"""
@@ -2060,13 +2349,13 @@ def duplicated(self, keep="first"):
"""
return super().duplicated(keep=keep)
- def _get_unique_index(self, dropna=False):
+ def _get_unique_index(self, dropna: bool = False):
"""
Returns an index containing unique values.
Parameters
----------
- dropna : bool
+ dropna : bool, default False
If True, NaN values are dropped.
Returns
@@ -2080,6 +2369,9 @@ def _get_unique_index(self, dropna=False):
if not self.is_unique:
values = self.unique()
+ if not isinstance(self, ABCMultiIndex):
+ # extract an array to pass to _shallow_copy
+ values = values._data
if dropna:
try:
@@ -2173,7 +2465,7 @@ def _union_incompatible_dtypes(self, other, sort):
other = Index(other).astype(object, copy=False)
return Index.union(this, other, sort=sort).astype(object, copy=False)
- def _is_compatible_with_other(self, other):
+ def _is_compatible_with_other(self, other) -> bool:
"""
Check whether this and the other dtype are compatible with each other.
Meaning a union can be formed between them without needing to be cast
@@ -2686,7 +2978,9 @@ def get_loc(self, key, method=None, tolerance=None):
"""
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
- def get_indexer(self, target, method=None, limit=None, tolerance=None):
+ def get_indexer(
+ self, target, method=None, limit=None, tolerance=None
+ ) -> np.ndarray:
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
if tolerance is not None:
@@ -2743,14 +3037,16 @@ def _convert_tolerance(self, tolerance, target):
raise ValueError("list-like tolerance size must match target index size")
return tolerance
- def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
+ def _get_fill_indexer(
+ self, target: "Index", method: str_t, limit=None, tolerance=None
+ ) -> np.ndarray:
if self.is_monotonic_increasing and target.is_monotonic_increasing:
- method = (
+ engine_method = (
self._engine.get_pad_indexer
if method == "pad"
else self._engine.get_backfill_indexer
)
- indexer = method(target._ndarray_values, limit)
+ indexer = engine_method(target._ndarray_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
if tolerance is not None:
@@ -2759,7 +3055,9 @@ def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
)
return indexer
- def _get_fill_indexer_searchsorted(self, target, method, limit=None):
+ def _get_fill_indexer_searchsorted(
+ self, target: "Index", method: str_t, limit=None
+ ) -> np.ndarray:
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
@@ -2790,7 +3088,7 @@ def _get_fill_indexer_searchsorted(self, target, method, limit=None):
indexer[indexer == len(self)] = -1
return indexer
- def _get_nearest_indexer(self, target, limit, tolerance):
+ def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray:
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
@@ -2813,7 +3111,9 @@ def _get_nearest_indexer(self, target, limit, tolerance):
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
- def _filter_indexer_tolerance(self, target, indexer, tolerance):
+ def _filter_indexer_tolerance(
+ self, target: "Index", indexer: np.ndarray, tolerance
+ ) -> np.ndarray:
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
@@ -2829,28 +3129,29 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
if kind == "iloc":
- return self._validate_indexer("positional", key, kind)
+ self._validate_indexer("positional", key, "iloc")
+ return key
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
- # is positional indexing (eg. .ix on with a float)
+ # is positional indexing (eg. .loc on with a float)
# or label indexing if we are using a type able
# to be represented in the index
- if kind in ["getitem", "ix"] and is_float(key):
+ if kind == "getitem" and is_float(key):
if not self.is_floating():
- return self._invalid_indexer("label", key)
+ self._invalid_indexer("label", key)
- elif kind in ["loc"] and is_float(key):
+ elif kind == "loc" and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
@@ -2860,12 +3161,11 @@ def _convert_scalar_indexer(self, key, kind=None):
"mixed-integer-float",
"integer-na",
"string",
- "unicode",
"mixed",
]:
self._invalid_indexer("label", key)
- elif kind in ["loc"] and is_integer(key):
+ elif kind == "loc" and is_integer(key):
if not self.holds_integer():
self._invalid_indexer("label", key)
@@ -2882,20 +3182,19 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs["_convert_slice_indexer"])
def _convert_slice_indexer(self, key: slice, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# validate iloc
if kind == "iloc":
- return slice(
- self._validate_indexer("slice", key.start, kind),
- self._validate_indexer("slice", key.stop, kind),
- self._validate_indexer("slice", key.step, kind),
- )
+ self._validate_indexer("slice", key.start, "iloc")
+ self._validate_indexer("slice", key.stop, "iloc")
+ self._validate_indexer("slice", key.step, "iloc")
+ return key
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
@@ -2916,11 +3215,10 @@ def is_int(v):
integers
"""
if self.is_integer() or is_index_slice:
- return slice(
- self._validate_indexer("slice", key.start, kind),
- self._validate_indexer("slice", key.stop, kind),
- self._validate_indexer("slice", key.step, kind),
- )
+ self._validate_indexer("slice", key.start, "getitem")
+ self._validate_indexer("slice", key.stop, "getitem")
+ self._validate_indexer("slice", key.step, "getitem")
+ return key
# convert the slice to an indexer here
@@ -3016,7 +3314,7 @@ def _convert_index_indexer(self, keyarr):
----------
keyarr : Index (or sub-class)
Indexer to convert.
- kind : iloc, ix, loc, optional
+ kind : iloc, loc, optional
Returns
-------
@@ -3026,10 +3324,9 @@ def _convert_index_indexer(self, keyarr):
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
if (
- kind in [None, "iloc", "ix"]
+ kind in [None, "iloc"]
and is_integer_dtype(keyarr)
and not self.is_floating()
- and not isinstance(keyarr, ABCPeriodIndex)
):
if self.inferred_type == "mixed-integer":
@@ -3051,7 +3348,7 @@ def _convert_list_indexer(self, keyarr, kind=None):
return None
- def _invalid_indexer(self, form, key):
+ def _invalid_indexer(self, form: str_t, key):
"""
Consistent invalid indexer message.
"""
@@ -3106,7 +3403,10 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
- values = self._data[:0] # appropriately-dtyped empty array
+ if isinstance(self, ABCRangeIndex):
+ values = range(0)
+ else:
+ values = self._data[:0] # appropriately-dtyped empty array
target = self._simple_new(values, dtype=self.dtype, **attrs)
else:
target = ensure_index(target)
@@ -3619,7 +3919,7 @@ def _wrap_joined_index(self, joined, other):
# Uncategorized Methods
@property
- def values(self):
+ def values(self) -> np.ndarray:
"""
Return an array representing the data in the Index.
@@ -3640,17 +3940,28 @@ def values(self):
"""
return self._data.view(np.ndarray)
+ @cache_readonly
+ @Appender(IndexOpsMixin.array.__doc__) # type: ignore
+ def array(self) -> ExtensionArray:
+ array = self._data
+ if isinstance(array, np.ndarray):
+ from pandas.core.arrays.numpy_ import PandasArray
+
+ array = PandasArray(array)
+ return array
+
@property
- def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:
- # TODO(EA): remove index types as they become extension arrays
+ def _values(self) -> Union[ExtensionArray, np.ndarray]:
"""
The best array representation.
- This is an ndarray, ExtensionArray, or Index subclass. This differs
- from ``_ndarray_values``, which always returns an ndarray.
+ This is an ndarray or ExtensionArray. This differs from
+ ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
- ``Series`` and ``Index``.
+ ``Series`` and ``Index`` (except for datetime64[ns], which returns
+ a DatetimeArray for _values on the Index, but ndarray[M8ns] on the
+ Series).
It may differ from the public '.values' method.
@@ -3658,8 +3969,8 @@ def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:
----------------- | --------------- | ------------- | --------------- |
Index | ndarray | ndarray | ndarray |
CategoricalIndex | Categorical | Categorical | ndarray[int] |
- DatetimeIndex | ndarray[M8ns] | ndarray[M8ns] | ndarray[M8ns] |
- DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
+ DatetimeIndex | ndarray[M8ns] | DatetimeArray | ndarray[M8ns] |
+ DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | ndarray[M8ns] |
PeriodIndex | ndarray[object] | PeriodArray | ndarray[int] |
IntervalIndex | IntervalArray | IntervalArray | ndarray[object] |
@@ -3670,7 +3981,7 @@ def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:
"""
return self._data
- def _internal_get_values(self):
+ def _internal_get_values(self) -> np.ndarray:
"""
Return `Index` data as an `numpy.ndarray`.
@@ -3715,7 +4026,7 @@ def _internal_get_values(self):
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
- def memory_usage(self, deep=False):
+ def memory_usage(self, deep: bool = False) -> int:
result = super().memory_usage(deep=deep)
# include our engine hashtable
@@ -3815,6 +4126,14 @@ def _assert_can_do_op(self, value):
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
+ @property
+ def _has_complex_internals(self) -> bool:
+ """
+ Indicates if an index is not directly backed by a numpy array
+ """
+ # used to avoid libreduction code paths, which raise or require conversion
+ return False
+
def _is_memory_usage_qualified(self) -> bool:
"""
Return a boolean if we need a qualified .info display.
@@ -3860,7 +4179,7 @@ def is_type_compatible(self, kind) -> bool:
"""
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
- def __contains__(self, key) -> bool:
+ def __contains__(self, key: Any) -> bool:
hash(key)
try:
return key in self._engine
@@ -4164,7 +4483,7 @@ def asof_locs(self, where, mask):
return result
- def sort_values(self, return_indexer=False, ascending=True):
+ def sort_values(self, return_indexer: bool = False, ascending: bool = True):
"""
Return a sorted copy of the index.
@@ -4282,7 +4601,7 @@ def shift(self, periods=1, freq=None):
"""
raise NotImplementedError(f"Not supported for type {type(self).__name__}")
- def argsort(self, *args, **kwargs):
+ def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Return the integer indices that would sort the index.
@@ -4337,57 +4656,45 @@ def argsort(self, *args, **kwargs):
@Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
def get_value(self, series, key):
- # if we have something that is Index-like, then
- # use this, e.g. DatetimeIndex
- # Things like `Series._get_value` (via .at) pass the EA directly here.
- s = extract_array(series, extract_numpy=True)
- if isinstance(s, ExtensionArray):
- if is_scalar(key):
- # GH 20882, 21257
- # First try to convert the key to a location
- # If that fails, raise a KeyError if an integer
- # index, otherwise, see if key is an integer, and
- # try that
- try:
- iloc = self.get_loc(key)
- return s[iloc]
- except KeyError:
- if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
- raise
- elif is_integer(key):
- return s[key]
- else:
- # if key is not a scalar, directly raise an error (the code below
- # would convert to numpy arrays and raise later any way) - GH29926
- raise InvalidIndexError(key)
-
- s = com.values_from_object(series)
- k = com.values_from_object(key)
+ if not is_scalar(key):
+ # if key is not a scalar, directly raise an error (the code below
+ # would convert to numpy arrays and raise later any way) - GH29926
+ raise InvalidIndexError(key)
- k = self._convert_scalar_indexer(k, kind="getitem")
try:
- return self._engine.get_value(s, k, tz=getattr(series.dtype, "tz", None))
- except KeyError as e1:
+ # GH 20882, 21257
+ # First try to convert the key to a location
+ # If that fails, raise a KeyError if an integer
+ # index, otherwise, see if key is an integer, and
+ # try that
+ loc = self._engine.get_loc(key)
+ except KeyError:
if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
raise
-
- try:
- return libindex.get_value_at(s, key)
- except IndexError:
+ elif is_integer(key):
+ # If the Index cannot hold integer, then this is unambiguously
+ # a locational lookup.
+ loc = key
+ else:
raise
- except TypeError:
- # generator/iterator-like
- if is_iterator(key):
- raise InvalidIndexError(key)
- else:
- raise e1
- except Exception:
- raise e1
- except TypeError:
- # e.g. "[False] is an invalid key"
- if is_scalar(key):
- raise IndexError(key)
- raise InvalidIndexError(key)
+
+ return self._get_values_for_loc(series, loc)
+
+ def _get_values_for_loc(self, series, loc):
+ """
+ Do a positional lookup on the given Series, returning either a scalar
+ or a Series.
+
+ Assumes that `series.index is self`
+ """
+ if is_integer(loc):
+ if isinstance(series._values, np.ndarray):
+ # Since we have an ndarray and not DatetimeArray, we dont
+ # have to worry about a tz.
+ return libindex.get_value_at(series._values, loc, tz=None)
+ return series._values[loc]
+
+ return series.iloc[loc]
def set_value(self, arr, key, value):
"""
@@ -4477,7 +4784,7 @@ def _maybe_promote(self, other):
return self.astype("object"), other.astype("object")
return self, other
- def groupby(self, values) -> Dict[Hashable, np.ndarray]:
+ def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:
"""
Group the index labels by a given array of values.
@@ -4502,7 +4809,7 @@ def groupby(self, values) -> Dict[Hashable, np.ndarray]:
# map to the label
result = {k: self.take(v) for k, v in result.items()}
- return result
+ return PrettyDict(result)
def map(self, mapper, na_action=None):
"""
@@ -4630,7 +4937,7 @@ def isin(self, values, level=None):
self._validate_index_level(level)
return algos.isin(self, values)
- def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
+ def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
@@ -4699,20 +5006,19 @@ def _maybe_cast_indexer(self, key):
pass
return key
- def _validate_indexer(self, form, key, kind):
+ def _validate_indexer(self, form: str_t, key, kind: str_t):
"""
If we are positional indexer, validate that we have appropriate
typed bounds must be an integer.
"""
- assert kind in ["ix", "loc", "getitem", "iloc"]
+ assert kind in ["getitem", "iloc"]
if key is None:
pass
elif is_integer(key):
pass
- elif kind in ["iloc", "getitem"]:
+ else:
self._invalid_indexer(form, key)
- return key
_index_shared_docs[
"_maybe_cast_slice_bound"
@@ -4725,7 +5031,7 @@ def _validate_indexer(self, form, key, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
@@ -4737,16 +5043,15 @@ def _validate_indexer(self, form, key, kind):
"""
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
- def _maybe_cast_slice_bound(self, label, side, kind):
- assert kind in ["ix", "loc", "getitem", None]
+ def _maybe_cast_slice_bound(self, label, side: str_t, kind):
+ assert kind in ["loc", "getitem", None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
- if not (kind in ["ix"] and (self.holds_integer() or self.is_floating())):
- self._invalid_indexer("slice", label)
+ self._invalid_indexer("slice", label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
@@ -4769,7 +5074,7 @@ def _searchsorted_monotonic(self, label, side="left"):
raise ValueError("index must be monotonic increasing or decreasing")
- def get_slice_bound(self, label, side, kind):
+ def get_slice_bound(self, label, side: str_t, kind) -> int:
"""
Calculate slice bound that corresponds to given label.
@@ -4780,19 +5085,19 @@ def get_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
int
Index of label.
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
- f"Invalid value for side kwarg, must be either"
- f" 'left' or 'right': {side}"
+ "Invalid value for side kwarg, must be either "
+ f"'left' or 'right': {side}"
)
original_label = label
@@ -4847,7 +5152,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
- kind : {'ix', 'loc', 'getitem'} or None
+ kind : {'loc', 'getitem'} or None
Returns
-------
@@ -4934,7 +5239,7 @@ def delete(self, loc):
"""
return self._shallow_copy(np.delete(self._data, loc))
- def insert(self, loc, item):
+ def insert(self, loc: int, item):
"""
Make new Index inserting new item at location.
@@ -4954,7 +5259,7 @@ def insert(self, loc, item):
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
- def drop(self, labels, errors="raise"):
+ def drop(self, labels, errors: str_t = "raise"):
"""
Make new Index with passed list of labels deleted.
@@ -5538,17 +5843,3 @@ def _try_convert_to_int_array(
pass
raise ValueError
-
-
-def deprecate_ndim_indexing(result):
- if np.ndim(result) > 1:
- # GH#27125 indexer like idx[:, None] expands dim, but we
- # cannot do that and keep an index, so return ndarray
- # Deprecation GH#30588
- warnings.warn(
- "Support for multi-dimensional indexing (e.g. `index[:, None]`) "
- "on an Index is deprecated and will be removed in a future "
- "version. Convert to a numpy array before indexing instead.",
- DeprecationWarning,
- stacklevel=3,
- )
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 41072d4ce6a93..235d1856a2d0b 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -27,7 +27,7 @@
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
-from pandas.core.indexes.extension import ExtensionIndex
+from pandas.core.indexes.extension import ExtensionIndex, inherit_names
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
@@ -35,11 +35,21 @@
_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
-@accessor.delegate_names(
- delegate=Categorical,
- accessors=["codes", "categories", "ordered"],
- typ="property",
- overwrite=True,
+@inherit_names(
+ [
+ "argsort",
+ "_internal_get_values",
+ "tolist",
+ "codes",
+ "categories",
+ "ordered",
+ "_reverse_indexer",
+ "searchsorted",
+ "is_dtype_equal",
+ "min",
+ "max",
+ ],
+ Categorical,
)
@accessor.delegate_names(
delegate=Categorical,
@@ -52,14 +62,6 @@
"set_categories",
"as_ordered",
"as_unordered",
- "min",
- "max",
- "is_dtype_equal",
- "tolist",
- "_internal_get_values",
- "_reverse_indexer",
- "searchsorted",
- "argsort",
],
typ="method",
overwrite=True,
@@ -170,6 +172,7 @@ class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
codes: np.ndarray
categories: Index
+ _data: Categorical
@property
def _engine_type(self):
@@ -310,7 +313,7 @@ def _is_dtype_compat(self, other) -> bool:
return other
- def equals(self, other):
+ def equals(self, other) -> bool:
"""
Determine if two CategoricalIndex objects contain the same elements.
@@ -378,6 +381,11 @@ def values(self):
""" return the underlying data, which is a Categorical """
return self._data
+ @property
+ def _has_complex_internals(self) -> bool:
+ # used to avoid libreduction code paths, which raise or require conversion
+ return True
+
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
# We use _shallow_copy rather than the Index implementation
@@ -385,11 +393,12 @@ def _wrap_setop_result(self, other, result):
return self._shallow_copy(result, name=name)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
- def __contains__(self, key) -> bool:
+ def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.hasnans
+ hash(key)
return contains(self, key, container=self._engine)
def __array__(self, dtype=None) -> np.ndarray:
@@ -428,19 +437,6 @@ def _engine(self):
codes = self.codes
return self._engine_type(lambda: codes, len(self))
- # introspection
- @cache_readonly
- def is_unique(self) -> bool:
- return self._engine.is_unique
-
- @property
- def is_monotonic_increasing(self):
- return self._engine.is_monotonic_increasing
-
- @property
- def is_monotonic_decreasing(self) -> bool:
- return self._engine.is_monotonic_decreasing
-
@Appender(_index_shared_docs["index_unique"] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
@@ -516,8 +512,8 @@ def get_value(self, series: AnyArrayLike, key: Any):
Any
The element of the series at the position indicated by the key
"""
+ k = key
try:
- k = com.values_from_object(key)
k = self._convert_scalar_indexer(k, kind="getitem")
indexer = self.get_loc(k)
return series.take([indexer])[0]
@@ -811,7 +807,7 @@ def delete(self, loc):
"""
return self._create_from_codes(np.delete(self.codes, loc))
- def insert(self, loc, item):
+ def insert(self, loc: int, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
@@ -856,12 +852,12 @@ def _concat_same_dtype(self, to_concat, name):
result.name = name
return result
- def _delegate_property_get(self, name, *args, **kwargs):
+ def _delegate_property_get(self, name: str, *args, **kwargs):
""" method delegation to the ._values """
prop = getattr(self._values, name)
return prop # no wrapping for now
- def _delegate_method(self, name, *args, **kwargs):
+ def _delegate_method(self, name: str, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if "inplace" in kwargs:
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c4dac9d1c4a11..0f385d9aba9c5 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -2,7 +2,7 @@
Base and utility classes for tseries type pandas objects.
"""
import operator
-from typing import List, Optional, Set
+from typing import Any, List, Optional, Set, Union
import numpy as np
@@ -27,12 +27,13 @@
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
-from pandas.core.dtypes.missing import isna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import algorithms
from pandas.core.accessor import PandasDelegate
-from pandas.core.arrays import DatetimeArray, ExtensionArray, TimedeltaArray
+from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
+from pandas.core.base import _shared_docs
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.extension import (
@@ -89,7 +90,7 @@ class DatetimeIndexOpsMixin(ExtensionIndex):
Common ops mixin to support a unified interface datetimelike Index.
"""
- _data: ExtensionArray
+ _data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
freq: Optional[DateOffset]
freqstr: Optional[str]
_resolution: int
@@ -153,32 +154,15 @@ def equals(self, other) -> bool:
return np.array_equal(self.asi8, other.asi8)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
- def __contains__(self, key):
+ def __contains__(self, key: Any) -> bool:
+ hash(key)
try:
res = self.get_loc(key)
- return (
- is_scalar(res)
- or isinstance(res, slice)
- or (is_list_like(res) and len(res))
- )
except (KeyError, TypeError, ValueError):
return False
-
- # Try to run function on index first, and then on elements of index
- # Especially important for group-by functionality
- def map(self, mapper, na_action=None):
- try:
- result = mapper(self)
-
- # Try to use this result if we can
- if isinstance(result, np.ndarray):
- result = Index(result)
-
- if not isinstance(result, Index):
- raise TypeError("The map function must return an Index object")
- return result
- except Exception:
- return self.astype(object).map(mapper)
+ return bool(
+ is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
+ )
def sort_values(self, return_indexer=False, ascending=True):
"""
@@ -195,20 +179,21 @@ def sort_values(self, return_indexer=False, ascending=True):
# because the treatment of NaT has been changed to put NaT last
# instead of first.
sorted_values = np.sort(self.asi8)
- attribs = self._get_attributes_dict()
- freq = attribs["freq"]
+ freq = self.freq
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
- attribs["freq"] = freq
if not ascending:
sorted_values = sorted_values[::-1]
- return self._simple_new(sorted_values, **attribs)
+ arr = type(self._data)._simple_new(
+ sorted_values, dtype=self.dtype, freq=freq
+ )
+ return self._simple_new(arr, name=self.name)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
@@ -223,6 +208,18 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
self, indices, axis, allow_fill, fill_value, **kwargs
)
+ @Appender(_shared_docs["searchsorted"])
+ def searchsorted(self, value, side="left", sorter=None):
+ if isinstance(value, str):
+ raise TypeError(
+ "searchsorted requires compatible dtype or scalar, "
+ f"not {type(value).__name__}"
+ )
+ if isinstance(value, Index):
+ value = value._data
+
+ return self._data.searchsorted(value, side=side, sorter=sorter)
+
_can_hold_na = True
_na_value = NaT
@@ -388,10 +385,10 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'ix', 'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem', 'iloc'} or None
"""
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
@@ -400,7 +397,7 @@ def _convert_scalar_indexer(self, key, kind=None):
is_flt = is_float(key)
if kind in ["loc"] and (is_int or is_flt):
self._invalid_indexer("index", key)
- elif kind in ["ix", "getitem"] and is_flt:
+ elif kind in ["getitem"] and is_flt:
self._invalid_indexer("index", key)
return super()._convert_scalar_indexer(key, kind=kind)
@@ -471,7 +468,7 @@ def where(self, cond, other=None):
result = np.where(cond, values, other).astype("i8")
return self._shallow_copy(result)
- def _summary(self, name=None):
+ def _summary(self, name=None) -> str:
"""
Return a summarized representation.
@@ -505,22 +502,21 @@ def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
- attribs = self._get_attributes_dict()
- attribs["name"] = name
+
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError("to_concat must have the same tz")
- new_data = type(self._values)._concat_same_type(to_concat).asi8
+ new_data = type(self._data)._concat_same_type(to_concat)
- # GH 3232: If the concat result is evenly spaced, we can retain the
- # original frequency
- is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
- if not is_period_dtype(self) and not is_diff_evenly_spaced:
- # reset freq
- attribs["freq"] = None
+ if not is_period_dtype(self.dtype):
+ # GH 3232: If the concat result is evenly spaced, we can retain the
+ # original frequency
+ is_diff_evenly_spaced = len(unique_deltas(new_data.asi8)) == 1
+ if is_diff_evenly_spaced:
+ new_data._freq = self.freq
- return self._simple_new(new_data, **attribs)
+ return self._simple_new(new_data, name=name)
def shift(self, periods=1, freq=None):
"""
@@ -614,8 +610,6 @@ def _set_freq(self, freq):
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self._data
- if isinstance(values, type(self)):
- values = values._data
attributes = self._get_attributes_dict()
@@ -875,11 +869,7 @@ def _is_convertible_to_index_for_join(cls, other: Index) -> bool:
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
- if (
- isinstance(other, type(self))
- and self.freq == other.freq
- and self._can_fast_union(other)
- ):
+ if self._can_fast_union(other):
joined = self._shallow_copy(joined)
joined.name = name
return joined
@@ -889,6 +879,60 @@ def _wrap_joined_index(self, joined, other):
kwargs["tz"] = getattr(other, "tz", None)
return self._simple_new(joined, name, **kwargs)
+ # --------------------------------------------------------------------
+ # List-Like Methods
+
+ def insert(self, loc, item):
+ """
+ Make new Index inserting new item at location
+ Parameters
+ ----------
+ loc : int
+ item : object
+ if not either a Python datetime or a numpy integer-like, returned
+ Index dtype will be object rather than datetime.
+ Returns
+ -------
+ new_index : Index
+ """
+ if isinstance(item, self._data._recognized_scalars):
+ item = self._data._scalar_type(item)
+ elif is_valid_nat_for_dtype(item, self.dtype):
+ # GH 18295
+ item = self._na_value
+ elif is_scalar(item) and isna(item):
+ raise TypeError(
+ f"cannot insert {type(self).__name__} with incompatible label"
+ )
+
+ freq = None
+ if isinstance(item, self._data._scalar_type) or item is NaT:
+ self._data._check_compatible_with(item, setitem=True)
+
+ # check freq can be preserved on edge cases
+ if self.size and self.freq is not None:
+ if item is NaT:
+ pass
+ elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
+ freq = self.freq
+ elif (loc == len(self)) and item - self.freq == self[-1]:
+ freq = self.freq
+ item = item.asm8
+
+ try:
+ new_i8s = np.concatenate(
+ (self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
+ )
+ return self._shallow_copy(new_i8s, freq=freq)
+ except (AttributeError, TypeError):
+
+ # fall back to object index
+ if isinstance(item, str):
+ return self.astype(object).insert(loc, item)
+ raise TypeError(
+ f"cannot insert {type(self).__name__} with incompatible label"
+ )
+
class DatetimelikeDelegateMixin(PandasDelegate):
"""
@@ -911,7 +955,7 @@ class DatetimelikeDelegateMixin(PandasDelegate):
_raw_methods: Set[str] = set()
# raw_properties : dispatch properties that shouldn't be boxed in an Index
_raw_properties: Set[str] = set()
- _data: ExtensionArray
+ _data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
def _delegate_property_get(self, name, *args, **kwargs):
result = getattr(self._data, name)
@@ -919,7 +963,7 @@ def _delegate_property_get(self, name, *args, **kwargs):
result = Index(result, name=self.name)
return result
- def _delegate_property_set(self, name, value, *args, **kwargs):
+ def _delegate_property_set(self, name: str, value, *args, **kwargs):
setattr(self._data, name, value)
def _delegate_method(self, name, *args, **kwargs):
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 2241921e94694..2b4636155111f 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -5,33 +5,35 @@
import numpy as np
-from pandas._libs import NaT, Timestamp, index as libindex, lib, tslib as libts
+from pandas._libs import (
+ NaT,
+ Timedelta,
+ Timestamp,
+ index as libindex,
+ lib,
+ tslib as libts,
+)
from pandas._libs.tslibs import ccalendar, fields, parsing, timezones
-from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import _NS_DTYPE, is_float, is_integer, is_scalar
from pandas.core.dtypes.dtypes import DatetimeTZDtype
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype
-from pandas.core.accessor import delegate_names
from pandas.core.arrays.datetimes import (
DatetimeArray,
tz_to_dtype,
validate_tz_from_dtype,
)
-from pandas.core.base import _shared_docs
import pandas.core.common as com
-from pandas.core.indexes.base import Index, maybe_extract_name
-from pandas.core.indexes.datetimelike import (
- DatetimelikeDelegateMixin,
- DatetimeTimedeltaMixin,
-)
+from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name
+from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.ops import get_op_result_name
import pandas.core.tools.datetimes as tools
from pandas.tseries.frequencies import Resolution, to_offset
-from pandas.tseries.offsets import Nano, prefix_mapping
+from pandas.tseries.offsets import prefix_mapping
def _new_DatetimeIndex(cls, d):
@@ -53,32 +55,13 @@ def _new_DatetimeIndex(cls, d):
return result
-class DatetimeDelegateMixin(DatetimelikeDelegateMixin):
- # Most attrs are dispatched via datetimelike_{ops,methods}
- # Some are "raw" methods, the result is not not re-boxed in an Index
- # We also have a few "extra" attrs, which may or may not be raw,
- # which we we dont' want to expose in the .dt accessor.
- _extra_methods = ["to_period", "to_perioddelta", "to_julian_date", "strftime"]
- _extra_raw_methods = [
- "to_pydatetime",
- "_local_timestamps",
- "_has_same_tz",
- "_format_native_types",
- "__iter__",
- ]
- _extra_raw_properties = ["_box_func", "tz", "tzinfo", "dtype"]
- _delegated_properties = DatetimeArray._datetimelike_ops + _extra_raw_properties
- _delegated_methods = (
- DatetimeArray._datetimelike_methods + _extra_methods + _extra_raw_methods
- )
- _raw_properties = (
- {"date", "time", "timetz"}
- | set(DatetimeArray._bool_ops)
- | set(_extra_raw_properties)
- )
- _raw_methods = set(_extra_raw_methods)
-
-
+@inherit_names(
+ ["to_period", "to_perioddelta", "to_julian_date", "strftime"]
+ + DatetimeArray._field_ops
+ + DatetimeArray._datetimelike_methods,
+ DatetimeArray,
+ wrap=True,
+)
@inherit_names(["_timezone", "is_normalized", "_resolution"], DatetimeArray, cache=True)
@inherit_names(
[
@@ -87,19 +70,22 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin):
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
- ],
- DatetimeArray,
-)
-@delegate_names(
- DatetimeArray, DatetimeDelegateMixin._delegated_properties, typ="property"
-)
-@delegate_names(
+ "_box_func",
+ "tz",
+ "tzinfo",
+ "dtype",
+ "to_pydatetime",
+ "_local_timestamps",
+ "_has_same_tz",
+ "_format_native_types",
+ "date",
+ "time",
+ "timetz",
+ ]
+ + DatetimeArray._bool_ops,
DatetimeArray,
- DatetimeDelegateMixin._delegated_methods,
- typ="method",
- overwrite=True,
)
-class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin):
+class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
@@ -212,6 +198,7 @@ class DatetimeIndex(DatetimeTimedeltaMixin, DatetimeDelegateMixin):
_is_numeric_dtype = False
_infer_as_myclass = True
+ _data: DatetimeArray
tz: Optional[tzinfo]
# --------------------------------------------------------------------
@@ -253,7 +240,7 @@ def __new__(
ambiguous=ambiguous,
)
- subarr = cls._simple_new(dtarr, name=name, freq=dtarr.freq, tz=dtarr.tz)
+ subarr = cls._simple_new(dtarr, name=name)
return subarr
@classmethod
@@ -274,10 +261,6 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None):
freq = values.freq
values = values._data
- # DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes
- if isinstance(values, DatetimeIndex):
- values = values._data
-
dtype = tz_to_dtype(tz)
dtarr = DatetimeArray._simple_new(values, freq=freq, dtype=dtype)
assert isinstance(dtarr, DatetimeArray)
@@ -375,7 +358,7 @@ def union_many(self, others):
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
- return self._shallow_copy(result, name=name, freq=None, tz=self.tz)
+ return self._shallow_copy(result, name=name, freq=None)
# --------------------------------------------------------------------
@@ -484,10 +467,10 @@ def snap(self, freq="S"):
s = t1
snapped[i] = s
- # we know it conforms; skip check
- return DatetimeIndex._simple_new(snapped, name=self.name, tz=self.tz, freq=freq)
+ dta = DatetimeArray(snapped, dtype=self.dtype)
+ return DatetimeIndex._simple_new(dta, name=self.name)
- def _parsed_string_to_bounds(self, reso, parsed):
+ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
@@ -519,27 +502,27 @@ def _parsed_string_to_bounds(self, reso, parsed):
raise KeyError
if reso == "year":
start = Timestamp(parsed.year, 1, 1)
- end = Timestamp(parsed.year, 12, 31, 23, 59, 59, 999999)
+ end = Timestamp(parsed.year + 1, 1, 1) - Timedelta(nanoseconds=1)
elif reso == "month":
d = ccalendar.get_days_in_month(parsed.year, parsed.month)
start = Timestamp(parsed.year, parsed.month, 1)
- end = Timestamp(parsed.year, parsed.month, d, 23, 59, 59, 999999)
+ end = start + Timedelta(days=d, nanoseconds=-1)
elif reso == "quarter":
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month
start = Timestamp(parsed.year, parsed.month, 1)
- end = Timestamp(parsed.year, qe, d, 23, 59, 59, 999999)
+ end = Timestamp(parsed.year, qe, 1) + Timedelta(days=d, nanoseconds=-1)
elif reso == "day":
start = Timestamp(parsed.year, parsed.month, parsed.day)
- end = start + timedelta(days=1) - Nano(1)
+ end = start + Timedelta(days=1, nanoseconds=-1)
elif reso == "hour":
start = Timestamp(parsed.year, parsed.month, parsed.day, parsed.hour)
- end = start + timedelta(hours=1) - Nano(1)
+ end = start + Timedelta(hours=1, nanoseconds=-1)
elif reso == "minute":
start = Timestamp(
parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute
)
- end = start + timedelta(minutes=1) - Nano(1)
+ end = start + Timedelta(minutes=1, nanoseconds=-1)
elif reso == "second":
start = Timestamp(
parsed.year,
@@ -549,7 +532,7 @@ def _parsed_string_to_bounds(self, reso, parsed):
parsed.minute,
parsed.second,
)
- end = start + timedelta(seconds=1) - Nano(1)
+ end = start + Timedelta(seconds=1, nanoseconds=-1)
elif reso == "microsecond":
start = Timestamp(
parsed.year,
@@ -560,7 +543,7 @@ def _parsed_string_to_bounds(self, reso, parsed):
parsed.second,
parsed.microsecond,
)
- end = start + timedelta(microseconds=1) - Nano(1)
+ end = start + Timedelta(microseconds=1, nanoseconds=-1)
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
@@ -579,7 +562,7 @@ def _parsed_string_to_bounds(self, reso, parsed):
return start, end
def _partial_date_slice(
- self, reso: str, parsed, use_lhs: bool = True, use_rhs: bool = True
+ self, reso: str, parsed: datetime, use_lhs: bool = True, use_rhs: bool = True
):
"""
Parameters
@@ -639,42 +622,11 @@ def get_value(self, series, key):
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
-
- if isinstance(key, (datetime, np.datetime64)):
- return self.get_value_maybe_box(series, key)
-
- if isinstance(key, time):
- locs = self.indexer_at_time(key)
- return series.take(locs)
-
- try:
- value = Index.get_value(self, series, key)
- except KeyError:
- try:
- loc = self._get_string_slice(key)
- return series[loc]
- except (TypeError, ValueError, KeyError):
- pass
-
- try:
- return self.get_value_maybe_box(series, key)
- except (TypeError, ValueError, KeyError):
- raise KeyError(key)
+ if is_integer(key):
+ loc = key
else:
- return com.maybe_box(self, value, series, key)
-
- def get_value_maybe_box(self, series, key):
- # needed to localize naive datetimes
- if self.tz is not None:
- key = Timestamp(key)
- if key.tzinfo is not None:
- key = key.tz_convert(self.tz)
- else:
- key = key.tz_localize(self.tz)
- elif not isinstance(key, Timestamp):
- key = Timestamp(key)
- values = self._engine.get_value(com.values_from_object(series), key, tz=self.tz)
- return com.maybe_box(self, values, series, key)
+ loc = self.get_loc(key)
+ return self._get_values_for_loc(series, loc)
def get_loc(self, key, method=None, tolerance=None):
"""
@@ -684,20 +636,34 @@ def get_loc(self, key, method=None, tolerance=None):
-------
loc : int
"""
+ if not is_scalar(key):
+ raise InvalidIndexError(key)
+
+ if is_valid_nat_for_dtype(key, self.dtype):
+ key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
- if isinstance(key, datetime):
+ if isinstance(key, (datetime, np.datetime64)):
# needed to localize naive datetimes
- if key.tzinfo is None:
- key = Timestamp(key, tz=self.tz)
- else:
- key = Timestamp(key).tz_convert(self.tz)
+ key = self._maybe_cast_for_get_loc(key)
return Index.get_loc(self, key, method, tolerance)
+ elif isinstance(key, str):
+ try:
+ return self._get_string_slice(key)
+ except (TypeError, KeyError, ValueError, OverflowError):
+ pass
+
+ try:
+ stamp = self._maybe_cast_for_get_loc(key)
+ return Index.get_loc(self, stamp, method, tolerance)
+ except (KeyError, ValueError):
+ raise KeyError(key)
+
elif isinstance(key, timedelta):
# GH#20464
raise TypeError(
@@ -711,30 +677,18 @@ def get_loc(self, key, method=None, tolerance=None):
)
return self.indexer_at_time(key)
- try:
- return Index.get_loc(self, key, method, tolerance)
- except (KeyError, ValueError, TypeError):
- try:
- return self._get_string_slice(key)
- except (TypeError, KeyError, ValueError, OverflowError):
- pass
+ return Index.get_loc(self, key, method, tolerance)
- try:
- stamp = Timestamp(key)
- if stamp.tzinfo is not None and self.tz is not None:
- stamp = stamp.tz_convert(self.tz)
- else:
- stamp = stamp.tz_localize(self.tz)
- return Index.get_loc(self, stamp, method, tolerance)
- except KeyError:
- raise KeyError(key)
- except ValueError as e:
- # list-like tolerance size must match target index size
- if "list-like" in str(e):
- raise e
- raise KeyError(key)
+ def _maybe_cast_for_get_loc(self, key) -> Timestamp:
+ # needed to localize naive datetimes
+ key = Timestamp(key)
+ if key.tzinfo is None:
+ key = key.tz_localize(self.tz)
+ else:
+ key = key.tz_convert(self.tz)
+ return key
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind):
"""
If label is a string, cast it to datetime according to resolution.
@@ -742,7 +696,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
@@ -752,14 +706,14 @@ def _maybe_cast_slice_bound(self, label, side, kind):
-----
Value of `side` parameter should be validated in caller.
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer("slice", label)
if isinstance(label, str):
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
- _, parsed, reso = parsing.parse_time_string(label, freq)
+ parsed, reso = parsing.parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
@@ -775,7 +729,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
- _, parsed, reso = parsing.parse_time_string(key, freq)
+ parsed, reso = parsing.parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
return loc
@@ -832,30 +786,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
# --------------------------------------------------------------------
- @Substitution(klass="DatetimeIndex")
- @Appender(_shared_docs["searchsorted"])
- def searchsorted(self, value, side="left", sorter=None):
- if isinstance(value, (np.ndarray, Index)):
- if not type(self._data)._is_recognized_dtype(value):
- raise TypeError(
- "searchsorted requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
- )
- value = type(self._data)(value)
- self._data._check_compatible_with(value)
-
- elif isinstance(value, self._data._recognized_scalars):
- self._data._check_compatible_with(value)
- value = self._data._scalar_type(value)
-
- elif not isinstance(value, DatetimeArray):
- raise TypeError(
- "searchsorted requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
- )
-
- return self._data.searchsorted(value, side=side)
-
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "datetime"
@@ -865,60 +795,6 @@ def inferred_type(self) -> str:
# sure we can't have ambiguous indexing
return "datetime64"
- def insert(self, loc, item):
- """
- Make new Index inserting new item at location
-
- Parameters
- ----------
- loc : int
- item : object
- if not either a Python datetime or a numpy integer-like, returned
- Index dtype will be object rather than datetime.
-
- Returns
- -------
- new_index : Index
- """
- if isinstance(item, self._data._recognized_scalars):
- item = self._data._scalar_type(item)
- elif is_valid_nat_for_dtype(item, self.dtype):
- # GH 18295
- item = self._na_value
- elif is_scalar(item) and isna(item):
- # i.e. timedeltat64("NaT")
- raise TypeError(
- f"cannot insert {type(self).__name__} with incompatible label"
- )
-
- freq = None
- if isinstance(item, self._data._scalar_type) or item is NaT:
- self._data._check_compatible_with(item, setitem=True)
-
- # check freq can be preserved on edge cases
- if self.size and self.freq is not None:
- if item is NaT:
- pass
- elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
- freq = self.freq
- elif (loc == len(self)) and item - self.freq == self[-1]:
- freq = self.freq
- item = item.asm8
-
- try:
- new_i8s = np.concatenate(
- (self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
- )
- return self._shallow_copy(new_i8s, freq=freq)
- except (AttributeError, TypeError):
-
- # fall back to object index
- if isinstance(item, str):
- return self.astype(object).insert(loc, item)
- raise TypeError(
- f"cannot insert {type(self).__name__} with incompatible label"
- )
-
def indexer_at_time(self, time, asof=False):
"""
Return index locations of index values at particular time of day
@@ -1172,7 +1048,7 @@ def date_range(
closed=closed,
**kwargs,
)
- return DatetimeIndex._simple_new(dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)
+ return DatetimeIndex._simple_new(dtarr, name=name)
def bdate_range(
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 58fcce7e59be7..66b551f654bf1 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -6,17 +6,22 @@
import numpy as np
from pandas.compat.numpy import function as nv
-from pandas.util._decorators import cache_readonly
+from pandas.util._decorators import Appender, cache_readonly
-from pandas.core.dtypes.common import ensure_platform_int, is_dtype_equal
+from pandas.core.dtypes.common import (
+ ensure_platform_int,
+ is_dtype_equal,
+ is_object_dtype,
+)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.arrays import ExtensionArray
-from pandas.core.indexes.base import Index, deprecate_ndim_indexing
+from pandas.core.indexers import deprecate_ndim_indexing
+from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
-def inherit_from_data(name: str, delegate, cache: bool = False):
+def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
"""
Make an alias for a method of the underlying ExtensionArray.
@@ -27,6 +32,8 @@ def inherit_from_data(name: str, delegate, cache: bool = False):
delegate : class
cache : bool, default False
Whether to convert wrapped properties into cache_readonly
+ wrap : bool, default False
+ Whether to wrap the inherited result in an Index.
Returns
-------
@@ -37,12 +44,23 @@ def inherit_from_data(name: str, delegate, cache: bool = False):
if isinstance(attr, property):
if cache:
- method = cache_readonly(attr.fget)
+
+ def cached(self):
+ return getattr(self._data, name)
+
+ cached.__name__ = name
+ cached.__doc__ = attr.__doc__
+ method = cache_readonly(cached)
else:
def fget(self):
- return getattr(self._data, name)
+ result = getattr(self._data, name)
+ if wrap:
+ if isinstance(result, type(self._data)):
+ return type(self)._simple_new(result, name=self.name)
+ return Index(result, name=self.name)
+ return result
def fset(self, value):
setattr(self._data, name, value)
@@ -60,6 +78,10 @@ def fset(self, value):
def method(self, *args, **kwargs):
result = attr(self._data, *args, **kwargs)
+ if wrap:
+ if isinstance(result, type(self._data)):
+ return type(self)._simple_new(result, name=self.name)
+ return Index(result, name=self.name)
return result
method.__name__ = name
@@ -67,7 +89,7 @@ def method(self, *args, **kwargs):
return method
-def inherit_names(names: List[str], delegate, cache: bool = False):
+def inherit_names(names: List[str], delegate, cache: bool = False, wrap: bool = False):
"""
Class decorator to pin attributes from an ExtensionArray to a Index subclass.
@@ -76,11 +98,13 @@ def inherit_names(names: List[str], delegate, cache: bool = False):
names : List[str]
delegate : class
cache : bool, default False
+ wrap : bool, default False
+ Whether to wrap the inherited result in an Index.
"""
def wrapper(cls):
for name in names:
- meth = inherit_from_data(name, delegate, cache=cache)
+ meth = inherit_from_data(name, delegate, cache=cache, wrap=wrap)
setattr(cls, name, meth)
return cls
@@ -88,7 +112,7 @@ def wrapper(cls):
return wrapper
-def _make_wrapped_comparison_op(opname):
+def _make_wrapped_comparison_op(opname: str):
"""
Create a comparison method that dispatches to ``._data``.
"""
@@ -108,8 +132,17 @@ def wrapper(self, other):
return wrapper
-def make_wrapped_arith_op(opname):
+def make_wrapped_arith_op(opname: str):
def method(self, other):
+ if (
+ isinstance(other, Index)
+ and is_object_dtype(other.dtype)
+ and type(other) is not Index
+ ):
+ # We return NotImplemented for object-dtype index *subclasses* so they have
+ # a chance to implement ops before we unwrap them.
+ # See https://github.com/pandas-dev/pandas/issues/31109
+ return NotImplemented
meth = getattr(self._data, opname)
result = meth(_maybe_unwrap_index(other))
return _wrap_arithmetic_op(self, other, result)
@@ -188,6 +221,7 @@ def __iter__(self):
def _ndarray_values(self) -> np.ndarray:
return self._data._ndarray_values
+ @Appender(Index.dropna.__doc__)
def dropna(self, how="any"):
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
@@ -201,6 +235,7 @@ def repeat(self, repeats, axis=None):
result = self._data.repeat(repeats, axis=axis)
return self._shallow_copy(result)
+ @Appender(Index.take.__doc__)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
@@ -230,6 +265,24 @@ def _get_unique_index(self, dropna=False):
result = result[~result.isna()]
return self._shallow_copy(result)
+ @Appender(Index.map.__doc__)
+ def map(self, mapper, na_action=None):
+ # Try to run function on index first, and then on elements of index
+ # Especially important for group-by functionality
+ try:
+ result = mapper(self)
+
+ # Try to use this result if we can
+ if isinstance(result, np.ndarray):
+ result = Index(result)
+
+ if not isinstance(result, Index):
+ raise TypeError("The map function must return an Index object")
+ return result
+ except Exception:
+ return self.astype(object).map(mapper)
+
+ @Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype) and copy is False:
# Ensure that self.astype(self.dtype) is self
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index d33ba52cc7524..fd812b17fb37c 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,7 +1,7 @@
""" define the IntervalIndex """
from operator import le, lt
import textwrap
-from typing import Any, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
import numpy as np
@@ -34,10 +34,8 @@
is_object_dtype,
is_scalar,
)
-from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas.core import accessor
from pandas.core.algorithms import take_1d
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
@@ -59,6 +57,10 @@
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
+if TYPE_CHECKING:
+ from pandas import Series
+
+
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -183,31 +185,27 @@ def func(intvidx_self, other, sort=False):
),
)
)
-@accessor.delegate_names(
- delegate=IntervalArray,
- accessors=["length", "size", "left", "right", "mid", "closed", "dtype"],
- typ="property",
- overwrite=True,
-)
-@accessor.delegate_names(
- delegate=IntervalArray,
- accessors=[
+@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
+@inherit_names(
+ [
+ "__len__",
"__array__",
"overlaps",
"contains",
- "__len__",
- "set_closed",
- "to_tuples",
+ "size",
+ "dtype",
+ "left",
+ "right",
+ "length",
],
- typ="method",
- overwrite=True,
+ IntervalArray,
)
@inherit_names(
- ["is_non_overlapping_monotonic", "mid", "_ndarray_values"],
+ ["is_non_overlapping_monotonic", "mid", "_ndarray_values", "closed"],
IntervalArray,
cache=True,
)
-class IntervalIndex(IntervalMixin, ExtensionIndex, accessor.PandasDelegate):
+class IntervalIndex(IntervalMixin, ExtensionIndex):
_typ = "intervalindex"
_comparables = ["name"]
_attributes = ["name", "closed"]
@@ -218,8 +216,7 @@ class IntervalIndex(IntervalMixin, ExtensionIndex, accessor.PandasDelegate):
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
- _raw_inherit = {"__array__", "overlaps", "contains"}
-
+ _data: IntervalArray
# --------------------------------------------------------------------
# Constructors
@@ -259,6 +256,8 @@ def _simple_new(cls, array, name, closed=None):
closed : Any
Ignored.
"""
+ assert isinstance(array, IntervalArray), type(array)
+
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
@@ -372,7 +371,7 @@ def _engine(self):
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
- def __contains__(self, key) -> bool:
+ def __contains__(self, key: Any) -> bool:
"""
return a boolean if this key is IN the index
We *only* accept an Interval
@@ -385,6 +384,7 @@ def __contains__(self, key) -> bool:
-------
bool
"""
+ hash(key)
if not isinstance(key, Interval):
return False
@@ -395,19 +395,20 @@ def __contains__(self, key) -> bool:
return False
@cache_readonly
- def _multiindex(self):
+ def _multiindex(self) -> MultiIndex:
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
@cache_readonly
- def values(self):
+ def values(self) -> IntervalArray:
"""
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data
- @cache_readonly
- def _values(self):
- return self._data
+ @property
+ def _has_complex_internals(self) -> bool:
+ # used to avoid libreduction code paths, which raise or require conversion
+ return True
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
@@ -437,22 +438,8 @@ def memory_usage(self, deep: bool = False) -> int:
# so return the bytes here
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
- @cache_readonly
- def is_monotonic(self) -> bool:
- """
- Return True if the IntervalIndex is monotonic increasing (only equal or
- increasing values), else False
- """
- return self.is_monotonic_increasing
-
- @cache_readonly
- def is_monotonic_increasing(self) -> bool:
- """
- Return True if the IntervalIndex is monotonic increasing (only equal or
- increasing values), else False
- """
- return self._engine.is_monotonic_increasing
-
+ # IntervalTree doesn't have an is_monotonic_decreasing, so have to override
+ # the Index implementation
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
@@ -486,7 +473,7 @@ def is_unique(self):
return True
@property
- def is_overlapping(self):
+ def is_overlapping(self) -> bool:
"""
Return True if the IntervalIndex has overlapping intervals, else False.
@@ -580,7 +567,7 @@ def _can_reindex(self, indexer: np.ndarray) -> None:
if self.is_overlapping and len(indexer):
raise ValueError("cannot reindex from an overlapping axis")
- def _needs_i8_conversion(self, key):
+ def _needs_i8_conversion(self, key) -> bool:
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
@@ -696,7 +683,7 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False):
return sub_idx._searchsorted_monotonic(label, side)
def get_loc(
- self, key: Any, method: Optional[str] = None, tolerance=None
+ self, key, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
"""
Get integer location, slice or boolean mask for requested label.
@@ -738,10 +725,8 @@ def get_loc(
"""
self._check_method(method)
- # list-like are invalid labels for II but in some cases may work, e.g
- # single element array of comparable type, so guard against them early
- if is_list_like(key):
- raise KeyError(key)
+ if not is_scalar(key):
+ raise InvalidIndexError(key)
if isinstance(key, Interval):
if self.closed != key.closed:
@@ -834,6 +819,9 @@ def get_indexer(
loc = self.get_loc(key)
except KeyError:
loc = -1
+ except InvalidIndexError:
+ # i.e. non-scalar key
+ raise TypeError(key)
indexer.append(loc)
return ensure_platform_int(indexer)
@@ -897,25 +885,15 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
return self.get_indexer(target, **kwargs)
@Appender(_index_shared_docs["get_value"] % _index_doc_kwargs)
- def get_value(self, series: ABCSeries, key: Any) -> Any:
-
- if com.is_bool_indexer(key):
- loc = key
- elif is_list_like(key):
- if self.is_overlapping:
- loc, missing = self.get_indexer_non_unique(key)
- if len(missing):
- raise KeyError
- else:
- loc = self.get_indexer(key)
- elif isinstance(key, slice):
- if not (key.step is None or key.step == 1):
- raise ValueError("cannot support not-default step in a slice")
- loc = self._convert_slice_indexer(key, kind="getitem")
- else:
- loc = self.get_loc(key)
+ def get_value(self, series: "Series", key):
+ loc = self.get_loc(key)
return series.iloc[loc]
+ def _convert_slice_indexer(self, key: slice, kind=None):
+ if not (key.step is None or key.step == 1):
+ raise ValueError("cannot support not-default step in a slice")
+ return super()._convert_slice_indexer(key, kind)
+
@Appender(_index_shared_docs["where"])
def where(self, cond, other=None):
if other is None:
@@ -1054,7 +1032,7 @@ def _format_space(self) -> str:
# --------------------------------------------------------------------
- def argsort(self, *args, **kwargs):
+ def argsort(self, *args, **kwargs) -> np.ndarray:
return np.lexsort((self.right, self.left))
def equals(self, other) -> bool:
@@ -1069,7 +1047,7 @@ def equals(self, other) -> bool:
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
- other = Index(getattr(other, ".values", other))
+ other = Index(other)
return (
self.left.equals(other.left)
@@ -1176,21 +1154,6 @@ def is_all_dates(self) -> bool:
# TODO: arithmetic operations
- def _delegate_property_get(self, name, *args, **kwargs):
- """ method delegation to the ._values """
- prop = getattr(self._data, name)
- return prop # no wrapping for now
-
- def _delegate_method(self, name, *args, **kwargs):
- """ method delegation to the ._data """
- method = getattr(self._data, name)
- res = method(*args, **kwargs)
- if is_scalar(res) or name in self._raw_inherit:
- return res
- if isinstance(res, IntervalArray):
- return type(self)._simple_new(res, name=self.name)
- return Index(res)
-
# GH#30817 until IntervalArray implements inequalities, get them from Index
def __lt__(self, other):
return Index.__lt__(self, other)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 84d7399cc4f2d..02db7be1ddf41 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,6 +1,6 @@
import datetime
from sys import getsizeof
-from typing import Hashable, List, Optional, Sequence, Union
+from typing import Any, Hashable, List, Optional, Sequence, Union
import warnings
import numpy as np
@@ -973,7 +973,7 @@ def _shallow_copy_with_infer(self, values, **kwargs):
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
- def __contains__(self, key) -> bool:
+ def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
@@ -1256,11 +1256,15 @@ def _get_grouper_for_level(self, mapper, level):
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
-
- if len(level_index):
- grouper = level_index.take(codes)
else:
+ # break references back to us so that setting the name
+ # on the output of a groupby doesn't reflect back here.
+ level_index = level_index.copy()
+
+ if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
+ else:
+ grouper = level_index.take(codes)
return grouper, codes, level_index
@@ -1288,8 +1292,8 @@ def _get_level_number(self, level) -> int:
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
- f"Too many levels: Index has only {self.nlevels} levels,"
- f" {orig_level} is not a valid level number"
+ f"Too many levels: Index has only {self.nlevels} levels, "
+ f"{orig_level} is not a valid level number"
)
# Note: levels are zero-based
elif level >= self.nlevels:
@@ -1342,6 +1346,11 @@ def values(self):
self._tuples = lib.fast_zip(values)
return self._tuples
+ @property
+ def _has_complex_internals(self) -> bool:
+ # used to avoid libreduction code paths, which raise or require conversion
+ return True
+
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
@@ -2013,7 +2022,7 @@ def append(self, other):
except (TypeError, IndexError):
return Index(new_tuples)
- def argsort(self, *args, **kwargs):
+ def argsort(self, *args, **kwargs) -> np.ndarray:
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
@@ -2054,7 +2063,7 @@ def drop(self, codes, level=None, errors="raise"):
if not isinstance(codes, (np.ndarray, Index)):
try:
- codes = com.index_labels_to_array(codes)
+ codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
@@ -2171,8 +2180,8 @@ def reorder_levels(self, order):
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
- f"Length of order must be same as number of levels ({self.nlevels}),"
- f" got {len(order)}"
+ f"Length of order must be same as number of levels ({self.nlevels}), "
+ f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
@@ -2527,8 +2536,8 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
- f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth"
- f" ({self.lexsort_depth})"
+ f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
+ f"({self.lexsort_depth})"
)
n = len(tup)
@@ -2639,7 +2648,8 @@ def _maybe_to_slice(loc):
mask[loc] = True
return mask
- if not isinstance(key, tuple):
+ if not isinstance(key, (tuple, list)):
+ # not including list here breaks some indexing, xref #30892
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
@@ -2774,7 +2784,7 @@ def maybe_mi_droplevels(indexer, levels, drop_level: bool):
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
- except TypeError:
+ except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
@@ -3135,7 +3145,7 @@ def equals(self, other) -> bool:
return True
- def equal_levels(self, other):
+ def equal_levels(self, other) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
@@ -3335,7 +3345,7 @@ def _convert_can_do_setop(self, other):
result_names = self.names if self.names == other.names else None
return other, result_names
- def insert(self, loc, item):
+ def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index b9b44284edaa9..f7af82920adb1 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,3 +1,5 @@
+from typing import TYPE_CHECKING, Any
+
import numpy as np
from pandas._libs import index as libindex, lib
@@ -38,6 +40,9 @@
)
from pandas.core.ops import get_op_result_name
+if TYPE_CHECKING:
+ from pandas import Series
+
_num_index_shared_docs = dict()
@@ -52,6 +57,7 @@ class NumericIndex(Index):
def __new__(cls, data=None, dtype=None, copy=False, name=None):
cls._validate_dtype(dtype)
+ name = maybe_extract_name(name, data, cls)
# Coerce to ndarray if not already ndarray or Index
if not isinstance(data, (np.ndarray, Index)):
@@ -77,7 +83,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None):
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
- name = maybe_extract_name(name, data, cls)
+ subarr = np.asarray(subarr)
return cls._simple_new(subarr, name=name)
@classmethod
@@ -99,7 +105,7 @@ def _validate_dtype(cls, dtype: Dtype) -> None:
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@@ -160,7 +166,7 @@ def is_all_dates(self) -> bool:
return False
@Appender(Index.insert.__doc__)
- def insert(self, loc, item):
+ def insert(self, loc: int, item):
# treat NA values as nans:
if is_scalar(item) and isna(item):
item = self._na_value
@@ -225,6 +231,8 @@ class IntegerIndex(NumericIndex):
This is an abstract class for Int64Index, UInt64Index.
"""
+ _default_dtype: np.dtype
+
def __contains__(self, key) -> bool:
"""
Check if key is a float and has a decimal. If it has, return False.
@@ -237,36 +245,36 @@ def __contains__(self, key) -> bool:
except (OverflowError, TypeError, ValueError):
return False
-
-class Int64Index(IntegerIndex):
- __doc__ = _num_index_shared_docs["class_descr"] % _int64_descr_args
-
- _typ = "int64index"
- _can_hold_na = False
- _engine_type = libindex.Int64Engine
- _default_dtype = np.int64
-
@property
def inferred_type(self) -> str:
"""
- Always 'integer' for ``Int64Index``
+ Always 'integer' for ``Int64Index`` and ``UInt64Index``
"""
return "integer"
@property
def asi8(self) -> np.ndarray:
# do not cache or you'll create a memory leak
- return self.values.view("i8")
+ return self.values.view(self._default_dtype)
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
# don't coerce ilocs to integers
if kind != "iloc":
key = self._maybe_cast_indexer(key)
return super()._convert_scalar_indexer(key, kind=kind)
+
+class Int64Index(IntegerIndex):
+ __doc__ = _num_index_shared_docs["class_descr"] % _int64_descr_args
+
+ _typ = "int64index"
+ _can_hold_na = False
+ _engine_type = libindex.Int64Engine
+ _default_dtype = np.dtype(np.int64)
+
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return Int64Index(joined, name=name)
@@ -280,9 +288,9 @@ def _assert_safe_casting(cls, data, subarr):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
- def _is_compatible_with_other(self, other):
+ def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
- isinstance(type(obj), (ABCInt64Index, ABCFloat64Index, ABCRangeIndex))
+ isinstance(obj, (ABCInt64Index, ABCFloat64Index, ABCRangeIndex))
for obj in [self, other]
)
@@ -301,28 +309,7 @@ class UInt64Index(IntegerIndex):
_typ = "uint64index"
_can_hold_na = False
_engine_type = libindex.UInt64Engine
- _default_dtype = np.uint64
-
- @property
- def inferred_type(self) -> str:
- """
- Always 'integer' for ``UInt64Index``
- """
- return "integer"
-
- @property
- def asi8(self) -> np.ndarray:
- # do not cache or you'll create a memory leak
- return self.values.view("u8")
-
- @Appender(_index_shared_docs["_convert_scalar_indexer"])
- def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
-
- # don't coerce ilocs to integers
- if kind != "iloc":
- key = self._maybe_cast_indexer(key)
- return super()._convert_scalar_indexer(key, kind=kind)
+ _default_dtype = np.dtype(np.uint64)
@Appender(_index_shared_docs["_convert_arr_indexer"])
def _convert_arr_indexer(self, keyarr):
@@ -358,10 +345,9 @@ def _assert_safe_casting(cls, data, subarr):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
- def _is_compatible_with_other(self, other):
+ def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
- isinstance(type(obj), (ABCUInt64Index, ABCFloat64Index))
- for obj in [self, other]
+ isinstance(obj, (ABCUInt64Index, ABCFloat64Index)) for obj in [self, other]
)
@@ -404,18 +390,15 @@ def astype(self, dtype, copy=True):
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["ix", "loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem", "iloc", None]
if kind == "iloc":
- return self._validate_indexer("positional", key, kind)
+ self._validate_indexer("positional", key, "iloc")
return key
@Appender(_index_shared_docs["_convert_slice_indexer"])
- def _convert_slice_indexer(self, key, kind=None):
- # if we are not a slice, then we are done
- if not isinstance(key, slice):
- return key
+ def _convert_slice_indexer(self, key: slice, kind=None):
if kind == "iloc":
return super()._convert_slice_indexer(key, kind=kind)
@@ -438,18 +421,15 @@ def _format_native_types(
)
return formatter.get_result_as_array()
- def get_value(self, series, key):
+ def get_value(self, series: "Series", key):
"""
We always want to get an index value, never a value.
"""
if not is_scalar(key):
raise InvalidIndexError
- k = com.values_from_object(key)
- loc = self.get_loc(k)
- new_values = com.values_from_object(series)[loc]
-
- return new_values
+ loc = self.get_loc(key)
+ return self._get_values_for_loc(series, loc)
def equals(self, other) -> bool:
"""
@@ -473,38 +453,27 @@ def equals(self, other) -> bool:
except (TypeError, ValueError):
return False
- def __contains__(self, other) -> bool:
+ def __contains__(self, other: Any) -> bool:
+ hash(other)
if super().__contains__(other):
return True
- try:
- # if other is a sequence this throws a ValueError
- return np.isnan(other) and self.hasnans
- except ValueError:
- try:
- return len(other) <= 1 and other.item() in self
- except AttributeError:
- return len(other) <= 1 and other in self
- except TypeError:
- pass
- except TypeError:
- pass
-
- return False
+ return is_float(other) and np.isnan(other) and self.hasnans
@Appender(_index_shared_docs["get_loc"])
def get_loc(self, key, method=None, tolerance=None):
- try:
- if np.all(np.isnan(key)) or is_bool(key):
- nan_idxs = self._nan_idxs
- try:
- return nan_idxs.item()
- except ValueError:
- if not len(nan_idxs):
- raise KeyError(key)
- return nan_idxs
- except (TypeError, NotImplementedError):
- pass
+ if is_bool(key):
+ # Catch this to avoid accidentally casting to 1.0
+ raise KeyError(key)
+
+ if is_float(key) and np.isnan(key):
+ nan_idxs = self._nan_idxs
+ if not len(nan_idxs):
+ raise KeyError(key)
+ elif len(nan_idxs) == 1:
+ return nan_idxs[0]
+ return nan_idxs
+
return super().get_loc(key, method=method, tolerance=tolerance)
@cache_readonly
@@ -517,11 +486,10 @@ def isin(self, values, level=None):
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
- def _is_compatible_with_other(self, other):
+ def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
isinstance(
- type(obj),
- (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),
+ obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),
)
for obj in [self, other]
)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 6ab2e66e05d6e..4438573cb9067 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,12 +1,14 @@
from datetime import datetime, timedelta
+from typing import TYPE_CHECKING, Any
import weakref
import numpy as np
from pandas._libs import index as libindex
-from pandas._libs.tslibs import NaT, frequencies as libfrequencies, iNaT, resolution
+from pandas._libs.tslibs import NaT, frequencies as libfrequencies, resolution
+from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
-from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
ensure_platform_int,
@@ -14,37 +16,34 @@
is_datetime64_any_dtype,
is_dtype_equal,
is_float,
- is_float_dtype,
is_integer,
is_integer_dtype,
+ is_list_like,
is_object_dtype,
+ is_scalar,
pandas_dtype,
)
-from pandas.core.accessor import delegate_names
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
-from pandas.core.base import _shared_docs
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
+ InvalidIndexError,
_index_shared_docs,
ensure_index,
maybe_extract_name,
)
-from pandas.core.indexes.datetimelike import (
- DatetimeIndexOpsMixin,
- DatetimelikeDelegateMixin,
-)
+from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import DatetimeIndex, Index
+from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
-from pandas.core.missing import isna
from pandas.core.ops import get_op_result_name
-from pandas.core.tools.datetimes import DateParseError, parse_time_string
+from pandas.core.tools.datetimes import DateParseError
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
@@ -52,6 +51,8 @@
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
+if TYPE_CHECKING:
+ from pandas import Series
# --- Period index sketch
@@ -67,23 +68,14 @@ def _new_PeriodIndex(cls, **d):
return cls(values, **d)
-class PeriodDelegateMixin(DatetimelikeDelegateMixin):
- """
- Delegate from PeriodIndex to PeriodArray.
- """
-
- _raw_methods = {"_format_native_types"}
- _raw_properties = {"is_leap_year", "freq"}
-
- _delegated_properties = PeriodArray._datetimelike_ops + list(_raw_properties)
- _delegated_methods = set(PeriodArray._datetimelike_methods) | _raw_methods
-
-
-@delegate_names(PeriodArray, PeriodDelegateMixin._delegated_properties, typ="property")
-@delegate_names(
- PeriodArray, PeriodDelegateMixin._delegated_methods, typ="method", overwrite=True
+@inherit_names(
+ ["strftime", "to_timestamp", "asfreq", "start_time", "end_time"]
+ + PeriodArray._field_ops,
+ PeriodArray,
+ wrap=True,
)
-class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin):
+@inherit_names(["is_leap_year", "freq", "_format_native_types"], PeriodArray)
+class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
@@ -233,24 +225,13 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
Parameters
----------
- values : PeriodArray, PeriodIndex, Index[int64], ndarray[int64]
+ values : PeriodArray
Values that can be converted to a PeriodArray without inference
or coercion.
-
"""
- # TODO: raising on floats is tested, but maybe not useful.
- # Should the callers know not to pass floats?
- # At the very least, I think we can ensure that lists aren't passed.
- if isinstance(values, list):
- values = np.asarray(values)
- if is_float_dtype(values):
- raise TypeError("PeriodIndex._simple_new does not accept floats.")
- if freq:
- freq = Period._maybe_convert_freq(freq)
- values = PeriodArray(values, freq=freq)
+ assert isinstance(values, PeriodArray), type(values)
+ assert freq is None or freq == values.freq, (freq, values.freq)
- if not isinstance(values, PeriodArray):
- raise TypeError("PeriodIndex._simple_new only accepts PeriodArray")
result = object.__new__(cls)
result._data = values
# For groupby perf. See note in indexes/base about _index_data
@@ -266,6 +247,11 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
def values(self):
return np.asarray(self)
+ @property
+ def _has_complex_internals(self):
+ # used to avoid libreduction code paths, which raise or require conversion
+ return True
+
def _shallow_copy(self, values=None, **kwargs):
# TODO: simplify, figure out type of values
if values is None:
@@ -370,22 +356,22 @@ def _engine(self):
return self._engine_type(period, len(self))
@Appender(_index_shared_docs["contains"])
- def __contains__(self, key) -> bool:
+ def __contains__(self, key: Any) -> bool:
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
+ hash(key)
try:
self.get_loc(key)
return True
- except (TypeError, KeyError):
- # TypeError can be reached if we pass a tuple that is not hashable
+ except KeyError:
return False
@cache_readonly
- def _int64index(self):
+ def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self.asi8, name=self.name)
# ------------------------------------------------------------------------
@@ -466,23 +452,6 @@ def astype(self, dtype, copy=True, how="start"):
# TODO: should probably raise on `how` here, so we don't ignore it.
return super().astype(dtype, copy=copy)
- @Substitution(klass="PeriodIndex")
- @Appender(_shared_docs["searchsorted"])
- def searchsorted(self, value, side="left", sorter=None):
- if isinstance(value, Period) or value is NaT:
- self._data._check_compatible_with(value)
- elif isinstance(value, str):
- try:
- value = Period(value, freq=self.freq)
- except DateParseError:
- raise KeyError(f"Cannot interpret '{value}' as period")
- elif not isinstance(value, PeriodArray):
- raise TypeError(
- "PeriodIndex.searchsorted requires either a Period or PeriodArray"
- )
-
- return self._data.searchsorted(value, side=side, sorter=sorter)
-
@property
def is_full(self) -> bool:
"""
@@ -502,47 +471,16 @@ def inferred_type(self) -> str:
# indexing
return "period"
- def get_value(self, series, key):
+ def get_value(self, series: "Series", key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
- s = com.values_from_object(series)
- try:
- value = super().get_value(s, key)
- except (KeyError, IndexError):
- if isinstance(key, str):
- asdt, parsed, reso = parse_time_string(key, self.freq)
- grp = resolution.Resolution.get_freq_group(reso)
- freqn = resolution.get_freq_group(self.freq)
-
- vals = self._ndarray_values
-
- # if our data is higher resolution than requested key, slice
- if grp < freqn:
- iv = Period(asdt, freq=(grp, 1))
- ord1 = iv.asfreq(self.freq, how="S").ordinal
- ord2 = iv.asfreq(self.freq, how="E").ordinal
-
- if ord2 < vals[0] or ord1 > vals[-1]:
- raise KeyError(key)
-
- pos = np.searchsorted(self._ndarray_values, [ord1, ord2])
- key = slice(pos[0], pos[1] + 1)
- return series[key]
- elif grp == freqn:
- key = Period(asdt, freq=self.freq).ordinal
- return com.maybe_box(
- self, self._int64index.get_value(s, key), series, key
- )
- else:
- raise KeyError(key)
-
- period = Period(key, self.freq)
- key = period.value if isna(period) else period.ordinal
- return com.maybe_box(self, self._int64index.get_value(s, key), series, key)
+ if is_integer(key):
+ loc = key
else:
- return com.maybe_box(self, value, series, key)
+ loc = self.get_loc(key)
+ return self._get_values_for_loc(series, loc)
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
@@ -579,36 +517,76 @@ def get_indexer_non_unique(self, target):
def get_loc(self, key, method=None, tolerance=None):
"""
- Get integer location for requested label
+ Get integer location for requested label.
+
+ Parameters
+ ----------
+ key : Period, NaT, str, or datetime
+ String or datetime key must be parseable as Period.
Returns
-------
- loc : int
+ loc : int or ndarray[int64]
+
+ Raises
+ ------
+ KeyError
+ Key is not present in the index.
+ TypeError
+ If key is listlike or otherwise not hashable.
"""
- try:
- return self._engine.get_loc(key)
- except KeyError:
- if is_integer(key):
- raise
+
+ if not is_scalar(key):
+ raise InvalidIndexError(key)
+
+ if isinstance(key, str):
try:
- asdt, parsed, reso = parse_time_string(key, self.freq)
- key = asdt
- except TypeError:
+ loc = self._get_string_slice(key)
+ return loc
+ except (TypeError, ValueError):
pass
+
+ try:
+ asdt, reso = parse_time_string(key, self.freq)
except DateParseError:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period")
- try:
- key = Period(key, freq=self.freq)
- except ValueError:
- # we cannot construct the Period
- # as we have an invalid type
+ grp = resolution.Resolution.get_freq_group(reso)
+ freqn = resolution.get_freq_group(self.freq)
+
+ # _get_string_slice will handle cases where grp < freqn
+ assert grp >= freqn
+
+ if grp == freqn:
+ key = Period(asdt, freq=self.freq)
+ loc = self.get_loc(key, method=method, tolerance=tolerance)
+ return loc
+ elif method is None:
raise KeyError(key)
+ else:
+ key = asdt
+
+ elif is_integer(key):
+ # Period constructor will cast to string, which we dont want
+ raise KeyError(key)
+
+ try:
+ key = Period(key, freq=self.freq)
+ except ValueError:
+ # we cannot construct the Period
+ # as we have an invalid type
+ if is_list_like(key):
+ raise TypeError(f"'{key}' is an invalid key")
+ raise KeyError(key)
+
+ ordinal = key.ordinal if key is not NaT else key.value
+ try:
+ return self._engine.get_loc(ordinal)
+ except KeyError:
try:
- ordinal = iNaT if key is NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
@@ -616,7 +594,7 @@ def get_loc(self, key, method=None, tolerance=None):
except KeyError:
raise KeyError(key)
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind: str):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
@@ -625,7 +603,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'}
Returns
-------
@@ -636,13 +614,13 @@ def _maybe_cast_slice_bound(self, label, side, kind):
Value of `side` parameter should be validated in caller.
"""
- assert kind in ["ix", "loc", "getitem"]
+ assert kind in ["loc", "getitem"]
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
- _, parsed, reso = parse_time_string(label, self.freq)
+ parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == "left" else 1]
except ValueError:
@@ -654,61 +632,53 @@ def _maybe_cast_slice_bound(self, label, side, kind):
return label
- def _parsed_string_to_bounds(self, reso, parsed):
- if reso == "year":
- t1 = Period(year=parsed.year, freq="A")
- elif reso == "month":
- t1 = Period(year=parsed.year, month=parsed.month, freq="M")
- elif reso == "quarter":
- q = (parsed.month - 1) // 3 + 1
- t1 = Period(year=parsed.year, quarter=q, freq="Q-DEC")
- elif reso == "day":
- t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, freq="D")
- elif reso == "hour":
- t1 = Period(
- year=parsed.year,
- month=parsed.month,
- day=parsed.day,
- hour=parsed.hour,
- freq="H",
- )
- elif reso == "minute":
- t1 = Period(
- year=parsed.year,
- month=parsed.month,
- day=parsed.day,
- hour=parsed.hour,
- minute=parsed.minute,
- freq="T",
- )
- elif reso == "second":
- t1 = Period(
- year=parsed.year,
- month=parsed.month,
- day=parsed.day,
- hour=parsed.hour,
- minute=parsed.minute,
- second=parsed.second,
- freq="S",
- )
- else:
+ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
+ if reso not in ["year", "month", "quarter", "day", "hour", "minute", "second"]:
raise KeyError(reso)
- return (t1.asfreq(self.freq, how="start"), t1.asfreq(self.freq, how="end"))
- def _get_string_slice(self, key):
- if not self.is_monotonic:
- raise ValueError("Partial indexing only valid for ordered time series")
+ grp = resolution.Resolution.get_freq_group(reso)
+ iv = Period(parsed, freq=(grp, 1))
+ return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
- key, parsed, reso = parse_time_string(key, self.freq)
+ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
+ # TODO: Check for non-True use_lhs/use_rhs
+ parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
- if reso in ["day", "hour", "minute", "second"] and not grp < freqn:
- raise KeyError(key)
+
+ if not grp < freqn:
+ # TODO: we used to also check for
+ # reso in ["day", "hour", "minute", "second"]
+ # why is that check not needed?
+ raise ValueError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
- return slice(
- self.searchsorted(t1, side="left"), self.searchsorted(t2, side="right")
- )
+ i8vals = self.asi8
+
+ if self.is_monotonic:
+
+ # we are out of range
+ if len(self) and (
+ (use_lhs and t1 < self[0] and t2 < self[0])
+ or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
+ ):
+ raise KeyError(key)
+
+ # TODO: does this depend on being monotonic _increasing_?
+ # If so, DTI will also be affected.
+
+ # a monotonic (sorted) series can be sliced
+ # Use asi8.searchsorted to avoid re-validating Periods
+ left = i8vals.searchsorted(t1.ordinal, side="left") if use_lhs else None
+ right = i8vals.searchsorted(t2.ordinal, side="right") if use_rhs else None
+ return slice(left, right)
+
+ else:
+ lhs_mask = (i8vals >= t1.ordinal) if use_lhs else True
+ rhs_mask = (i8vals <= t2.ordinal) if use_rhs else True
+
+ # try to find a the dates
+ return (lhs_mask & rhs_mask).nonzero()[0]
def _convert_tolerance(self, tolerance, target):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target)
@@ -828,9 +798,11 @@ def _union(self, other, sort):
# ------------------------------------------------------------------------
- def _apply_meta(self, rawarr):
+ def _apply_meta(self, rawarr) -> "PeriodIndex":
if not isinstance(rawarr, PeriodIndex):
- rawarr = PeriodIndex._simple_new(rawarr, freq=self.freq, name=self.name)
+ if not isinstance(rawarr, PeriodArray):
+ rawarr = PeriodArray(rawarr, freq=self.freq)
+ rawarr = PeriodIndex._simple_new(rawarr, name=self.name)
return rawarr
def memory_usage(self, deep=False):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index b4cc71a25792f..340397b69c624 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Optional, Union
+from typing import Any, Optional
import warnings
import numpy as np
@@ -14,6 +14,7 @@
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
+ is_float,
is_integer,
is_integer_dtype,
is_list_like,
@@ -26,12 +27,14 @@
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
-from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
+from pandas.core.indexes.base import _index_shared_docs, maybe_extract_name
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.io.formats.printing import pprint_thing
+_empty_range = range(0)
+
class RangeIndex(Int64Index):
"""
@@ -111,7 +114,7 @@ def __new__(
return cls._simple_new(rng, dtype=dtype, name=name)
@classmethod
- def from_range(cls, data, name=None, dtype=None):
+ def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex":
"""
Create RangeIndex from a range object.
@@ -129,15 +132,10 @@ def from_range(cls, data, name=None, dtype=None):
return cls._simple_new(data, dtype=dtype, name=name)
@classmethod
- def _simple_new(cls, values, name=None, dtype=None):
+ def _simple_new(cls, values: range, name=None, dtype=None) -> "RangeIndex":
result = object.__new__(cls)
- # handle passed None, non-integers
- if values is None:
- # empty
- values = range(0, 0, 1)
- elif not isinstance(values, range):
- return Index(values, dtype=dtype, name=name)
+ assert isinstance(values, range)
result._range = values
result.name = name
@@ -334,7 +332,7 @@ def is_monotonic_decreasing(self) -> bool:
def has_duplicates(self) -> bool:
return False
- def __contains__(self, key: Union[int, np.integer]) -> bool:
+ def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
@@ -344,12 +342,14 @@ def __contains__(self, key: Union[int, np.integer]) -> bool:
@Appender(_index_shared_docs["get_loc"])
def get_loc(self, key, method=None, tolerance=None):
- if is_integer(key) and method is None and tolerance is None:
- new_key = int(key)
- try:
- return self._range.index(new_key)
- except ValueError:
- raise KeyError(key)
+ if method is None and tolerance is None:
+ if is_integer(key) or (is_float(key) and key.is_integer()):
+ new_key = int(key)
+ try:
+ return self._range.index(new_key)
+ except ValueError:
+ raise KeyError(key)
+ raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
@Appender(_index_shared_docs["get_indexer"])
@@ -400,7 +400,7 @@ def copy(self, name=None, deep=False, dtype=None, **kwargs):
name = self.name
return self.from_range(self._range, name=name)
- def _minmax(self, meth):
+ def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
@@ -409,19 +409,19 @@ def _minmax(self, meth):
return self.start + self.step * no_steps
- def min(self, axis=None, skipna=True, *args, **kwargs):
+ def min(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
- def max(self, axis=None, skipna=True, *args, **kwargs):
+ def max(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
- def argsort(self, *args, **kwargs):
+ def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
@@ -441,7 +441,7 @@ def argsort(self, *args, **kwargs):
else:
return np.arange(len(self) - 1, -1, -1)
- def equals(self, other):
+ def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
@@ -479,7 +479,7 @@ def intersection(self, other, sort=False):
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
- return self._simple_new(None)
+ return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
@@ -489,7 +489,7 @@ def intersection(self, other, sort=False):
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
- return self._simple_new(None)
+ return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
@@ -499,7 +499,7 @@ def intersection(self, other, sort=False):
# check whether element sets intersect
if (first.start - second.start) % gcd:
- return self._simple_new(None)
+ return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
@@ -519,12 +519,12 @@ def intersection(self, other, sort=False):
new_index = new_index.sort_values()
return new_index
- def _min_fitting_element(self, lower_limit):
+ def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
- def _max_fitting_element(self, upper_limit):
+ def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 1f3182bc83e1d..8691f0a2a1178 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -1,32 +1,32 @@
""" implement the TimedeltaIndex """
-from datetime import datetime
import numpy as np
from pandas._libs import NaT, Timedelta, index as libindex
-from pandas.util._decorators import Appender, Substitution
+from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_float,
is_integer,
- is_list_like,
is_scalar,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype
-from pandas.core.accessor import delegate_names
from pandas.core.arrays import datetimelike as dtl
-from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td
-from pandas.core.base import _shared_docs
+from pandas.core.arrays.timedeltas import TimedeltaArray
import pandas.core.common as com
-from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
+from pandas.core.indexes.base import (
+ Index,
+ InvalidIndexError,
+ _index_shared_docs,
+ maybe_extract_name,
+)
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin,
- DatetimelikeDelegateMixin,
DatetimeTimedeltaMixin,
)
from pandas.core.indexes.extension import inherit_names
@@ -34,22 +34,21 @@
from pandas.tseries.frequencies import to_offset
-class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
- # Most attrs are dispatched via datetimelike_{ops,methods}
- # Some are "raw" methods, the result is not re-boxed in an Index
- # We also have a few "extra" attrs, which may or may not be raw,
- # which we don't want to expose in the .dt accessor.
- _raw_properties = {"components", "_box_func"}
- _raw_methods = {"to_pytimedelta", "sum", "std", "median", "_format_native_types"}
-
- _delegated_properties = TimedeltaArray._datetimelike_ops + list(_raw_properties)
- _delegated_methods = (
- TimedeltaArray._datetimelike_methods
- + list(_raw_methods)
- + ["_box_values", "__neg__", "__pos__", "__abs__"]
- )
-
-
+@inherit_names(
+ [
+ "_box_values",
+ "__neg__",
+ "__pos__",
+ "__abs__",
+ "total_seconds",
+ "round",
+ "floor",
+ "ceil",
+ ]
+ + TimedeltaArray._field_ops,
+ TimedeltaArray,
+ wrap=True,
+)
@inherit_names(
[
"_bool_ops",
@@ -58,21 +57,18 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
"_datetimelike_ops",
"_datetimelike_methods",
"_other_ops",
+ "components",
+ "_box_func",
+ "to_pytimedelta",
+ "sum",
+ "std",
+ "median",
+ "_format_native_types",
+ "freq",
],
TimedeltaArray,
)
-@delegate_names(
- TimedeltaArray, TimedeltaDelegateMixin._delegated_properties, typ="property"
-)
-@delegate_names(
- TimedeltaArray,
- TimedeltaDelegateMixin._delegated_methods,
- typ="method",
- overwrite=True,
-)
-class TimedeltaIndex(
- DatetimeTimedeltaMixin, dtl.TimelikeOps, TimedeltaDelegateMixin,
-):
+class TimedeltaIndex(DatetimeTimedeltaMixin, dtl.TimelikeOps):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects.
@@ -134,6 +130,8 @@ class TimedeltaIndex(
_is_numeric_dtype = True
_infer_as_myclass = True
+ _data: TimedeltaArray
+
# -------------------------------------------------------------------
# Constructors
@@ -161,7 +159,7 @@ def __new__(
"represent unambiguous timedelta values durations."
)
- if isinstance(data, TimedeltaArray):
+ if isinstance(data, TimedeltaArray) and freq is None:
if copy:
data = data.copy()
return cls._simple_new(data, name=name, freq=freq)
@@ -177,12 +175,13 @@ def __new__(
tdarr = TimedeltaArray._from_sequence(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
- return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
+ return cls._simple_new(tdarr, name=name)
@classmethod
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
# `dtype` is passed by _shallow_copy in corner cases, should always
# be timedelta64[ns] if present
+
if not isinstance(values, TimedeltaArray):
values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq)
else:
@@ -236,30 +235,11 @@ def get_value(self, series, key):
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
-
- if _is_convertible_to_td(key):
- key = Timedelta(key)
- return self.get_value_maybe_box(series, key)
-
- try:
- value = Index.get_value(self, series, key)
- except KeyError:
- try:
- loc = self._get_string_slice(key)
- return series[loc]
- except (TypeError, ValueError, KeyError):
- pass
-
- try:
- return self.get_value_maybe_box(series, key)
- except (TypeError, ValueError, KeyError):
- raise KeyError(key)
+ if is_integer(key):
+ loc = key
else:
- return com.maybe_box(self, value, series, key)
-
- def get_value_maybe_box(self, series, key: Timedelta):
- values = self._engine.get_value(com.values_from_object(series), key)
- return com.maybe_box(self, values, series, key)
+ loc = self.get_loc(key)
+ return self._get_values_for_loc(series, loc)
def get_loc(self, key, method=None, tolerance=None):
"""
@@ -267,42 +247,34 @@ def get_loc(self, key, method=None, tolerance=None):
Returns
-------
- loc : int
+ loc : int, slice, or ndarray[int]
"""
- if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
- # GH#20464 datetime check here is to ensure we don't allow
- # datetime objects to be incorrectly treated as timedelta
- # objects; NaT is a special case because it plays a double role
- # as Not-A-Timedelta
- raise TypeError
-
- if isna(key):
+ if not is_scalar(key):
+ raise InvalidIndexError(key)
+
+ if is_valid_nat_for_dtype(key, self.dtype):
key = NaT
+ elif isinstance(key, str):
+ try:
+ key = Timedelta(key)
+ except ValueError:
+ raise KeyError(key)
+
+ elif isinstance(key, self._data._recognized_scalars) or key is NaT:
+ key = Timedelta(key)
+
+ else:
+ raise KeyError(key)
+
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
- if _is_convertible_to_td(key) or key is NaT:
- key = Timedelta(key)
- return Index.get_loc(self, key, method, tolerance)
-
- try:
- return Index.get_loc(self, key, method, tolerance)
- except (KeyError, ValueError, TypeError):
- try:
- return self._get_string_slice(key)
- except (TypeError, KeyError, ValueError):
- pass
-
- try:
- stamp = Timedelta(key)
- return Index.get_loc(self, stamp, method, tolerance)
- except (KeyError, ValueError):
- raise KeyError(key)
+ return Index.get_loc(self, key, method, tolerance)
- def _maybe_cast_slice_bound(self, label, side, kind):
+ def _maybe_cast_slice_bound(self, label, side: str, kind):
"""
If label is a string, cast it to timedelta according to resolution.
@@ -310,13 +282,13 @@ def _maybe_cast_slice_bound(self, label, side, kind):
----------
label : object
side : {'left', 'right'}
- kind : {'ix', 'loc', 'getitem'}
+ kind : {'loc', 'getitem'} or None
Returns
-------
label : object
"""
- assert kind in ["ix", "loc", "getitem", None]
+ assert kind in ["loc", "getitem", None]
if isinstance(label, str):
parsed = Timedelta(label)
@@ -330,44 +302,12 @@ def _maybe_cast_slice_bound(self, label, side, kind):
return label
- def _get_string_slice(self, key):
- if is_integer(key) or is_float(key) or key is NaT:
- self._invalid_indexer("slice", key)
- loc = self._partial_td_slice(key)
- return loc
-
- def _partial_td_slice(self, key):
-
+ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
+ # TODO: Check for non-True use_lhs/use_rhs
+ assert isinstance(key, str), type(key)
# given a key, try to figure out a location for a partial slice
- if not isinstance(key, str):
- return key
-
raise NotImplementedError
- @Substitution(klass="TimedeltaIndex")
- @Appender(_shared_docs["searchsorted"])
- def searchsorted(self, value, side="left", sorter=None):
- if isinstance(value, (np.ndarray, Index)):
- if not type(self._data)._is_recognized_dtype(value):
- raise TypeError(
- "searchsorted requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
- )
- value = type(self._data)(value)
- self._data._check_compatible_with(value)
-
- elif isinstance(value, self._data._recognized_scalars):
- self._data._check_compatible_with(value)
- value = self._data._scalar_type(value)
-
- elif not isinstance(value, TimedeltaArray):
- raise TypeError(
- "searchsorted requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
- )
-
- return self._data.searchsorted(value, side=side, sorter=sorter)
-
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "timedelta"
@@ -375,61 +315,6 @@ def is_type_compatible(self, typ) -> bool:
def inferred_type(self) -> str:
return "timedelta64"
- def insert(self, loc, item):
- """
- Make new Index inserting new item at location
-
- Parameters
- ----------
- loc : int
- item : object
- If not either a Python datetime or a numpy integer-like, returned
- Index dtype will be object rather than datetime.
-
- Returns
- -------
- new_index : Index
- """
- # try to convert if possible
- if isinstance(item, self._data._recognized_scalars):
- item = self._data._scalar_type(item)
- elif is_valid_nat_for_dtype(item, self.dtype):
- # GH 18295
- item = self._na_value
- elif is_scalar(item) and isna(item):
- # i.e. datetime64("NaT")
- raise TypeError(
- f"cannot insert {type(self).__name__} with incompatible label"
- )
-
- freq = None
- if isinstance(item, self._data._scalar_type) or item is NaT:
- self._data._check_compatible_with(item, setitem=True)
-
- # check freq can be preserved on edge cases
- if self.size and self.freq is not None:
- if item is NaT:
- pass
- elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
- freq = self.freq
- elif (loc == len(self)) and item - self.freq == self[-1]:
- freq = self.freq
- item = item.asm8
-
- try:
- new_i8s = np.concatenate(
- (self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)
- )
- return self._shallow_copy(new_i8s, freq=freq)
- except (AttributeError, TypeError):
-
- # fall back to object index
- if isinstance(item, str):
- return self.astype(object).insert(loc, item)
- raise TypeError(
- f"cannot insert {type(self).__name__} with incompatible label"
- )
-
TimedeltaIndex._add_logical_methods_disabled()
@@ -507,4 +392,4 @@ def timedelta_range(
freq, freq_infer = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
- return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
+ return TimedeltaIndex._simple_new(tdarr, name=name)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index ea59a6a49e649..7e56148b7569e 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -15,7 +15,6 @@
is_numeric_dtype,
is_scalar,
is_sequence,
- is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries
@@ -23,11 +22,12 @@
import pandas.core.common as com
from pandas.core.indexers import (
- check_bool_array_indexer,
+ check_array_indexer,
is_list_like_indexer,
length_of_indexer,
)
-from pandas.core.indexes.api import Index, InvalidIndexError
+from pandas.core.indexes.api import Index
+from pandas.core.indexes.base import InvalidIndexError
# "null slice"
_NS = slice(None, None)
@@ -579,39 +579,6 @@ def __call__(self, axis=None):
new_self.axis = axis
return new_self
- # TODO: remove once geopandas no longer needs this
- def __getitem__(self, key):
- # Used in ix and downstream in geopandas _CoordinateIndexer
- if type(key) is tuple:
- # Note: we check the type exactly instead of with isinstance
- # because NamedTuple is checked separately.
- key = tuple(com.apply_if_callable(x, self.obj) for x in key)
- try:
- values = self.obj._get_value(*key)
- except (KeyError, TypeError, InvalidIndexError, AttributeError):
- # TypeError occurs here if the key has non-hashable entries,
- # generally slice or list.
- # TODO(ix): most/all of the TypeError cases here are for ix,
- # so this check can be removed once ix is removed.
- # The InvalidIndexError is only catched for compatibility
- # with geopandas, see
- # https://github.com/pandas-dev/pandas/issues/27258
- # TODO: The AttributeError is for IntervalIndex which
- # incorrectly implements get_value, see
- # https://github.com/pandas-dev/pandas/issues/27865
- pass
- else:
- if is_scalar(values):
- return values
-
- return self._getitem_tuple(key)
- else:
- # we by definition only have the 0th axis
- axis = self.axis or 0
-
- key = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(key, axis=axis)
-
def _get_label(self, label, axis: int):
if self.ndim == 1:
# for perf reasons we want to try _xs first
@@ -639,7 +606,7 @@ def _get_setitem_indexer(self, key):
if isinstance(ax, ABCMultiIndex) and self.name != "iloc":
try:
return ax.get_loc(key)
- except (TypeError, KeyError):
+ except (TypeError, KeyError, InvalidIndexError):
# TypeError e.g. passed a bool
pass
@@ -652,9 +619,8 @@ def _get_setitem_indexer(self, key):
if isinstance(key, range):
return list(key)
- axis = self.axis or 0
try:
- return self._convert_to_indexer(key, axis=axis)
+ return self._convert_to_indexer(key, axis=0)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
@@ -1340,9 +1306,6 @@ def _multi_take(self, tup: Tuple):
}
return o._reindex_with_indexers(d, copy=True, allow_dups=True)
- def _convert_for_reindex(self, key, axis: int):
- return key
-
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
@@ -1463,42 +1426,6 @@ def _getitem_nested_tuple(self, tup: Tuple):
return obj
- # TODO: remove once geopandas no longer needs __getitem__
- def _getitem_axis(self, key, axis: int):
- if is_iterator(key):
- key = list(key)
- self._validate_key(key, axis)
-
- labels = self.obj._get_axis(axis)
- if isinstance(key, slice):
- return self._get_slice_axis(key, axis=axis)
- elif is_list_like_indexer(key) and not (
- isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)
- ):
-
- if hasattr(key, "ndim") and key.ndim > 1:
- raise ValueError("Cannot index with multidimensional key")
-
- return self._getitem_iterable(key, axis=axis)
- else:
-
- # maybe coerce a float scalar to integer
- key = labels._maybe_cast_indexer(key)
-
- if is_integer(key):
- if axis == 0 and isinstance(labels, ABCMultiIndex):
- try:
- return self._get_label(key, axis=axis)
- except (KeyError, TypeError):
- if self.obj.index.levels[0].is_integer():
- raise
-
- # this is the fallback! (for a non-float, non-integer index)
- if not labels.is_floating() and not labels.is_integer():
- return self._get_loc(key, axis=axis)
-
- return self._get_label(key, axis=axis)
-
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
"""
Transform a list-like of keys into a new index and an indexer.
@@ -1539,18 +1466,12 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
return ax[indexer], indexer
if ax.is_unique and not getattr(ax, "is_overlapping", False):
- # If we are trying to get actual keys from empty Series, we
- # patiently wait for a KeyError later on - otherwise, convert
- if len(ax) or not len(key):
- key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
- self._validate_read_indexer(
- keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing
- )
+ self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
return keyarr, indexer
def _getitem_iterable(self, key, axis: int):
@@ -1588,7 +1509,7 @@ def _getitem_iterable(self, key, axis: int):
# A boolean indexer
key = check_bool_indexer(labels, key)
(inds,) = key.nonzero()
- return self.obj.take(inds, axis=axis)
+ return self.obj._take_with_is_copy(inds, axis=axis)
else:
# A collection of keys
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
@@ -1656,7 +1577,7 @@ def _validate_read_indexer(
"https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
- def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
+ def _convert_to_indexer(self, key, axis: int):
"""
Convert indexing key into something we can use to do actual fancy
indexing on a ndarray.
@@ -1673,33 +1594,36 @@ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
"""
labels = self.obj._get_axis(axis)
- if isinstance(obj, slice):
- return self._convert_slice_indexer(obj, axis)
+ if isinstance(key, slice):
+ return self._convert_slice_indexer(key, axis)
# try to find out correct indexer, if not type correct raise
try:
- obj = self._convert_scalar_indexer(obj, axis)
+ key = self._convert_scalar_indexer(key, axis)
except TypeError:
# but we will allow setting
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
- is_int_positional = is_integer(obj) and not is_int_index
+ is_int_positional = is_integer(key) and not is_int_index
- # if we are a label return me
- try:
- return labels.get_loc(obj)
- except LookupError:
- if isinstance(obj, tuple) and isinstance(labels, ABCMultiIndex):
- if len(obj) == labels.nlevels:
- return {"key": obj}
- raise
- except TypeError:
- pass
- except ValueError:
- if not is_int_positional:
- raise
+ if is_scalar(key) or isinstance(labels, ABCMultiIndex):
+ # Otherwise get_loc will raise InvalidIndexError
+
+ # if we are a label return me
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
+ if len(key) == labels.nlevels:
+ return {"key": key}
+ raise
+ except TypeError:
+ pass
+ except ValueError:
+ if not is_int_positional:
+ raise
# a positional
if is_int_positional:
@@ -1709,54 +1633,47 @@ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
if self.name == "loc":
# always valid
- return {"key": obj}
+ return {"key": key}
- if obj >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex):
+ if key >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex):
# a positional
raise ValueError("cannot set by positional indexing with enlargement")
- return obj
+ return key
- if is_nested_tuple(obj, labels):
- return labels.get_locs(obj)
+ if is_nested_tuple(key, labels):
+ return labels.get_locs(key)
- elif is_list_like_indexer(obj):
+ elif is_list_like_indexer(key):
- if com.is_bool_indexer(obj):
- obj = check_bool_indexer(labels, obj)
- (inds,) = obj.nonzero()
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(labels, key)
+ (inds,) = key.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
- return self._get_listlike_indexer(obj, axis, raise_missing=True)[1]
+ return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
else:
try:
- return labels.get_loc(obj)
+ return labels.get_loc(key)
except LookupError:
# allow a not found key only if we are a setter
- if not is_list_like_indexer(obj):
- return {"key": obj}
+ if not is_list_like_indexer(key):
+ return {"key": key}
raise
- def _get_slice_axis(self, slice_obj: slice, axis: int):
- # caller is responsible for ensuring non-None axis
- obj = self.obj
-
- if not need_slice(slice_obj):
- return obj.copy(deep=False)
-
- indexer = self._convert_slice_indexer(slice_obj, axis)
- return self._slice(indexer, axis=axis, kind="iloc")
-
class _LocationIndexer(_NDFrameIndexer):
+ _takeable: bool = False
+
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
if self._is_scalar_access(key):
try:
- return self._getitem_scalar(key)
+ return self.obj._get_value(*key, takeable=self._takeable)
except (KeyError, IndexError, AttributeError):
+ # AttributeError for IntervalTree get_value
pass
return self._getitem_tuple(key)
else:
@@ -1769,9 +1686,6 @@ def __getitem__(self, key):
def _is_scalar_access(self, key: Tuple):
raise NotImplementedError()
- def _getitem_scalar(self, key):
- raise NotImplementedError()
-
def _getitem_axis(self, key, axis: int):
raise NotImplementedError()
@@ -1780,28 +1694,7 @@ def _getbool_axis(self, key, axis: int):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds = key.nonzero()[0]
- return self.obj.take(inds, axis=axis)
-
- def _get_slice_axis(self, slice_obj: slice, axis: int):
- """
- This is pretty simple as we just have to deal with labels.
- """
- # caller is responsible for ensuring non-None axis
- obj = self.obj
- if not need_slice(slice_obj):
- return obj.copy(deep=False)
-
- labels = obj._get_axis(axis)
- indexer = labels.slice_indexer(
- slice_obj.start, slice_obj.stop, slice_obj.step, kind=self.name
- )
-
- if isinstance(indexer, slice):
- return self._slice(indexer, axis=axis, kind="iloc")
- else:
- # DatetimeIndex overrides Index.slice_indexer and may
- # return a DatetimeIndex instead of a slice object.
- return self.obj.take(indexer, axis=axis)
+ return self.obj._take_with_is_copy(inds, axis=axis)
@Appender(IndexingMixin.loc.__doc__)
@@ -1860,12 +1753,6 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
- def _getitem_scalar(self, key):
- # a fast-path to scalar access
- # if not, raise
- values = self.obj._get_value(*key)
- return values
-
def _get_partial_string_timestamp_match_key(self, key, labels):
"""
Translate any partial string timestamp matches in key, returning the
@@ -1963,6 +1850,27 @@ def _getitem_axis(self, key, axis: int):
self._validate_key(key, axis)
return self._get_label(key, axis=axis)
+ def _get_slice_axis(self, slice_obj: slice, axis: int):
+ """
+ This is pretty simple as we just have to deal with labels.
+ """
+ # caller is responsible for ensuring non-None axis
+ obj = self.obj
+ if not need_slice(slice_obj):
+ return obj.copy(deep=False)
+
+ labels = obj._get_axis(axis)
+ indexer = labels.slice_indexer(
+ slice_obj.start, slice_obj.stop, slice_obj.step, kind=self.name
+ )
+
+ if isinstance(indexer, slice):
+ return self._slice(indexer, axis=axis, kind="iloc")
+ else:
+ # DatetimeIndex overrides Index.slice_indexer and may
+ # return a DatetimeIndex instead of a slice object.
+ return self.obj.take(indexer, axis=axis)
+
@Appender(IndexingMixin.iloc.__doc__)
class _iLocIndexer(_LocationIndexer):
@@ -1970,7 +1878,7 @@ class _iLocIndexer(_LocationIndexer):
"integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array"
)
- _get_slice_axis = _NDFrameIndexer._get_slice_axis
+ _takeable = True
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
@@ -2035,12 +1943,6 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return True
- def _getitem_scalar(self, key):
- # a fast-path to scalar access
- # if not, raise
- values = self.obj._get_value(*key, takeable=True)
- return values
-
def _validate_integer(self, key: int, axis: int) -> None:
"""
Check that 'key' is a valid position in the desired axis.
@@ -2107,7 +2009,7 @@ def _get_list_axis(self, key, axis: int):
`axis` can only be zero.
"""
try:
- return self.obj.take(key, axis=axis)
+ return self.obj._take_with_is_copy(key, axis=axis)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
@@ -2138,21 +2040,30 @@ def _getitem_axis(self, key, axis: int):
return self._get_loc(key, axis=axis)
- # raise_missing is included for compat with the parent class signature
- def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):
+ def _get_slice_axis(self, slice_obj: slice, axis: int):
+ # caller is responsible for ensuring non-None axis
+ obj = self.obj
+
+ if not need_slice(slice_obj):
+ return obj.copy(deep=False)
+
+ indexer = self._convert_slice_indexer(slice_obj, axis)
+ return self._slice(indexer, axis=axis, kind="iloc")
+
+ def _convert_to_indexer(self, key, axis: int):
"""
Much simpler as we only have to deal with our valid types.
"""
# make need to convert a float key
- if isinstance(obj, slice):
- return self._convert_slice_indexer(obj, axis)
+ if isinstance(key, slice):
+ return self._convert_slice_indexer(key, axis)
- elif is_float(obj):
- return self._convert_scalar_indexer(obj, axis)
+ elif is_float(key):
+ return self._convert_scalar_indexer(key, axis)
try:
- self._validate_key(obj, axis)
- return obj
+ self._validate_key(key, axis)
+ return key
except ValueError:
raise ValueError(f"Can only index by location with a [{self._valid_types}]")
@@ -2189,8 +2100,7 @@ def __setitem__(self, key, value):
if len(key) != self.ndim:
raise ValueError("Not enough indexers for scalar access (setting)!")
key = list(self._convert_key(key, is_setter=True))
- key.append(value)
- self.obj._set_value(*key, takeable=self._takeable)
+ self.obj._set_value(*key, value=value, takeable=self._takeable)
@Appender(IndexingMixin.at.__doc__)
@@ -2214,7 +2124,7 @@ def _convert_key(self, key, is_setter: bool = False):
"can only have integer indexers"
)
else:
- if is_integer(i) and not ax.holds_integer():
+ if is_integer(i) and not (ax.holds_integer() or ax.is_floating()):
raise ValueError(
"At based indexing on an non-integer "
"index can only have non-integer "
@@ -2320,9 +2230,9 @@ def check_bool_indexer(index: Index, key) -> np.ndarray:
)
result = result.astype(bool)._values
else:
- if is_sparse(result):
- result = result.to_dense()
- result = check_bool_array_indexer(index, result)
+ # key might be sparse / object-dtype bool, check_array_indexer needs bool array
+ result = np.asarray(result, dtype=bool)
+ result = check_array_indexer(index, result)
return result
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index f74033924f64e..22901051ec345 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -66,7 +66,14 @@
)
import pandas.core.algorithms as algos
-from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray
+from pandas.core.arrays import (
+ Categorical,
+ DatetimeArray,
+ ExtensionArray,
+ PandasArray,
+ PandasDtype,
+ TimedeltaArray,
+)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import extract_array
@@ -192,16 +199,29 @@ def is_categorical_astype(self, dtype):
return False
- def external_values(self, dtype=None):
- """ return an outside world format, currently just the ndarray """
+ def external_values(self):
+ """
+ The array that Series.values returns (public attribute).
+
+ This has some historical constraints, and is overridden in block
+ subclasses to return the correct array (e.g. period returns
+ object ndarray and datetimetz a datetime64[ns] ndarray instead of
+ proper extension array).
+ """
return self.values
- def internal_values(self, dtype=None):
- """ return an internal format, currently just the ndarray
- this should be the pure internal API format
+ def internal_values(self):
+ """
+ The array that Series._values returns (internal values).
"""
return self.values
+ def array_values(self) -> ExtensionArray:
+ """
+ The array that Series.array returns. Always an ExtensionArray.
+ """
+ return PandasArray(self.values)
+
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
@@ -362,13 +382,17 @@ def delete(self, loc):
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
- def apply(self, func, **kwargs):
+ def apply(self, func, **kwargs) -> List["Block"]:
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all="ignore"):
result = func(self.values, **kwargs)
+ return self._split_op_result(result)
+
+ def _split_op_result(self, result) -> List["Block"]:
+ # See also: split_and_operate
if is_extension_array_dtype(result) and result.ndim > 1:
# if we get a 2D ExtensionArray, we need to split it into 1D pieces
nbs = []
@@ -382,7 +406,7 @@ def apply(self, func, **kwargs):
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result, ndim=self.ndim))
- return result
+ return [result]
def fillna(self, value, limit=None, inplace=False, downcast=None):
""" fillna on the block with the value. If we fail, then convert to
@@ -852,7 +876,11 @@ def setitem(self, indexer, value):
# length checking
check_setitem_lengths(indexer, value, values)
-
+ exact_match = (
+ len(arr_value.shape)
+ and arr_value.shape[0] == values.shape[0]
+ and arr_value.size == values.size
+ )
if is_empty_indexer(indexer, arr_value):
# GH#8669 empty indexers
pass
@@ -862,14 +890,21 @@ def setitem(self, indexer, value):
# be e.g. a list; see GH#6043
values[indexer] = value
- # if we are an exact match (ex-broadcasting),
- # then use the resultant dtype
elif (
- len(arr_value.shape)
- and arr_value.shape[0] == values.shape[0]
- and arr_value.size == values.size
+ exact_match
+ and is_categorical_dtype(arr_value.dtype)
+ and not is_categorical_dtype(values)
):
+ # GH25495 - If the current dtype is not categorical,
+ # we need to create a new categorical block
values[indexer] = value
+ return self.make_block(Categorical(self.values, dtype=arr_value.dtype))
+
+ # if we are an exact match (ex-broadcasting),
+ # then use the resultant dtype
+ elif exact_match:
+ values[indexer] = value
+
try:
values = values.astype(arr_value.dtype)
except ValueError:
@@ -1270,7 +1305,10 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
def diff(self, n: int, axis: int = 1) -> List["Block"]:
""" return block for the diff of the values """
- new_values = algos.diff(self.values, n, axis=axis)
+ new_values = algos.diff(self.values, n, axis=axis, stacklevel=7)
+ # We use block_shape for ExtensionBlock subclasses, which may call here
+ # via a super.
+ new_values = _block_shape(new_values, ndim=self.ndim)
return [self.make_block(values=new_values)]
def shift(self, periods, axis=0, fill_value=None):
@@ -1352,8 +1390,7 @@ def where(
if not hasattr(cond, "shape"):
raise ValueError("where must have a condition that is ndarray like")
- # our where function
- def func(cond, values, other):
+ def where_func(cond, values, other):
if not (
(self.is_integer or self.is_bool)
@@ -1364,8 +1401,11 @@ def func(cond, values, other):
if not self._can_hold_element(other):
raise TypeError
if lib.is_scalar(other) and isinstance(values, np.ndarray):
+ # convert datetime to datetime64, timedelta to timedelta64
other = convert_scalar(values, other)
+ # By the time we get here, we should have all Series/Index
+ # args extracted to ndarray
fastres = expressions.where(cond, values, other)
return fastres
@@ -1375,7 +1415,7 @@ def func(cond, values, other):
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
- result = func(cond, values, other)
+ result = where_func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
@@ -1767,6 +1807,9 @@ def get_values(self, dtype=None):
values = values.reshape((1,) + values.shape)
return values
+ def array_values(self) -> ExtensionArray:
+ return self.values
+
def to_dense(self):
return np.asarray(self.values)
@@ -1850,6 +1893,12 @@ def interpolate(
placement=self.mgr_locs,
)
+ def diff(self, n: int, axis: int = 1) -> List["Block"]:
+ if axis == 1:
+ # we are by definition 1D.
+ axis = 0
+ return super().diff(n, axis)
+
def shift(
self,
periods: int,
@@ -1962,7 +2011,7 @@ class ObjectValuesExtensionBlock(ExtensionBlock):
Series[T].values is an ndarray of objects.
"""
- def external_values(self, dtype=None):
+ def external_values(self):
return self.values.astype(object)
@@ -2102,6 +2151,10 @@ def get_values(self, dtype=None):
return result.reshape(self.values.shape)
return self.values
+ def internal_values(self):
+ # Override to return DatetimeArray and TimedeltaArray
+ return self.array_values()
+
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
@@ -2224,6 +2277,9 @@ def set(self, locs, values):
def external_values(self):
return np.asarray(self.values.astype("datetime64[ns]", copy=False))
+ def array_values(self) -> ExtensionArray:
+ return DatetimeArray._simple_new(self.values)
+
class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
@@ -2232,6 +2288,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
is_datetimetz = True
is_extension = True
+ internal_values = Block.internal_values
_can_hold_element = DatetimeBlock._can_hold_element
to_native_types = DatetimeBlock.to_native_types
fill_value = np.datetime64("NaT", "ns")
@@ -2478,9 +2535,12 @@ def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
)
return rvalues
- def external_values(self, dtype=None):
+ def external_values(self):
return np.asarray(self.values.astype("timedelta64[ns]", copy=False))
+ def array_values(self) -> ExtensionArray:
+ return TimedeltaArray._simple_new(self.values)
+
class BoolBlock(NumericBlock):
__slots__ = ()
@@ -2984,7 +3044,6 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None):
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, given the result """
- from pandas.core.internals import BlockManager
if blocks is None:
blocks = []
@@ -2994,9 +3053,8 @@ def _extend_blocks(result, blocks=None):
blocks.extend(r)
else:
blocks.append(r)
- elif isinstance(result, BlockManager):
- blocks.extend(result.blocks)
else:
+ assert isinstance(result, Block), type(result)
blocks.append(result)
return blocks
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index c6f30ef65e9d5..c75373b82305c 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -350,7 +350,7 @@ def _get_empty_dtype_and_na(join_units):
dtype = upcast_classes["datetimetz"]
return dtype[0], tslibs.NaT
elif "datetime" in upcast_classes:
- return np.dtype("M8[ns]"), tslibs.iNaT
+ return np.dtype("M8[ns]"), np.datetime64("NaT", "ns")
elif "timedelta" in upcast_classes:
return np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")
else: # pragma
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 3a92cfd9bf16d..798386825d802 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -74,7 +74,7 @@ def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
return create_block_manager_from_arrays(arrays, arr_names, axes)
-def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
+def masked_rec_array_to_mgr(data, index, columns, dtype, copy: bool):
"""
Extract from a masked rec array and create the manager.
"""
@@ -143,7 +143,7 @@ def init_ndarray(values, index, columns, dtype=None, copy=False):
):
if not hasattr(values, "dtype"):
- values = prep_ndarray(values, copy=copy)
+ values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
@@ -166,7 +166,7 @@ def init_ndarray(values, index, columns, dtype=None, copy=False):
# by definition an array here
# the dtypes will be coerced to a single dtype
- values = prep_ndarray(values, copy=copy)
+ values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
@@ -257,7 +257,7 @@ def init_dict(data, index, columns, dtype=None):
# ---------------------------------------------------------------------
-def prep_ndarray(values, copy=True) -> np.ndarray:
+def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
@@ -598,29 +598,24 @@ def convert(arr):
# Series-Based
-def sanitize_index(data, index, copy=False):
+def sanitize_index(data, index: Index):
"""
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
"""
- if index is None:
- return data
-
if len(data) != len(index):
raise ValueError("Length of values does not match length of index")
- if isinstance(data, ABCIndexClass) and not copy:
+ if isinstance(data, ABCIndexClass):
pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
- if copy:
- data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ["M", "m"]:
- data = sanitize_array(data, index, copy=copy)
+ data = sanitize_array(data, index, copy=False)
return data
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 066689b3e374e..526863d2e5ec3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -3,7 +3,7 @@
import itertools
import operator
import re
-from typing import List, Optional, Sequence, Tuple, Union
+from typing import Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
@@ -181,7 +181,7 @@ def set_axis(self, axis, new_labels):
self.axes[axis] = new_labels
- def rename_axis(self, mapper, axis, copy=True, level=None):
+ def rename_axis(self, mapper, axis, copy: bool = True, level=None):
"""
Rename one of axes.
@@ -189,7 +189,7 @@ def rename_axis(self, mapper, axis, copy=True, level=None):
----------
mapper : unary callable
axis : int
- copy : boolean, default True
+ copy : bool, default True
level : int, default None
"""
obj = self.copy(deep=copy)
@@ -197,7 +197,7 @@ def rename_axis(self, mapper, axis, copy=True, level=None):
return obj
@property
- def _is_single_block(self):
+ def _is_single_block(self) -> bool:
if self.ndim == 1:
return True
@@ -279,30 +279,7 @@ def unpickle_block(values, mgr_locs):
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
- # discard anything after 3rd, support beta pickling format for a
- # little while longer
- ax_arrays, bvalues, bitems = state[:3]
-
- self.axes = [ensure_index(ax) for ax in ax_arrays]
-
- if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
- # This is a workaround for pre-0.14.1 pickles that didn't
- # support unpickling multi-block frames/panels with non-unique
- # columns/items, because given a manager with items ["a", "b",
- # "a"] there's no way of knowing which block's "a" is where.
- #
- # Single-block case can be supported under the assumption that
- # block items corresponded to manager items 1-to-1.
- all_mgr_locs = [slice(0, len(bitems[0]))]
- else:
- all_mgr_locs = [
- self.axes[0].get_indexer(blk_items) for blk_items in bitems
- ]
-
- self.blocks = tuple(
- unpickle_block(values, mgr_locs)
- for values, mgr_locs in zip(bvalues, all_mgr_locs)
- )
+ raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
self._post_setstate()
@@ -464,9 +441,9 @@ def quantile(
Parameters
----------
axis: reduction axis, default 0
- consolidate: boolean, default True. Join together blocks having same
+ consolidate: bool, default True. Join together blocks having same
dtype
- transposed: boolean, default False
+ transposed: bool, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
@@ -548,7 +525,9 @@ def get_axe(block, qs, axes):
values = values.take(indexer)
return SingleBlockManager(
- [make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
+ make_block(values, ndim=1, placement=np.arange(len(values))),
+ axes[0],
+ fastpath=True,
)
def isna(self, func):
@@ -658,30 +637,24 @@ def _consolidate_check(self):
self._known_consolidated = True
@property
- def is_mixed_type(self):
+ def is_mixed_type(self) -> bool:
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
- def is_numeric_mixed_type(self):
+ def is_numeric_mixed_type(self) -> bool:
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
- def is_datelike_mixed_type(self):
- # Warning, consolidation needs to get checked upstairs
- self._consolidate_inplace()
- return any(block.is_datelike for block in self.blocks)
-
- @property
- def any_extension_types(self):
+ def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
- def is_view(self):
+ def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
@@ -695,21 +668,21 @@ def is_view(self):
return False
- def get_bool_data(self, copy=False):
+ def get_bool_data(self, copy: bool = False):
"""
Parameters
----------
- copy : boolean, default False
+ copy : bool, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
- def get_numeric_data(self, copy=False):
+ def get_numeric_data(self, copy: bool = False):
"""
Parameters
----------
- copy : boolean, default False
+ copy : bool, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
@@ -795,16 +768,14 @@ def copy_func(ax):
res.axes = new_axes
return res
- def as_array(self, transpose=False, items=None):
- """Convert the blockmanager data into an numpy array.
+ def as_array(self, transpose: bool = False) -> np.ndarray:
+ """
+ Convert the blockmanager data into an numpy array.
Parameters
----------
- transpose : boolean, default False
- If True, transpose the return array
- items : list of strings or None
- Names of block items that will be included in the returned
- array. ``None`` means that all block items will be used
+ transpose : bool, default False
+ If True, transpose the return array,
Returns
-------
@@ -814,10 +785,7 @@ def as_array(self, transpose=False, items=None):
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
- if items is not None:
- mgr = self.reindex_axis(items, axis=0)
- else:
- mgr = self
+ mgr = self
if self._is_single_block and mgr.blocks[0].is_datetimetz:
# TODO(Block.get_values): Make DatetimeTZBlock.get_values
@@ -859,13 +827,13 @@ def _interleave(self):
return result
- def to_dict(self, copy=True):
+ def to_dict(self, copy: bool = True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
- copy : boolean, default True
+ copy : bool, default True
Returns
-------
@@ -877,7 +845,7 @@ def to_dict(self, copy=True):
"""
self._consolidate_inplace()
- bd = {}
+ bd: Dict[str, List[Block]] = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
@@ -978,21 +946,18 @@ def get(self, item):
def iget(self, i):
"""
- Return the data as a SingleBlockManager if possible
-
- Otherwise return as a ndarray
+ Return the data as a SingleBlockManager.
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
- [
- block.make_block_same_class(
- values, placement=slice(0, len(values)), ndim=1
- )
- ],
+ block.make_block_same_class(
+ values, placement=slice(0, len(values)), ndim=1
+ ),
self.axes[1],
+ fastpath=True,
)
def delete(self, item):
@@ -1341,7 +1306,7 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
- newblk = blk.copy(deep=True)
+ newblk = blk.copy(deep=False)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
@@ -1394,7 +1359,7 @@ def take(self, indexer, axis=1, verify=True, convert=True):
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
- def equals(self, other):
+ def equals(self, other) -> bool:
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
@@ -1419,7 +1384,8 @@ def canonicalize(block):
)
def unstack(self, unstacker_func, fill_value):
- """Return a blockmanager with all blocks unstacked.
+ """
+ Return a BlockManager with all blocks unstacked..
Parameters
----------
@@ -1539,7 +1505,7 @@ def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
- return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True)
+ return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True,)
@property
def index(self):
@@ -1560,9 +1526,11 @@ def get_dtypes(self):
return np.array([self._block.dtype])
def external_values(self):
+ """The array that Series.values returns"""
return self._block.external_values()
def internal_values(self):
+ """The array that Series._values returns"""
return self._block.internal_values()
def get_values(self):
@@ -1570,7 +1538,7 @@ def get_values(self):
return np.array(self._block.to_dense(), copy=False)
@property
- def _can_hold_na(self):
+ def _can_hold_na(self) -> bool:
return self._block._can_hold_na
def is_consolidated(self):
@@ -1599,7 +1567,7 @@ def fast_xs(self, loc):
"""
return self._block.values[loc]
- def concat(self, to_concat, new_axis):
+ def concat(self, to_concat, new_axis) -> "SingleBlockManager":
"""
Concatenate a list of SingleBlockManagers into a single
SingleBlockManager.
@@ -1614,7 +1582,6 @@ def concat(self, to_concat, new_axis):
Returns
-------
SingleBlockManager
-
"""
non_empties = [x for x in to_concat if len(x) > 0]
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 6b03e76a1d691..2bf2be082f639 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -8,6 +8,7 @@
from pandas._config import get_option
from pandas._libs import NaT, Timedelta, Timestamp, iNaT, lib
+from pandas._typing import Dtype, Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
@@ -37,7 +38,7 @@
_USE_BOTTLENECK = False
-def set_use_bottleneck(v=True):
+def set_use_bottleneck(v: bool = True) -> None:
# set/unset to use bottleneck
global _USE_BOTTLENECK
if _BOTTLENECK_INSTALLED:
@@ -93,7 +94,9 @@ def __call__(self, alt):
bn_func = None
@functools.wraps(alt)
- def f(values, axis=None, skipna=True, **kwds):
+ def f(
+ values: np.ndarray, axis: Optional[int] = None, skipna: bool = True, **kwds
+ ):
if len(self.kwargs) > 0:
for k, v in self.kwargs.items():
if k not in kwds:
@@ -129,10 +132,10 @@ def f(values, axis=None, skipna=True, **kwds):
return f
-def _bn_ok_dtype(dt, name: str) -> bool:
+def _bn_ok_dtype(dtype: Dtype, name: str) -> bool:
# Bottleneck chokes on datetime64
- if not is_object_dtype(dt) and not (
- is_datetime_or_timedelta_dtype(dt) or is_datetime64tz_dtype(dt)
+ if not is_object_dtype(dtype) and not (
+ is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype)
):
# GH 15507
@@ -163,7 +166,9 @@ def _has_infs(result) -> bool:
return False
-def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
+def _get_fill_value(
+ dtype: Dtype, fill_value: Optional[Scalar] = None, fill_value_typ=None
+):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
@@ -326,12 +331,12 @@ def _get_values(
return values, mask, dtype, dtype_max, fill_value
-def _na_ok_dtype(dtype):
+def _na_ok_dtype(dtype) -> bool:
# TODO: what about datetime64tz? PeriodDtype?
return not issubclass(dtype.type, (np.integer, np.timedelta64, np.datetime64))
-def _wrap_results(result, dtype, fill_value=None):
+def _wrap_results(result, dtype: Dtype, fill_value=None):
""" wrap our results if needed """
if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
@@ -362,7 +367,9 @@ def _wrap_results(result, dtype, fill_value=None):
return result
-def _na_for_min_count(values, axis: Optional[int]):
+def _na_for_min_count(
+ values: np.ndarray, axis: Optional[int]
+) -> Union[Scalar, np.ndarray]:
"""
Return the missing value for `values`.
@@ -393,7 +400,12 @@ def _na_for_min_count(values, axis: Optional[int]):
return result
-def nanany(values, axis=None, skipna: bool = True, mask=None):
+def nanany(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> bool:
"""
Check if any elements along an axis evaluate to True.
@@ -425,7 +437,12 @@ def nanany(values, axis=None, skipna: bool = True, mask=None):
return values.any(axis)
-def nanall(values, axis=None, skipna: bool = True, mask=None):
+def nanall(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> bool:
"""
Check if all elements along an axis evaluate to True.
@@ -458,7 +475,13 @@ def nanall(values, axis=None, skipna: bool = True, mask=None):
@disallow("M8")
-def nansum(values, axis=None, skipna=True, min_count=0, mask=None):
+def nansum(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ min_count: int = 0,
+ mask: Optional[np.ndarray] = None,
+) -> float:
"""
Sum the elements along an axis ignoring NaNs
@@ -629,7 +652,7 @@ def _get_counts_nanvar(
mask: Optional[np.ndarray],
axis: Optional[int],
ddof: int,
- dtype=float,
+ dtype: Dtype = float,
) -> Tuple[Union[int, np.ndarray], Union[int, np.ndarray]]:
""" Get the count of non-null values along an axis, accounting
for degrees of freedom.
@@ -776,7 +799,13 @@ def nanvar(values, axis=None, skipna=True, ddof=1, mask=None):
@disallow("M8", "m8")
-def nansem(values, axis=None, skipna=True, ddof=1, mask=None):
+def nansem(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ ddof: int = 1,
+ mask: Optional[np.ndarray] = None,
+) -> float:
"""
Compute the standard error in the mean along given axis while ignoring NaNs
@@ -821,7 +850,12 @@ def nansem(values, axis=None, skipna=True, ddof=1, mask=None):
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
- def reduction(values, axis=None, skipna=True, mask=None):
+ def reduction(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+ ) -> Dtype:
values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask
@@ -847,7 +881,12 @@ def reduction(values, axis=None, skipna=True, mask=None):
@disallow("O")
-def nanargmax(values, axis=None, skipna=True, mask=None):
+def nanargmax(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> int:
"""
Parameters
----------
@@ -878,7 +917,12 @@ def nanargmax(values, axis=None, skipna=True, mask=None):
@disallow("O")
-def nanargmin(values, axis=None, skipna=True, mask=None):
+def nanargmin(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> int:
"""
Parameters
----------
@@ -909,7 +953,12 @@ def nanargmin(values, axis=None, skipna=True, mask=None):
@disallow("M8", "m8")
-def nanskew(values, axis=None, skipna=True, mask=None):
+def nanskew(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> float:
""" Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
@@ -987,7 +1036,12 @@ def nanskew(values, axis=None, skipna=True, mask=None):
@disallow("M8", "m8")
-def nankurt(values, axis=None, skipna=True, mask=None):
+def nankurt(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ mask: Optional[np.ndarray] = None,
+) -> float:
"""
Compute the sample excess kurtosis
@@ -1075,7 +1129,13 @@ def nankurt(values, axis=None, skipna=True, mask=None):
@disallow("M8", "m8")
-def nanprod(values, axis=None, skipna=True, min_count=0, mask=None):
+def nanprod(
+ values: np.ndarray,
+ axis: Optional[int] = None,
+ skipna: bool = True,
+ min_count: int = 0,
+ mask: Optional[np.ndarray] = None,
+) -> float:
"""
Parameters
----------
@@ -1088,7 +1148,8 @@ def nanprod(values, axis=None, skipna=True, min_count=0, mask=None):
Returns
-------
- result : dtype
+ Dtype
+        The product of all elements on a given axis. (NaNs are treated as 1)
Examples
--------
@@ -1096,10 +1157,6 @@ def nanprod(values, axis=None, skipna=True, min_count=0, mask=None):
>>> s = pd.Series([1, 2, 3, np.nan])
>>> nanops.nanprod(s)
6.0
-
- Returns
- -------
- The product of all elements on a given axis. ( NaNs are treated as 1)
"""
mask = _maybe_get_mask(values, skipna, mask)
@@ -1138,7 +1195,7 @@ def _get_counts(
values_shape: Tuple[int],
mask: Optional[np.ndarray],
axis: Optional[int],
- dtype=float,
+ dtype: Dtype = float,
) -> Union[int, np.ndarray]:
""" Get the count of non-null values along an axis
@@ -1184,7 +1241,13 @@ def _maybe_null_out(
mask: Optional[np.ndarray],
shape: Tuple,
min_count: int = 1,
-) -> np.ndarray:
+) -> float:
+ """
+ Returns
+ -------
+ Dtype
+        The result, with entries having fewer than ``min_count`` non-null values set to NaN.
+ """
if mask is not None and axis is not None and getattr(result, "ndim", False):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if np.any(null_mask):
@@ -1218,7 +1281,9 @@ def _zero_out_fperr(arg):
@disallow("M8", "m8")
-def nancorr(a, b, method="pearson", min_periods=None):
+def nancorr(
+ a: np.ndarray, b: np.ndarray, method="pearson", min_periods: Optional[int] = None,
+):
"""
a, b: ndarrays
"""
@@ -1268,7 +1333,7 @@ def _spearman(a, b):
@disallow("M8", "m8")
-def nancov(a, b, min_periods=None):
+def nancov(a: np.ndarray, b: np.ndarray, min_periods: Optional[int] = None):
if len(a) != len(b):
raise AssertionError("Operands to nancov must have same size")
@@ -1341,7 +1406,9 @@ def f(x, y):
nanne = make_nancomp(operator.ne)
-def _nanpercentile_1d(values, mask, q, na_value, interpolation):
+def _nanpercentile_1d(
+ values: np.ndarray, mask: np.ndarray, q, na_value: Scalar, interpolation
+) -> Union[Scalar, np.ndarray]:
"""
Wrapper for np.percentile that skips missing values, specialized to
1-dimensional case.
@@ -1372,7 +1439,15 @@ def _nanpercentile_1d(values, mask, q, na_value, interpolation):
return np.percentile(values, q, interpolation=interpolation)
-def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation):
+def nanpercentile(
+ values: np.ndarray,
+ q,
+ axis: int,
+ na_value,
+ mask: np.ndarray,
+ ndim: int,
+ interpolation,
+):
"""
Wrapper for np.percentile that skips missing values.
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index f51d71d5507a0..6d2253c5dc87d 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,12 +5,13 @@
"""
import datetime
import operator
-from typing import Set, Tuple, Union
+from typing import Optional, Set, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, lib
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
+from pandas._typing import Level
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
@@ -329,19 +330,25 @@ def fill_binop(left, right, fill_value):
Notes
-----
- Makes copies if fill_value is not None
+ Makes copies if fill_value is not None and NAs are present.
"""
- # TODO: can we make a no-copy implementation?
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
- left = left.copy()
- right = right.copy()
# one but not both
mask = left_mask ^ right_mask
- left[left_mask & mask] = fill_value
- right[right_mask & mask] = fill_value
+
+ if left_mask.any():
+ # Avoid making a copy if we can
+ left = left.copy()
+ left[left_mask & mask] = fill_value
+
+ if right_mask.any():
+ # Avoid making a copy if we can
+ right = right.copy()
+ right[right_mask & mask] = fill_value
+
return left, right
@@ -584,33 +591,23 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# DataFrame
-def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None):
+def _combine_series_frame(left, right, func, axis: int):
"""
Apply binary operator `func` to self, other using alignment and fill
- conventions determined by the fill_value, axis, and level kwargs.
+ conventions determined by the axis argument.
Parameters
----------
- self : DataFrame
- other : Series
+ left : DataFrame
+ right : Series
func : binary operator
- fill_value : object, default None
- axis : {0, 1, 'columns', 'index', None}, default None
- level : int or None, default None
+ axis : {0, 1}
Returns
-------
result : DataFrame
"""
- if fill_value is not None:
- raise NotImplementedError(f"fill_value {fill_value} not supported.")
-
- if axis is None:
- # default axis is columns
- axis = 1
-
- axis = self._get_axis_number(axis)
- left, right = self.align(other, join="outer", axis=axis, level=level, copy=False)
+ # We assume that self.align(other, ...) has already been called
if axis == 0:
new_data = left._combine_match_index(right, func)
else:
@@ -619,8 +616,27 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N
return left._construct_result(new_data)
-def _align_method_FRAME(left, right, axis):
- """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
+def _align_method_FRAME(
+ left, right, axis, flex: Optional[bool] = False, level: Level = None
+):
+ """
+ Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.
+
+ Parameters
+ ----------
+ left : DataFrame
+ right : Any
+ axis: int, str, or None
+ flex: bool or None, default False
+ Whether this is a flex op, in which case we reindex.
+ None indicates not to check for alignment.
+ level : int or level name, default None
+
+ Returns
+ -------
+ left : DataFrame
+ right : Any
+ """
def to_series(right):
msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
@@ -664,15 +680,29 @@ def to_series(right):
elif right.ndim > 2:
raise ValueError(
- "Unable to coerce to Series/DataFrame, dim "
- f"must be <= 2: {right.shape}"
+ f"Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
# GH17901
right = to_series(right)
- return right
+ if flex is not None and isinstance(right, ABCDataFrame):
+ if not left._indexed_same(right):
+ if flex:
+ left, right = left.align(right, join="outer", level=level, copy=False)
+ else:
+ raise ValueError(
+ "Can only compare identically-labeled DataFrame objects"
+ )
+ elif isinstance(right, ABCSeries):
+ # axis=1 is default for DataFrame-with-Series op
+ axis = left._get_axis_number(axis) if axis is not None else 1
+ left, right = left.align(
+ right, join="outer", axis=axis, level=level, copy=False
+ )
+
+ return left, right
def _arith_method_FRAME(cls, op, special):
@@ -692,25 +722,27 @@ def _arith_method_FRAME(cls, op, special):
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
- other = _align_method_FRAME(self, other, axis)
+ self, other = _align_method_FRAME(self, other, axis, flex=True, level=level)
if isinstance(other, ABCDataFrame):
# Another DataFrame
pass_op = op if should_series_dispatch(self, other, op) else na_op
pass_op = pass_op if not is_logical else op
- left, right = self.align(other, join="outer", level=level, copy=False)
- new_data = left._combine_frame(right, pass_op, fill_value)
- return left._construct_result(new_data)
+ new_data = self._combine_frame(other, pass_op, fill_value)
+ return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
# For these values of `axis`, we end up dispatching to Series op,
# so do not want the masked op.
pass_op = op if axis in [0, "columns", None] else na_op
pass_op = pass_op if not is_logical else op
- return _combine_series_frame(
- self, other, pass_op, fill_value=fill_value, axis=axis, level=level
- )
+
+ if fill_value is not None:
+ raise NotImplementedError(f"fill_value {fill_value} not supported.")
+
+ axis = self._get_axis_number(axis) if axis is not None else 1
+ return _combine_series_frame(self, other, pass_op, axis=axis)
else:
# in this case we always have `np.ndim(other) == 0`
if fill_value is not None:
@@ -736,19 +768,16 @@ def _flex_comp_method_FRAME(cls, op, special):
@Appender(doc)
def f(self, other, axis=default_axis, level=None):
- other = _align_method_FRAME(self, other, axis)
+ self, other = _align_method_FRAME(self, other, axis, flex=True, level=level)
if isinstance(other, ABCDataFrame):
# Another DataFrame
- if not self._indexed_same(other):
- self, other = self.align(other, "outer", level=level, copy=False)
new_data = dispatch_to_series(self, other, op, str_rep)
return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
- return _combine_series_frame(
- self, other, op, fill_value=None, axis=axis, level=level
- )
+ axis = self._get_axis_number(axis) if axis is not None else 1
+ return _combine_series_frame(self, other, op, axis=axis)
else:
# in this case we always have `np.ndim(other) == 0`
new_data = dispatch_to_series(self, other, op)
@@ -766,27 +795,24 @@ def _comp_method_FRAME(cls, op, special):
@Appender(f"Wrapper for comparison method {op_name}")
def f(self, other):
- other = _align_method_FRAME(self, other, axis=None)
+ self, other = _align_method_FRAME(
+ self, other, axis=None, level=None, flex=False
+ )
if isinstance(other, ABCDataFrame):
# Another DataFrame
- if not self._indexed_same(other):
- raise ValueError(
- "Can only compare identically-labeled DataFrame objects"
- )
new_data = dispatch_to_series(self, other, op, str_rep)
- return self._construct_result(new_data)
elif isinstance(other, ABCSeries):
- return _combine_series_frame(
- self, other, op, fill_value=None, axis=None, level=None
- )
+ new_data = dispatch_to_series(self, other, op, axis="columns")
+
else:
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
new_data = dispatch_to_series(self, other, op)
- return self._construct_result(new_data)
+
+ return self._construct_result(new_data)
f.__name__ = op_name
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index b84d468fff736..c393b8028113b 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -95,7 +95,9 @@ def masked_arith_op(x, y, op):
else:
if not is_scalar(y):
- raise TypeError(type(y))
+ raise TypeError(
+ f"Cannot broadcast np.ndarray with operand of type { type(y) }"
+ )
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
@@ -277,7 +279,7 @@ def na_logical_op(x: np.ndarray, y, op):
assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype))
x = ensure_object(x)
y = ensure_object(y)
- result = libops.vec_binop(x, y, op)
+ result = libops.vec_binop(x.ravel(), y.ravel(), op)
else:
# let null fall thru
assert lib.is_scalar(y)
@@ -298,7 +300,7 @@ def na_logical_op(x: np.ndarray, y, op):
f"and scalar of type [{typ}]"
)
- return result
+ return result.reshape(x.shape)
def logical_op(
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 5039ffab33fbd..854d6072eea36 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -109,26 +109,23 @@ def mask_zero_div_zero(x, y, result):
return result
if zmask.any():
- shape = result.shape
# Flip sign if necessary for -0.0
zneg_mask = zmask & np.signbit(y)
zpos_mask = zmask & ~zneg_mask
- nan_mask = (zmask & (x == 0)).ravel()
+ nan_mask = zmask & (x == 0)
with np.errstate(invalid="ignore"):
- neginf_mask = ((zpos_mask & (x < 0)) | (zneg_mask & (x > 0))).ravel()
- posinf_mask = ((zpos_mask & (x > 0)) | (zneg_mask & (x < 0))).ravel()
+ neginf_mask = (zpos_mask & (x < 0)) | (zneg_mask & (x > 0))
+ posinf_mask = (zpos_mask & (x > 0)) | (zneg_mask & (x < 0))
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
- result = result.astype("float64", copy=False).ravel()
-
- np.putmask(result, nan_mask, np.nan)
- np.putmask(result, posinf_mask, np.inf)
- np.putmask(result, neginf_mask, -np.inf)
+ result = result.astype("float64", copy=False)
- result = result.reshape(shape)
+ result[nan_mask] = np.nan
+ result[posinf_mask] = np.inf
+ result[neginf_mask] = -np.inf
return result
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0e43880dfda07..fb837409a00f5 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1262,7 +1262,7 @@ def _constructor(self):
return TimedeltaIndexResampler
-def resample(obj, kind=None, **kwds):
+def get_resampler(obj, kind=None, **kwds):
"""
Create a TimeGrouper and return our resampler.
"""
@@ -1270,7 +1270,7 @@ def resample(obj, kind=None, **kwds):
return tg._get_resampler(obj, kind=kind)
-resample.__doc__ = Resampler.__doc__
+get_resampler.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 502b8d1941fdf..b42497b507e1f 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -305,8 +305,7 @@ def __init__(
if isinstance(objs, (NDFrame, str)):
raise TypeError(
"first argument must be an iterable of pandas "
- "objects, you passed an object of type "
- '"{name}"'.format(name=type(objs).__name__)
+ f'objects, you passed an object of type "{type(objs).__name__}"'
)
if join == "outer":
@@ -500,9 +499,7 @@ def get_result(self):
new_data._consolidate_inplace()
cons = self.objs[0]._constructor
- return cons._from_axes(new_data, self.new_axes).__finalize__(
- self, method="concat"
- )
+ return cons(new_data).__finalize__(self, method="concat")
def _get_result_dim(self) -> int:
if self._is_series and self.axis == 1:
@@ -520,7 +517,11 @@ def _get_new_axes(self) -> List[Index]:
def _get_comb_axis(self, i: int) -> Index:
data_axis = self.objs[0]._get_block_manager_axis(i)
return get_objs_combined_axis(
- self.objs, axis=data_axis, intersect=self.intersect, sort=self.sort
+ self.objs,
+ axis=data_axis,
+ intersect=self.intersect,
+ sort=self.sort,
+ copy=self.copy,
)
def _get_concat_axis(self) -> Index:
@@ -577,10 +578,7 @@ def _maybe_check_integrity(self, concat_index: Index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index[concat_index.duplicated()].unique()
- raise ValueError(
- "Indexes have overlapping values: "
- "{overlap!s}".format(overlap=overlap)
- )
+ raise ValueError(f"Indexes have overlapping values: {overlap}")
def _concat_indexes(indexes) -> Index:
@@ -648,8 +646,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
# make sure that all of the passed indices have the same nlevels
if not len({idx.nlevels for idx in indexes}) == 1:
raise AssertionError(
- "Cannot concat indices that do "
- "not have the same number of levels"
+ "Cannot concat indices that do not have the same number of levels"
)
# also copies
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index d4ccb19fc0dda..d04287e1e9088 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -52,8 +52,7 @@ def melt(
if not missing.empty:
raise KeyError(
"The following 'id_vars' are not present "
- "in the DataFrame: {missing}"
- "".format(missing=list(missing))
+ f"in the DataFrame: {list(missing)}"
)
else:
id_vars = []
@@ -74,8 +73,7 @@ def melt(
if not missing.empty:
raise KeyError(
"The following 'value_vars' are not present in "
- "the DataFrame: {missing}"
- "".format(missing=list(missing))
+ f"the DataFrame: {list(missing)}"
)
frame = frame.loc[:, id_vars + value_vars]
else:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 5f92e4a88b568..ceee2f66dba42 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -600,13 +600,11 @@ def __init__(
if not is_bool(left_index):
raise ValueError(
- "left_index parameter must be of type bool, not "
- "{left_index}".format(left_index=type(left_index))
+ f"left_index parameter must be of type bool, not {type(left_index)}"
)
if not is_bool(right_index):
raise ValueError(
- "right_index parameter must be of type bool, not "
- "{right_index}".format(right_index=type(right_index))
+ f"right_index parameter must be of type bool, not {type(right_index)}"
)
# warn user when merging between different levels
@@ -1073,9 +1071,8 @@ def _maybe_coerce_merge_keys(self):
continue
msg = (
- "You are trying to merge on {lk_dtype} and "
- "{rk_dtype} columns. If you wish to proceed "
- "you should use pd.concat".format(lk_dtype=lk.dtype, rk_dtype=rk.dtype)
+ f"You are trying to merge on {lk.dtype} and "
+ f"{rk.dtype} columns. If you wish to proceed you should use pd.concat"
)
# if we are numeric, then allow differing
@@ -1092,8 +1089,7 @@ def _maybe_coerce_merge_keys(self):
warnings.warn(
"You are merging on int and float "
"columns where the float values "
- "are not equal to their int "
- "representation",
+ "are not equal to their int representation",
UserWarning,
)
continue
@@ -1103,8 +1099,7 @@ def _maybe_coerce_merge_keys(self):
warnings.warn(
"You are merging on int and float "
"columns where the float values "
- "are not equal to their int "
- "representation",
+ "are not equal to their int representation",
UserWarning,
)
continue
@@ -1251,20 +1246,17 @@ def _validate(self, validate: str):
)
elif not left_unique:
raise MergeError(
- "Merge keys are not unique in left dataset; "
- "not a one-to-one merge"
+ "Merge keys are not unique in left dataset; not a one-to-one merge"
)
elif not right_unique:
raise MergeError(
- "Merge keys are not unique in right dataset; "
- "not a one-to-one merge"
+ "Merge keys are not unique in right dataset; not a one-to-one merge"
)
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError(
- "Merge keys are not unique in left dataset; "
- "not a one-to-many merge"
+ "Merge keys are not unique in left dataset; not a one-to-many merge"
)
elif validate in ["many_to_one", "m:1"]:
@@ -1833,8 +1825,7 @@ def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool =
raise AssertionError(
"If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
- "number of join keys must be the number of "
- "levels in right_ax"
+ "number of join keys must be the number of levels in right_ax"
)
left_indexer, right_indexer = _get_multiindex_indexer(
@@ -2004,8 +1995,7 @@ def _validate_operand(obj: FrameOrSeries) -> "DataFrame":
return obj.to_frame()
else:
raise TypeError(
- "Can only merge Series or DataFrame objects, "
- "a {obj} was passed".format(obj=type(obj))
+ f"Can only merge Series or DataFrame objects, a {type(obj)} was passed"
)
@@ -2021,10 +2011,7 @@ def _items_overlap_with_suffix(left: Index, lsuffix, right: Index, rsuffix):
return left, right
if not lsuffix and not rsuffix:
- raise ValueError(
- "columns overlap but no suffix specified: "
- "{rename}".format(rename=to_rename)
- )
+ raise ValueError(f"columns overlap but no suffix specified: {to_rename}")
def renamer(x, suffix):
"""
@@ -2043,7 +2030,7 @@ def renamer(x, suffix):
x : renamed column name
"""
if x in to_rename and suffix is not None:
- return "{x}{suffix}".format(x=x, suffix=suffix)
+ return f"{x}{suffix}"
return x
lrenamer = partial(renamer, suffix=lsuffix)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index b443ba142369c..a5a9ec9fb79ba 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -117,7 +117,9 @@ def pivot_table(
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
- if table.index.nlevels > 1:
+
+ # GH17038, this check should only happen if index is defined (not None)
+ if table.index.nlevels > 1 and index:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
@@ -132,13 +134,13 @@ def pivot_table(
table = agged.unstack(to_unstack)
if not dropna:
- if table.index.nlevels > 1:
+ if isinstance(table.index, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
- if table.columns.nlevels > 1:
+ if isinstance(table.columns, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
@@ -224,15 +226,7 @@ def _add_margins(
elif values:
marginal_result_set = _generate_marginal_results(
- table,
- data,
- values,
- rows,
- cols,
- aggfunc,
- observed,
- grand_margin,
- margins_name,
+ table, data, values, rows, cols, aggfunc, observed, margins_name,
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
@@ -301,15 +295,7 @@ def _compute_grand_margin(data, values, aggfunc, margins_name: str = "All"):
def _generate_marginal_results(
- table,
- data,
- values,
- rows,
- cols,
- aggfunc,
- observed,
- grand_margin,
- margins_name: str = "All",
+ table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All",
):
if len(cols) > 0:
# need to "interleave" the margins
@@ -343,12 +329,22 @@ def _all_key(key):
table_pieces.append(piece)
margin_keys.append(all_key)
else:
- margin = grand_margin
+ from pandas import DataFrame
+
cat_axis = 0
for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
- all_key = _all_key(key)
+ if len(cols) > 1:
+ all_key = _all_key(key)
+ else:
+ all_key = margins_name
table_pieces.append(piece)
- table_pieces.append(Series(margin[key], index=[all_key]))
+ # GH31016 this is to calculate margin for each group, and assign
+ # corresponded key as index
+ transformed_piece = DataFrame(piece.apply(aggfunc)).T
+ transformed_piece.index = Index([all_key], name=piece.index.name)
+
+ # append piece for margin into table_piece
+ table_pieces.append(transformed_piece)
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
@@ -377,7 +373,7 @@ def _generate_marginal_results_without_values(
):
if len(cols) > 0:
# need to "interleave" the margins
- margin_keys = []
+ margin_keys: Union[List, Index] = []
def _all_key():
if len(cols) == 1:
@@ -427,6 +423,9 @@ def _convert_by(by):
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot"], indents=1)
def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFrame":
+ if columns is None:
+ raise TypeError("pivot() missing 1 required argument: 'columns'")
+
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
@@ -579,6 +578,8 @@ def crosstab(
from pandas import DataFrame
df = DataFrame(data, index=common_idx)
+ original_df_cols = df.columns
+
if values is None:
df["__dummy__"] = 0
kwargs = {"aggfunc": len, "fill_value": 0}
@@ -587,7 +588,7 @@ def crosstab(
kwargs = {"aggfunc": aggfunc}
table = df.pivot_table(
- "__dummy__",
+ ["__dummy__"],
index=rownames,
columns=colnames,
margins=margins,
@@ -596,6 +597,12 @@ def crosstab(
**kwargs,
)
+ # GH18321, after pivoting, an extra top level of column index of `__dummy__` is
+ # created, and this extra level should not be included in the further steps
+ if not table.empty:
+ cols_diff = df.columns.difference(original_df_cols)[0]
+ table = table[cols_diff]
+
# Post-process
if normalize is not False:
table = _normalize(
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 97f416e32d07b..f00ff0d4ba5ed 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,6 +1,6 @@
from functools import partial
import itertools
-from typing import List
+from typing import List, Optional, Union
import numpy as np
@@ -317,6 +317,10 @@ def _unstack_multiple(data, clocs, fill_value=None):
index = data.index
+ # GH 19966 Make sure if MultiIndexed index has tuple name, they will be
+ # recognised as a whole
+ if clocs in index.names:
+ clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
@@ -371,6 +375,7 @@ def _unstack_multiple(data, clocs, fill_value=None):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
+ assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
@@ -429,15 +434,14 @@ def _unstack_frame(obj, level, fill_value=None):
blocks = obj._data.unstack(unstacker, fill_value=fill_value)
return obj._constructor(blocks)
else:
- unstacker = _Unstacker(
+ return _Unstacker(
obj.values,
obj.index,
level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor,
- )
- return unstacker.get_result()
+ ).get_result()
def _unstack_extension_series(series, level, fill_value):
@@ -898,9 +902,10 @@ def check_len(item, name):
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
+ with_dummies: List[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
- with_dummies: List[DataFrame] = []
+ with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
@@ -990,6 +995,7 @@ def _make_col_name(prefix, prefix_sep, level) -> str:
dummy_cols = [_make_col_name(prefix, prefix_sep, level) for level in levels]
+ index: Optional[Index]
if isinstance(data, Series):
index = data.index
else:
@@ -997,6 +1003,7 @@ def _make_col_name(prefix, prefix_sep, level) -> str:
if sparse:
+ fill_value: Union[bool, float, int]
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
@@ -1006,7 +1013,7 @@ def _make_col_name(prefix, prefix_sep, level) -> str:
sparse_series = []
N = len(data)
- sp_indices = [[] for _ in range(len(dummy_cols))]
+ sp_indices: List[List] = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 2e3eb9170b15c..00a7645d0c7a5 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -14,7 +14,9 @@
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
+ is_extension_array_dtype,
is_integer,
+ is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
@@ -205,6 +207,12 @@ def cut(
x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
+ # To support cut(IntegerArray), we convert to object dtype with NaN
+    # Proper support will be added in the future.
+ # https://github.com/pandas-dev/pandas/pull/31290
+ if is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):
+ x = x.to_numpy(dtype=object, na_value=np.nan)
+
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
@@ -363,8 +371,7 @@ def _bins_to_cuts(
if duplicates not in ["raise", "drop"]:
raise ValueError(
- "invalid value for 'duplicates' parameter, "
- "valid options are: raise, drop"
+ "invalid value for 'duplicates' parameter, valid options are: raise, drop"
)
if isinstance(bins, IntervalIndex):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3e1f011fde51a..e5cea8ebfc914 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4,18 +4,31 @@
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
-from typing import IO, Any, Callable, Hashable, List, Optional
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Hashable,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Type,
+)
import warnings
import numpy as np
from pandas._config import get_option
-from pandas._libs import index as libindex, lib, reshape, tslibs
+from pandas._libs import index as libindex, lib, properties, reshape, tslibs
+from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg, validate_percentile
+from pandas.core.dtypes.cast import convert_dtypes
from pandas.core.dtypes.common import (
_is_unorderable_exception,
ensure_platform_int,
@@ -34,6 +47,8 @@
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
+ ABCMultiIndex,
+ ABCPeriodIndex,
ABCSeries,
ABCSparseArray,
)
@@ -58,12 +73,12 @@
is_empty_data,
sanitize_array,
)
-from pandas.core.groupby import generic as groupby_generic
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import (
Float64Index,
Index,
+ IntervalIndex,
InvalidIndexError,
MultiIndex,
ensure_index,
@@ -80,6 +95,10 @@
import pandas.io.formats.format as fmt
import pandas.plotting
+if TYPE_CHECKING:
+ from pandas.core.frame import DataFrame
+ from pandas.core.groupby.generic import SeriesGroupBy
+
__all__ = ["Series"]
_shared_doc_kwargs = dict(
@@ -161,6 +180,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_name: Optional[Hashable]
_metadata: List[str] = ["name"]
+ _internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
_deprecations = (
base.IndexOpsMixin._deprecations
@@ -356,11 +376,11 @@ def _init_dict(self, data, index=None, dtype=None):
# ----------------------------------------------------------------------
@property
- def _constructor(self):
+ def _constructor(self) -> Type["Series"]:
return Series
@property
- def _constructor_expanddim(self):
+ def _constructor_expanddim(self) -> Type["DataFrame"]:
from pandas.core.frame import DataFrame
return DataFrame
@@ -372,7 +392,7 @@ def _can_hold_na(self):
_index = None
- def _set_axis(self, axis, labels, fastpath=False):
+ def _set_axis(self, axis, labels, fastpath: bool = False) -> None:
"""
Override generic, we want to set the _typ here.
"""
@@ -393,18 +413,10 @@ def _set_axis(self, axis, labels, fastpath=False):
# or not be a DatetimeIndex
pass
- self._set_subtyp(is_all_dates)
-
object.__setattr__(self, "_index", labels)
if not fastpath:
self._data.set_axis(axis, labels)
- def _set_subtyp(self, is_all_dates):
- if is_all_dates:
- object.__setattr__(self, "_subtyp", "time_series")
- else:
- object.__setattr__(self, "_subtyp", "series")
-
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
@@ -479,10 +491,46 @@ def values(self):
@property
def _values(self):
"""
- Return the internal repr of this data.
+        Return the internal repr of this data (defined by Block.internal_values).
+        These are the values as stored in the Block (ndarray or ExtensionArray
+ depending on the Block class), with datetime64[ns] and timedelta64[ns]
+ wrapped in ExtensionArrays to match Index._values behavior.
+
+ Differs from the public ``.values`` for certain data types, because of
+ historical backwards compatibility of the public attribute (e.g. period
+ returns object ndarray and datetimetz a datetime64[ns] ndarray for
+ ``.values`` while it returns an ExtensionArray for ``._values`` in those
+ cases).
+
+ Differs from ``.array`` in that this still returns the numpy array if
+ the Block is backed by a numpy array (except for datetime64 and
+ timedelta64 dtypes), while ``.array`` ensures to always return an
+ ExtensionArray.
+
+ Differs from ``._ndarray_values``, as that ensures to always return a
+ numpy array (it will call ``_ndarray_values`` on the ExtensionArray, if
+ the Series was backed by an ExtensionArray).
+
+ Overview:
+
+ dtype | values | _values | array | _ndarray_values |
+ ----------- | ------------- | ------------- | ------------- | --------------- |
+ Numeric | ndarray | ndarray | PandasArray | ndarray |
+ Category | Categorical | Categorical | Categorical | ndarray[int] |
+ dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | ndarray[M8ns] |
+ dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | ndarray[M8ns] |
+ td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | ndarray[m8ns] |
+ Period | ndarray[obj] | PeriodArray | PeriodArray | ndarray[int] |
+ Nullable | EA | EA | EA | ndarray |
+
"""
return self._data.internal_values()
+ @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore
+ @property
+ def array(self) -> ExtensionArray:
+ return self._data._block.array_values()
+
def _internal_get_values(self):
"""
Same as values (but handles sparseness conversions); is a view.
@@ -517,7 +565,7 @@ def __len__(self) -> int:
"""
return len(self._data)
- def view(self, dtype=None):
+ def view(self, dtype=None) -> "Series":
"""
Create a new view of the Series.
@@ -686,7 +734,7 @@ def __array__(self, dtype=None) -> np.ndarray:
Returns
-------
numpy.ndarray
- The values in the series converted to a :class:`numpy.ndarary`
+ The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
@@ -729,38 +777,9 @@ def __array__(self, dtype=None) -> np.ndarray:
# ----------------------------------------------------------------------
- def _unpickle_series_compat(self, state):
- if isinstance(state, dict):
- self._data = state["_data"]
- self.name = state["name"]
- self.index = self._data.index
-
- elif isinstance(state, tuple):
-
- # < 0.12 series pickle
-
- nd_state, own_state = state
-
- # recreate the ndarray
- data = np.empty(nd_state[1], dtype=nd_state[2])
- np.ndarray.__setstate__(data, nd_state)
-
- # backwards compat
- index, name = own_state[0], None
- if len(own_state) > 1:
- name = own_state[1]
-
- # recreate
- self._data = SingleBlockManager(data, index, fastpath=True)
- self._index = index
- self.name = name
-
- else:
- raise Exception(f"cannot unpickle legacy formats -> [{state}]")
-
# indexers
@property
- def axes(self):
+ def axes(self) -> List[Index]:
"""
Return a list of the row axis labels.
"""
@@ -770,7 +789,14 @@ def axes(self):
# Indexing Methods
@Appender(generic.NDFrame.take.__doc__)
- def take(self, indices, axis=0, is_copy=False, **kwargs):
+ def take(self, indices, axis=0, is_copy=None, **kwargs) -> "Series":
+ if is_copy is not None:
+ warnings.warn(
+ "is_copy is deprecated and will be removed in a future version. "
+ "'take' always returns a copy, so there is no need to specify this.",
+ FutureWarning,
+ stacklevel=2,
+ )
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
@@ -785,16 +811,20 @@ def take(self, indices, axis=0, is_copy=False, **kwargs):
kwargs = {}
new_values = self._values.take(indices, **kwargs)
- result = self._constructor(
+ return self._constructor(
new_values, index=new_index, fastpath=True
).__finalize__(self)
- # Maybe set copy if we didn't actually change the index.
- if is_copy:
- if not result._get_axis(axis).equals(self._get_axis(axis)):
- result._set_is_copy(self)
+ def _take_with_is_copy(self, indices, axis=0, **kwargs):
+ """
+ Internal version of the `take` method that sets the `_is_copy`
+        attribute to keep track of the parent dataframe (used in indexing
+ for the SettingWithCopyWarning). For Series this does the same
+ as the public take (it never sets `_is_copy`).
- return result
+ See the docstring of `take` for full explanation of the parameters.
+ """
+ return self.take(indices=indices, axis=axis, **kwargs)
def _ixs(self, i: int, axis: int = 0):
"""
@@ -816,27 +846,19 @@ def _ixs(self, i: int, axis: int = 0):
else:
return values[i]
- def _slice(self, slobj: slice, axis: int = 0, kind=None):
+ def _slice(self, slobj: slice, axis: int = 0, kind=None) -> "Series":
slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
return self._get_values(slobj)
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
+
+ if key is Ellipsis:
+ return self
+
try:
result = self.index.get_value(self, key)
- if not is_scalar(result):
- if is_list_like(result) and not isinstance(result, Series):
-
- # we need to box if loc of the key isn't scalar here
- # otherwise have inline ndarray/lists
- try:
- if not is_scalar(self.index.get_loc(key)):
- result = self._constructor(
- result, index=[key] * len(result), dtype=self.dtype
- ).__finalize__(self)
- except KeyError:
- pass
return result
except InvalidIndexError:
pass
@@ -844,8 +866,6 @@ def __getitem__(self, key):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
- elif key is Ellipsis:
- return self
elif com.is_bool_indexer(key):
pass
else:
@@ -896,6 +916,9 @@ def _get_with(self, key):
if key_type == "integer":
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
+ elif isinstance(self.index, IntervalIndex):
+ indexer = self.index.get_indexer_for(key)
+ return self.iloc[indexer]
else:
return self._get_values(key)
elif key_type == "boolean":
@@ -917,7 +940,13 @@ def _get_with(self, key):
def _get_values_tuple(self, key):
# mpl hackaround
if com.any_none(*key):
- return self._get_values(key)
+ # suppress warning from slicing the index with a 2d indexer.
+ # eventually we'll want Series itself to warn.
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore", "Support for multi-dim", DeprecationWarning
+ )
+ return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError("Can only tuple-index with a MultiIndex")
@@ -953,7 +982,7 @@ def _get_value(self, label, takeable: bool = False):
"""
if takeable:
return com.maybe_box_datetimelike(self._values[label])
- return self.index.get_value(self._values, label)
+ return self.index.get_value(self, label)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
@@ -971,6 +1000,9 @@ def __setitem__(self, key, value):
self[:] = value
else:
self.loc[key] = value
+ except InvalidIndexError:
+ # e.g. slice
+ self._set_with(key, value)
except TypeError as e:
if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
@@ -1084,9 +1116,12 @@ def _set_value(self, label, value, takeable: bool = False):
try:
if takeable:
self._values[label] = value
- else:
+ elif isinstance(self._values, np.ndarray):
+ # i.e. not EA, so we can use _engine
self.index._engine.set_value(self._values, label, value)
- except (KeyError, TypeError):
+ else:
+ self.loc[label] = value
+ except KeyError:
# set using a non-recursive method
self.loc[label] = value
@@ -1100,7 +1135,7 @@ def _set_value(self, label, value, takeable: bool = False):
def _is_mixed_type(self):
return False
- def repeat(self, repeats, axis=None):
+ def repeat(self, repeats, axis=None) -> "Series":
"""
Repeat elements of a Series.
@@ -1389,8 +1424,8 @@ def to_string(
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
- "result must be of type str, type"
- f" of result is {repr(type(result).__name__)}"
+ "result must be of type str, type "
+ f"of result is {repr(type(result).__name__)}"
)
if buf is None:
@@ -1425,7 +1460,7 @@ def to_markdown(
# ----------------------------------------------------------------------
- def items(self):
+ def items(self) -> Iterable[Tuple[Label, Any]]:
"""
Lazily iterate over (index, value) tuples.
@@ -1455,13 +1490,13 @@ def items(self):
return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
- def iteritems(self):
+ def iteritems(self) -> Iterable[Tuple[Label, Any]]:
return self.items()
# ----------------------------------------------------------------------
# Misc public methods
- def keys(self):
+ def keys(self) -> Index:
"""
Return alias for index.
@@ -1507,7 +1542,7 @@ def to_dict(self, into=dict):
into_c = com.standardize_mapping(into)
return into_c(self.items())
- def to_frame(self, name=None):
+ def to_frame(self, name=None) -> "DataFrame":
"""
Convert Series to DataFrame.
@@ -1539,7 +1574,7 @@ def to_frame(self, name=None):
return df
- def _set_name(self, name, inplace=False):
+ def _set_name(self, name, inplace=False) -> "Series":
"""
Set the Series name.
@@ -1619,13 +1654,14 @@ def groupby(
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
- ) -> "groupby_generic.SeriesGroupBy":
+ ) -> "SeriesGroupBy":
+ from pandas.core.groupby.generic import SeriesGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
- return groupby_generic.SeriesGroupBy(
+ return SeriesGroupBy(
obj=self,
keys=by,
axis=axis,
@@ -1681,7 +1717,7 @@ def count(self, level=None):
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev, dtype="int64").__finalize__(self)
- def mode(self, dropna=True):
+ def mode(self, dropna=True) -> "Series":
"""
Return the mode(s) of the dataset.
@@ -1766,7 +1802,7 @@ def unique(self):
result = super().unique()
return result
- def drop_duplicates(self, keep="first", inplace=False):
+ def drop_duplicates(self, keep="first", inplace=False) -> "Series":
"""
Return Series with duplicate values removed.
@@ -1843,7 +1879,7 @@ def drop_duplicates(self, keep="first", inplace=False):
"""
return super().drop_duplicates(keep=keep, inplace=inplace)
- def duplicated(self, keep="first"):
+ def duplicated(self, keep="first") -> "Series":
"""
Indicate duplicate Series values.
@@ -2062,7 +2098,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
return np.nan
return self.index[i]
- def round(self, decimals=0, *args, **kwargs):
+ def round(self, decimals=0, *args, **kwargs) -> "Series":
"""
Round each value in a Series to the given number of decimals.
@@ -2157,7 +2193,7 @@ def quantile(self, q=0.5, interpolation="linear"):
# scalar
return result.iloc[0]
- def corr(self, other, method="pearson", min_periods=None):
+ def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
@@ -2210,7 +2246,7 @@ def corr(self, other, method="pearson", min_periods=None):
f"'{method}' was supplied"
)
- def cov(self, other, min_periods=None):
+ def cov(self, other, min_periods=None) -> float:
"""
Compute covariance with Series, excluding missing values.
@@ -2239,7 +2275,7 @@ def cov(self, other, min_periods=None):
return np.nan
return nanops.nancov(this.values, other.values, min_periods=min_periods)
- def diff(self, periods=1):
+ def diff(self, periods=1) -> "Series":
"""
First discrete difference of element.
@@ -2264,6 +2300,11 @@ def diff(self, periods=1):
optional time freq.
DataFrame.diff: First discrete difference of object.
+ Notes
+ -----
+ For boolean dtypes, this uses :meth:`operator.xor` rather than
+ :meth:`operator.sub`.
+
Examples
--------
Difference with previous row
@@ -2300,10 +2341,10 @@ def diff(self, periods=1):
5 NaN
dtype: float64
"""
- result = algorithms.diff(com.values_from_object(self), periods)
+ result = algorithms.diff(self.array, periods)
return self._constructor(result, index=self.index).__finalize__(self)
- def autocorr(self, lag=1):
+ def autocorr(self, lag=1) -> float:
"""
Compute the lag-N autocorrelation.
@@ -2566,7 +2607,7 @@ def _binop(self, other, func, level=None, fill_value=None):
ret = ops._construct_result(self, result, new_index, name)
return ret
- def combine(self, other, func, fill_value=None):
+ def combine(self, other, func, fill_value=None) -> "Series":
"""
Combine the Series with a Series or scalar according to `func`.
@@ -2663,7 +2704,7 @@ def combine(self, other, func, fill_value=None):
new_values = try_cast_to_ea(self._values, new_values)
return self._constructor(new_values, index=new_index, name=new_name)
- def combine_first(self, other):
+ def combine_first(self, other) -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
@@ -2703,7 +2744,7 @@ def combine_first(self, other):
return this.where(notna(this), other)
- def update(self, other):
+ def update(self, other) -> None:
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
@@ -2762,10 +2803,10 @@ def sort_values(
self,
axis=0,
ascending=True,
- inplace=False,
- kind="quicksort",
- na_position="last",
- ignore_index=False,
+ inplace: bool = False,
+ kind: str = "quicksort",
+ na_position: str = "last",
+ ignore_index: bool = False,
):
"""
Sort by the values.
@@ -3117,7 +3158,7 @@ def sort_index(
else:
return result.__finalize__(self)
- def argsort(self, axis=0, kind="quicksort", order=None):
+ def argsort(self, axis=0, kind="quicksort", order=None) -> "Series":
"""
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
@@ -3155,7 +3196,7 @@ def argsort(self, axis=0, kind="quicksort", order=None):
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self)
- def nlargest(self, n=5, keep="first"):
+ def nlargest(self, n=5, keep="first") -> "Series":
"""
Return the largest `n` elements.
@@ -3253,7 +3294,7 @@ def nlargest(self, n=5, keep="first"):
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
- def nsmallest(self, n=5, keep="first"):
+ def nsmallest(self, n=5, keep="first") -> "Series":
"""
Return the smallest `n` elements.
@@ -3350,7 +3391,7 @@ def nsmallest(self, n=5, keep="first"):
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
- def swaplevel(self, i=-2, j=-1, copy=True):
+ def swaplevel(self, i=-2, j=-1, copy=True) -> "Series":
"""
Swap levels i and j in a :class:`MultiIndex`.
@@ -3368,12 +3409,13 @@ def swaplevel(self, i=-2, j=-1, copy=True):
Series
Series with levels swapped in MultiIndex.
"""
+ assert isinstance(self.index, ABCMultiIndex)
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
self
)
- def reorder_levels(self, order):
+ def reorder_levels(self, order) -> "Series":
"""
Rearrange index levels using input order.
@@ -3392,6 +3434,7 @@ def reorder_levels(self, order):
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
+ assert isinstance(result.index, ABCMultiIndex)
result.index = result.index.reorder_levels(order)
return result
@@ -3453,7 +3496,7 @@ def explode(self) -> "Series":
def unstack(self, level=-1, fill_value=None):
"""
- Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
+ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
@@ -3497,7 +3540,7 @@ def unstack(self, level=-1, fill_value=None):
# ----------------------------------------------------------------------
# function application
- def map(self, arg, na_action=None):
+ def map(self, arg, na_action=None) -> "Series":
"""
Map values of Series according to input correspondence.
@@ -3575,7 +3618,7 @@ def map(self, arg, na_action=None):
new_values = super()._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(self)
- def _gotitem(self, key, ndim, subset=None):
+ def _gotitem(self, key, ndim, subset=None) -> "Series":
"""
Sub-classes to define. Return a sliced object.
@@ -3969,6 +4012,32 @@ def rename(
else:
return self._set_name(index, inplace=inplace)
+ @Appender(
+ """
+ >>> s = pd.Series([1, 2, 3])
+ >>> s
+ 0 1
+ 1 2
+ 2 3
+ dtype: int64
+
+ >>> s.set_axis(['a', 'b', 'c'], axis=0)
+ a 1
+ b 2
+ c 3
+ dtype: int64
+ """
+ )
+ @Substitution(
+ **_shared_doc_kwargs,
+ extended_summary_sub="",
+ axis_description_sub="",
+ see_also_sub="",
+ )
+ @Appender(generic.NDFrame.set_axis.__doc__)
+ def set_axis(self, labels, axis=0, inplace=False):
+ return super().set_axis(labels, axis=axis, inplace=inplace)
+
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.reindex.__doc__)
def reindex(self, index=None, **kwargs):
@@ -3983,7 +4052,7 @@ def drop(
level=None,
inplace=False,
errors="raise",
- ):
+ ) -> "Series":
"""
Return Series with specified index labels removed.
@@ -4124,7 +4193,7 @@ def replace(
)
@Appender(generic._shared_docs["shift"] % _shared_doc_kwargs)
- def shift(self, periods=1, freq=None, axis=0, fill_value=None):
+ def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "Series":
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
@@ -4183,7 +4252,7 @@ def memory_usage(self, index=True, deep=False):
v += self.index.memory_usage(deep=deep)
return v
- def isin(self, values):
+ def isin(self, values) -> "Series":
"""
Check whether `values` are contained in Series.
@@ -4239,7 +4308,7 @@ def isin(self, values):
result = algorithms.isin(self, values)
return self._constructor(result, index=self.index).__finalize__(self)
- def between(self, left, right, inclusive=True):
+ def between(self, left, right, inclusive=True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
@@ -4314,20 +4383,48 @@ def between(self, left, right, inclusive=True):
return lmask & rmask
+ # ----------------------------------------------------------------------
+ # Convert to types that support pd.NA
+
+ def _convert_dtypes(
+ self: ABCSeries,
+ infer_objects: bool = True,
+ convert_string: bool = True,
+ convert_integer: bool = True,
+ convert_boolean: bool = True,
+ ) -> "Series":
+ input_series = self
+ if infer_objects:
+ input_series = input_series.infer_objects()
+ if is_object_dtype(input_series):
+ input_series = input_series.copy()
+
+ if convert_string or convert_integer or convert_boolean:
+ inferred_dtype = convert_dtypes(
+ input_series._values, convert_string, convert_integer, convert_boolean
+ )
+ try:
+ result = input_series.astype(inferred_dtype)
+ except TypeError:
+ result = input_series.copy()
+ else:
+ result = input_series.copy()
+ return result
+
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isna(self):
+ def isna(self) -> "Series":
return super().isna()
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
- def isnull(self):
+ def isnull(self) -> "Series":
return super().isnull()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notna(self):
+ def notna(self) -> "Series":
return super().notna()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
- def notnull(self):
+ def notnull(self) -> "Series":
return super().notnull()
def dropna(self, axis=0, inplace=False, how=None):
@@ -4421,7 +4518,7 @@ def dropna(self, axis=0, inplace=False, how=None):
# ----------------------------------------------------------------------
# Time series-oriented methods
- def to_timestamp(self, freq=None, how="start", copy=True):
+ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series":
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
@@ -4443,10 +4540,11 @@ def to_timestamp(self, freq=None, how="start", copy=True):
if copy:
new_values = new_values.copy()
+ assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex))
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values, index=new_index).__finalize__(self)
- def to_period(self, freq=None, copy=True):
+ def to_period(self, freq=None, copy=True) -> "Series":
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
@@ -4467,9 +4565,16 @@ def to_period(self, freq=None, copy=True):
if copy:
new_values = new_values.copy()
+ assert isinstance(self.index, ABCDatetimeIndex)
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values, index=new_index).__finalize__(self)
+ # ----------------------------------------------------------------------
+ # Add index
+ index: "Index" = properties.AxisProperty(
+ axis=0, doc="The index (axis labels) of the Series."
+ )
+
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index f8d9eeb211a1e..18c7504f2c2f8 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -8,6 +8,7 @@
import numpy as np
import pandas._libs.lib as lib
+import pandas._libs.missing as libmissing
import pandas._libs.ops as libops
from pandas._typing import ArrayLike, Dtype
from pandas.util._decorators import Appender
@@ -78,7 +79,7 @@ def cat_core(list_of_columns: List, sep: str):
return np.sum(arr_of_cols, axis=0)
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
- arr_with_sep = np.asarray(list_with_sep)
+ arr_with_sep = np.asarray(list_with_sep, dtype=object)
return np.sum(arr_with_sep, axis=0)
@@ -118,12 +119,15 @@ def cat_safe(list_of_columns: List, sep: str):
return result
-def _na_map(f, arr, na_result=np.nan, dtype=object):
- # should really _check_ for NA
+def _na_map(f, arr, na_result=None, dtype=object):
if is_extension_array_dtype(arr.dtype):
+ if na_result is None:
+ na_result = libmissing.NA
# just StringDtype
arr = extract_array(arr)
return _map_stringarray(f, arr, na_value=na_result, dtype=dtype)
+ if na_result is None:
+ na_result = np.nan
return _map_object(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
@@ -880,11 +884,12 @@ def _str_extract_noexpand(arr, pat, flags=0):
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
+ dtype = _result_dtype(arr)
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
- dtype=object,
+ dtype=dtype,
)
return result, name
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index cfa42d764ee44..0cf0f943ae442 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -12,7 +12,6 @@
DateParseError,
_format_is_iso,
_guess_datetime_format,
- parse_time_string,
)
from pandas._libs.tslibs.strptime import array_strptime
from pandas._typing import ArrayLike
@@ -38,9 +37,10 @@
)
from pandas.core.dtypes.missing import notna
-from pandas.arrays import IntegerArray
+from pandas.arrays import DatetimeArray, IntegerArray
from pandas.core import algorithms
from pandas.core.algorithms import unique
+from pandas.core.arrays.datetimes import tz_to_dtype
# ---------------------------------------------------------------------
# types used in annotations
@@ -231,9 +231,7 @@ def _return_parsed_timezone_results(result, timezones, tz, name):
"""
if tz is not None:
raise ValueError(
- "Cannot pass a tz argument when "
- "parsing strings with timezone "
- "information."
+ "Cannot pass a tz argument when parsing strings with timezone information."
)
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
@@ -285,7 +283,6 @@ def _convert_listlike_datetimes(
Index-like of parsed dates
"""
from pandas import DatetimeIndex
- from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import (
maybe_convert_dtype,
objects_to_datetime64ns,
@@ -430,7 +427,8 @@ def _convert_listlike_datetimes(
# datetime objects are found without passing `utc=True`
try:
values, tz = conversion.datetime_to_datetime64(arg)
- return DatetimeIndex._simple_new(values, name=name, tz=tz)
+ dta = DatetimeArray(values, dtype=tz_to_dtype(tz))
+ return DatetimeIndex._simple_new(dta, name=name)
except (ValueError, TypeError):
raise e
@@ -450,7 +448,8 @@ def _convert_listlike_datetimes(
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
- return DatetimeIndex._simple_new(result, name=name, tz=tz_parsed)
+ dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed))
+ return DatetimeIndex._simple_new(dta, name=name)
utc = tz == "utc"
return _box_as_indexlike(result, utc=utc, name=name)
@@ -629,6 +628,7 @@ def to_datetime(
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
+ convert_dtypes : Convert dtypes.
Examples
--------
@@ -817,8 +817,7 @@ def f(value):
required = ",".join(req)
raise ValueError(
"to assemble mappings requires at least that "
- f"[year, month, day] be specified: [{required}] "
- "is missing"
+ f"[year, month, day] be specified: [{required}] is missing"
)
# keys we don't recognize
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index e59ed247bd87b..4939cbfc9cc96 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -70,6 +70,7 @@ def to_numeric(arg, errors="raise", downcast=None):
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
+ convert_dtypes : Convert dtypes.
Examples
--------
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 3e185feaea38e..3f0cfce39f6f9 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -49,6 +49,7 @@ def to_timedelta(arg, unit="ns", errors="raise"):
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
+ convert_dtypes : Convert dtypes.
Examples
--------
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 43655fa3ea913..3366f10b92604 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -2,6 +2,7 @@
data hash pandas / numpy objects
"""
import itertools
+from typing import Optional
import numpy as np
@@ -58,7 +59,7 @@ def hash_pandas_object(
obj,
index: bool = True,
encoding: str = "utf8",
- hash_key: str = _default_hash_key,
+ hash_key: Optional[str] = _default_hash_key,
categorize: bool = True,
):
"""
@@ -82,6 +83,9 @@ def hash_pandas_object(
"""
from pandas import Series
+ if hash_key is None:
+ hash_key = _default_hash_key
+
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False)
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 64ec0e68e11b0..ed0b816f64800 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -98,8 +98,7 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False):
and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
):
raise TypeError(
- "arguments to moment function must be of type "
- "np.ndarray/Series/DataFrame"
+ "arguments to moment function must be of type np.ndarray/Series/DataFrame"
)
if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index 68c3514308cbc..a0bf3376d2352 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -1,4 +1,5 @@
from textwrap import dedent
+from typing import Dict, Optional
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
@@ -148,8 +149,23 @@ def count(self, **kwargs):
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
- def apply(self, func, raw=False, args=(), kwargs={}):
- return super().apply(func, raw=raw, args=args, kwargs=kwargs)
+ def apply(
+ self,
+ func,
+ raw: bool = False,
+ engine: str = "cython",
+ engine_kwargs: Optional[Dict[str, bool]] = None,
+ args=None,
+ kwargs=None,
+ ):
+ return super().apply(
+ func,
+ raw=raw,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ kwargs=kwargs,
+ )
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 0fa24a0ba1b5a..921cdb3c2523f 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -32,7 +32,7 @@
class BaseIndexer:
- """Base class for window bounds calculations"""
+ """Base class for window bounds calculations."""
def __init__(
self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs,
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index f612826132fd7..580c7cc0554d0 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1182,17 +1182,13 @@ class _Rolling_and_Expanding(_Rolling):
def count(self):
blocks, obj = self._create_blocks()
-
- window = self._get_window()
- window = min(window, len(obj)) if not self.center else window
-
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
- window=window,
- min_periods=0,
+ window=self._get_window(),
+ min_periods=self.min_periods or 0,
center=self.center,
axis=self.axis,
closed=self.closed,
@@ -1203,7 +1199,7 @@ def count(self):
_shared_docs["apply"] = dedent(
r"""
- The %(name)s function's apply function.
+ Apply an arbitrary function to each %(name)s window.
Parameters
----------
@@ -1657,7 +1653,11 @@ def _get_cov(X, Y):
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
- count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
+ count = (
+ (X + Y)
+ .rolling(window=window, min_periods=0, center=self.center)
+ .count(**kwargs)
+ )
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
@@ -1820,8 +1820,7 @@ def _on(self) -> Index:
else:
raise ValueError(
f"invalid on specified as {self.on}, "
- "must be a column (of DataFrame), an Index "
- "or None"
+ "must be a column (of DataFrame), an Index or None"
)
def validate(self):
@@ -1838,9 +1837,8 @@ def validate(self):
# we don't allow center
if self.center:
raise NotImplementedError(
- "center is not implemented "
- "for datetimelike and offset "
- "based windows"
+ "center is not implemented for "
+ "datetimelike and offset based windows"
)
# this will raise ValueError on non-fixed freqs
@@ -1886,8 +1884,7 @@ def _validate_freq(self):
except (TypeError, ValueError):
raise ValueError(
f"passed window {self.window} is not "
- "compatible with a datetimelike "
- "index"
+ "compatible with a datetimelike index"
)
_agg_see_also_doc = dedent(
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 34e8e03d8771e..97178261bdf72 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -69,8 +69,7 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
kwargs["engine"] = "python"
elif len(sep) > 1 and kwargs.get("engine") == "c":
warnings.warn(
- "read_clipboard with regex separator does not work "
- "properly with c engine"
+ "read_clipboard with regex separator does not work properly with c engine"
)
return read_csv(StringIO(text), sep=sep, **kwargs)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 771a302d647ec..00f2961e41617 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -78,8 +78,7 @@ def _expand_user(
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
- "Passing a bool to header is invalid. "
- "Use header=None for no header or "
+ "Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
@@ -161,10 +160,9 @@ def get_filepath_or_buffer(
Returns
-------
- tuple of ({a filepath_ or buffer or S3File instance},
- encoding, str,
- compression, str,
- should_close, bool)
+ Tuple[FilePathOrBuffer, str, str, bool]
+ Tuple containing the filepath or buffer, the encoding, the compression
+ and should_close.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
@@ -407,8 +405,8 @@ def get_handle(
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
- "Multiple files found in ZIP file."
- f" Only one file per ZIP: {zip_names}"
+ "Multiple files found in ZIP file. "
+ f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 7fdca2d65b05d..07919dbda63ae 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -57,8 +57,7 @@ def _check_columns(cols):
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError(
- f"All columns must have the same length: {N}; "
- f"column {i} has length {n}"
+ f"All columns must have the same length: {N}; column {i} has length {n}"
)
return N
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 04015a08bce2f..2a91381b7fbeb 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -35,8 +35,9 @@
"""
Read an Excel file into a pandas DataFrame.
-Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
-Support an option to read a single sheet or a list of sheets.
+Supports `xls`, `xlsx`, `xlsm`, `xlsb`, and `odf` file extensions
+read from a local filesystem or URL. Supports an option to read
+a single sheet or a list of sheets.
Parameters
----------
@@ -789,15 +790,21 @@ class ExcelFile:
If a string or path object, expected to be a path to xls, xlsx or odf file.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
- Acceptable values are None, ``xlrd``, ``openpyxl`` or ``odf``.
+ Acceptable values are None, ``xlrd``, ``openpyxl``, ``odf``, or ``pyxlsb``.
Note that ``odf`` reads tables out of OpenDocument formatted files.
"""
from pandas.io.excel._odfreader import _ODFReader
from pandas.io.excel._openpyxl import _OpenpyxlReader
from pandas.io.excel._xlrd import _XlrdReader
-
- _engines = {"xlrd": _XlrdReader, "openpyxl": _OpenpyxlReader, "odf": _ODFReader}
+ from pandas.io.excel._pyxlsb import _PyxlsbReader
+
+ _engines = {
+ "xlrd": _XlrdReader,
+ "openpyxl": _OpenpyxlReader,
+ "odf": _ODFReader,
+ "pyxlsb": _PyxlsbReader,
+ }
def __init__(self, io, engine=None):
if engine is None:
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
new file mode 100644
index 0000000000000..0d96c8c4acdb8
--- /dev/null
+++ b/pandas/io/excel/_pyxlsb.py
@@ -0,0 +1,69 @@
+from typing import List
+
+from pandas._typing import FilePathOrBuffer, Scalar
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.io.excel._base import _BaseExcelReader
+
+
+class _PyxlsbReader(_BaseExcelReader):
+ def __init__(self, filepath_or_buffer: FilePathOrBuffer):
+ """
+ Reader using pyxlsb engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer: str, path object, or Workbook
+ Object to be parsed.
+ """
+ import_optional_dependency("pyxlsb")
+ # This will call load_workbook on the filepath or buffer
+ # And set the result to the book-attribute
+ super().__init__(filepath_or_buffer)
+
+ @property
+ def _workbook_class(self):
+ from pyxlsb import Workbook
+
+ return Workbook
+
+ def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
+ from pyxlsb import open_workbook
+
+ # TODO: hack in buffer capability
+ # This might need some modifications to the Pyxlsb library
+ # Actual work for opening it is in xlsbpackage.py, line 20-ish
+
+ return open_workbook(filepath_or_buffer)
+
+ @property
+ def sheet_names(self) -> List[str]:
+ return self.book.sheets
+
+ def get_sheet_by_name(self, name: str):
+ return self.book.get_sheet(name)
+
+ def get_sheet_by_index(self, index: int):
+ # pyxlsb sheets are indexed from 1 onwards
+ # There's a fix for this in the source, but the pypi package doesn't have it
+ return self.book.get_sheet(index + 1)
+
+ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
+ # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
+ # This means that there is no way to read datetime types from an xlsb file yet
+ if cell.v is None:
+ return "" # Prevents non-named columns from not showing up as Unnamed: i
+ if isinstance(cell.v, float) and convert_float:
+ val = int(cell.v)
+ if val == cell.v:
+ return val
+ else:
+ return float(cell.v)
+
+ return cell.v
+
+ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
+ return [
+ [self._convert_cell(c, convert_float) for c in r]
+ for r in sheet.rows(sparse=False)
+ ]
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index a084be54dfa10..9d284c8031840 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -136,8 +136,7 @@ def _maybe_convert_usecols(usecols):
if is_integer(usecols):
raise ValueError(
"Passing an integer for `usecols` is no longer supported. "
- "Please pass in a list of int from 0 to `usecols` "
- "inclusive instead."
+ "Please pass in a list of int from 0 to `usecols` inclusive instead."
)
if isinstance(usecols, str):
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index eb05004d9137c..5d4925620e75f 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -37,16 +37,13 @@ def to_feather(df: DataFrame, path):
typ = type(df.index)
raise ValueError(
f"feather does not support serializing {typ} "
- "for the index; you can .reset_index() "
- "to make the index into column(s)"
+ "for the index; you can .reset_index() to make the index into column(s)"
)
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError(
- "feather does not support serializing a "
- "non-default index for the index; you "
- "can .reset_index() to make the index "
- "into column(s)"
+ "feather does not support serializing a non-default index for the index; "
+ "you can .reset_index() to make the index into column(s)"
)
if df.index.name is not None:
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 9b0f100c1b041..b0e8e4033edf2 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -1,14 +1,17 @@
-"""Utilities for conversion to writer-agnostic Excel representation
+"""
+Utilities for conversion to writer-agnostic Excel representation.
"""
from functools import reduce
import itertools
import re
-from typing import Callable, Dict, List, Optional, Sequence, Union
+from typing import Callable, Dict, Optional, Sequence, Union
import warnings
import numpy as np
+from pandas._typing import Label
+
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import is_float, is_scalar
from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex
@@ -371,10 +374,10 @@ def __init__(
df,
na_rep: str = "",
float_format: Optional[str] = None,
- cols: Optional[Sequence] = None,
- header: Union[bool, List[str]] = True,
+ cols: Optional[Sequence[Label]] = None,
+ header: Union[Sequence[Label], bool] = True,
index: bool = True,
- index_label: Union[str, Sequence, None] = None,
+ index_label: Optional[Union[Label, Sequence[Label]]] = None,
merge_cells: bool = False,
inf_rep: str = "inf",
style_converter: Optional[Callable] = None,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 6adf69a922000..149533bf0c238 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -57,10 +57,13 @@
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import (
+ ABCDatetimeIndex,
ABCIndexClass,
ABCMultiIndex,
+ ABCPeriodIndex,
ABCSeries,
ABCSparseArray,
+ ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import isna, notna
@@ -295,6 +298,9 @@ def _get_footer(self) -> str:
footer = ""
if getattr(self.series.index, "freq", None) is not None:
+ assert isinstance(
+ self.series.index, (ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex)
+ )
footer += "Freq: {freq}".format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
@@ -412,7 +418,7 @@ def __init__(self):
self.ambiguous_width = 1
# Definition of East Asian Width
- # http://unicode.org/reports/tr11/
+ # https://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
@@ -737,12 +743,8 @@ def _to_str_columns(self) -> List[List[str]]:
self.header = cast(List[str], self.header)
if len(self.header) != len(self.columns):
raise ValueError(
- (
- "Writing {ncols} cols but got {nalias} "
- "aliases".format(
- ncols=len(self.columns), nalias=len(self.header)
- )
- )
+ f"Writing {len(self.columns)} cols "
+ f"but got {len(self.header)} aliases"
)
str_columns = [[label] for label in self.header]
else:
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index b46b2f6c671d6..e3161415fe2bc 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -216,8 +216,8 @@ def _write_table(self, indent: int = 0) -> None:
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
- "classes must be a string, list, or tuple, "
- "not {typ}".format(typ=type(self.classes))
+ "classes must be a string, list, "
+ f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 008a99427f3c7..8ab56437d5c05 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -114,8 +114,7 @@ def pad_empties(x):
column_format = index_format + column_format
elif not isinstance(self.column_format, str): # pragma: no cover
raise AssertionError(
- "column_format must be str or unicode, "
- "not {typ}".format(typ=type(column_format))
+ f"column_format must be str or unicode, not {type(column_format)}"
)
else:
column_format = self.column_format
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 4b5b5e9a0ce15..13b18a0b5fb6f 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -6,12 +6,14 @@
from typing import (
Any,
Callable,
+ Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
+ TypeVar,
Union,
)
@@ -20,6 +22,8 @@
from pandas.core.dtypes.inference import is_sequence
EscapeChars = Union[Mapping[str, str], Iterable[str]]
+_KT = TypeVar("_KT")
+_VT = TypeVar("_VT")
def adjoin(space: int, *lists: List[str], **kwargs) -> str:
@@ -74,7 +78,7 @@ def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> List[str
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
-# Try to use these as much as possible rather then rolling your own.
+# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
@@ -98,7 +102,7 @@ def _pprint_seq(
) -> str:
"""
internal. pprinter for iterables. you should probably use pprint_thing()
- rather then calling this directly.
+ rather than calling this directly.
bounds length of printed sequence, depending on options
"""
@@ -133,7 +137,7 @@ def _pprint_dict(
) -> str:
"""
internal. pprinter for iterables. you should probably use pprint_thing()
- rather then calling this directly.
+ rather than calling this directly.
"""
fmt = "{{{things}}}"
pairs = []
@@ -528,3 +532,10 @@ def format_object_attrs(
if len(obj) > max_seq_items:
attrs.append(("length", len(obj)))
return attrs
+
+
+class PrettyDict(Dict[_KT, _VT]):
+ """Dict extension to support abbreviated __repr__"""
+
+ def __repr__(self) -> str:
+ return pprint_thing(self)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 8570875569e44..565752e269d79 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1,6 +1,5 @@
"""
-Module for applying conditional formatting to
-DataFrames and Series.
+Module for applying conditional formatting to DataFrames and Series.
"""
from collections import defaultdict
@@ -8,7 +7,17 @@
import copy
from functools import partial
from itertools import product
-from typing import Any, Callable, DefaultDict, Dict, List, Optional, Sequence, Tuple
+from typing import (
+ Any,
+ Callable,
+ DefaultDict,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
from uuid import uuid1
import numpy as np
@@ -16,6 +25,7 @@
from pandas._config import get_option
from pandas._libs import lib
+from pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
@@ -24,6 +34,7 @@
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
+from pandas.core.frame import DataFrame
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
@@ -41,7 +52,7 @@
@contextmanager
-def _mpl(func):
+def _mpl(func: Callable):
if has_mpl:
yield plt, colors
else:
@@ -125,13 +136,13 @@ class Styler:
def __init__(
self,
- data,
- precision=None,
- table_styles=None,
- uuid=None,
- caption=None,
- table_attributes=None,
- cell_ids=True,
+ data: FrameOrSeriesUnion,
+ precision: Optional[int] = None,
+ table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
+ uuid: Optional[str] = None,
+ caption: Optional[str] = None,
+ table_attributes: Optional[str] = None,
+ cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
@@ -175,7 +186,7 @@ def default_display_func(x):
Tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: default_display_func)
- def _repr_html_(self):
+ def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
@@ -196,22 +207,22 @@ def _repr_html_(self):
def to_excel(
self,
excel_writer,
- sheet_name="Sheet1",
- na_rep="",
- float_format=None,
- columns=None,
- header=True,
- index=True,
- index_label=None,
- startrow=0,
- startcol=0,
- engine=None,
- merge_cells=True,
- encoding=None,
- inf_rep="inf",
- verbose=True,
- freeze_panes=None,
- ):
+ sheet_name: str = "Sheet1",
+ na_rep: str = "",
+ float_format: Optional[str] = None,
+ columns: Optional[Sequence[Label]] = None,
+ header: Union[Sequence[Label], bool] = True,
+ index: bool = True,
+ index_label: Optional[Union[Label, Sequence[Label]]] = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ engine: Optional[str] = None,
+ merge_cells: bool = True,
+ encoding: Optional[str] = None,
+ inf_rep: str = "inf",
+ verbose: bool = True,
+ freeze_panes: Optional[Tuple[int, int]] = None,
+ ) -> None:
from pandas.io.formats.excel import ExcelFormatter
@@ -275,7 +286,7 @@ def format_attr(pair):
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
- cellstyle = []
+ cellstyle_map = defaultdict(list)
head = []
for r in range(n_clvls):
@@ -397,12 +408,17 @@ def format_attr(pair):
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
- props.append(x.split(":"))
+ props.append(tuple(x.split(":")))
else:
- props.append(["", ""])
- cellstyle.append({"props": props, "selector": f"row{r}_col{c}"})
+ props.append(("", ""))
+ cellstyle_map[tuple(props)].append(f"row{r}_col{c}")
body.append(row_es)
+ cellstyle = [
+ {"props": list(props), "selectors": selectors}
+ for props, selectors in cellstyle_map.items()
+ ]
+
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
@@ -423,7 +439,7 @@ def format_attr(pair):
table_attributes=table_attr,
)
- def format(self, formatter, subset=None, na_rep: Optional[str] = None):
+ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler":
"""
Format the text display value of cells.
@@ -496,7 +512,7 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None):
self._display_funcs[(i, j)] = formatter
return self
- def render(self, **kwargs):
+ def render(self, **kwargs) -> str:
"""
Render the built up styles to HTML.
@@ -545,16 +561,18 @@ def render(self, **kwargs):
d.update(kwargs)
return self.template.render(**d)
- def _update_ctx(self, attrs):
+ def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
- attrs : Series or DataFrame
- should contain strings of '<property>: <value>;<prop2>: <val2>'
- Whitespace shouldn't matter and the final trailing ';' shouldn't
- matter.
+ Parameters
+ ----------
+ attrs : DataFrame
+ should contain strings of '<property>: <value>;<prop2>: <val2>'
+ Whitespace shouldn't matter and the final trailing ';' shouldn't
+ matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.items():
@@ -563,7 +581,7 @@ def _update_ctx(self, attrs):
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
- def _copy(self, deepcopy=False):
+ def _copy(self, deepcopy: bool = False) -> "Styler":
styler = Styler(
self.data,
precision=self.precision,
@@ -580,16 +598,16 @@ def _copy(self, deepcopy=False):
styler._todo = self._todo
return styler
- def __copy__(self):
+ def __copy__(self) -> "Styler":
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
- def __deepcopy__(self, memo):
+ def __deepcopy__(self, memo) -> "Styler":
return self._copy(deepcopy=True)
- def clear(self):
+ def clear(self) -> None:
"""
Reset the styler, removing any previously applied styles.
@@ -612,7 +630,13 @@ def _compute(self):
r = func(self)(*args, **kwargs)
return r
- def _apply(self, func, axis=0, subset=None, **kwargs):
+ def _apply(
+ self,
+ func: Callable[..., "Styler"],
+ axis: Optional[Axis] = 0,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
@@ -645,7 +669,13 @@ def _apply(self, func, axis=0, subset=None, **kwargs):
self._update_ctx(result)
return self
- def apply(self, func, axis=0, subset=None, **kwargs):
+ def apply(
+ self,
+ func: Callable[..., "Styler"],
+ axis: Optional[Axis] = 0,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
"""
Apply a function column-wise, row-wise, or table-wise.
@@ -696,7 +726,7 @@ def apply(self, func, axis=0, subset=None, **kwargs):
)
return self
- def _applymap(self, func, subset=None, **kwargs):
+ def _applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
@@ -705,7 +735,7 @@ def _applymap(self, func, subset=None, **kwargs):
self._update_ctx(result)
return self
- def applymap(self, func, subset=None, **kwargs):
+ def applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
"""
Apply a function elementwise.
@@ -734,7 +764,14 @@ def applymap(self, func, subset=None, **kwargs):
)
return self
- def where(self, cond, value, other=None, subset=None, **kwargs):
+ def where(
+ self,
+ cond: Callable,
+ value: str,
+ other: Optional[str] = None,
+ subset=None,
+ **kwargs,
+ ) -> "Styler":
"""
Apply a function elementwise.
@@ -773,7 +810,7 @@ def where(self, cond, value, other=None, subset=None, **kwargs):
lambda val: value if cond(val) else other, subset=subset, **kwargs
)
- def set_precision(self, precision):
+ def set_precision(self, precision: int) -> "Styler":
"""
Set the precision used to render.
@@ -788,7 +825,7 @@ def set_precision(self, precision):
self.precision = precision
return self
- def set_table_attributes(self, attributes):
+ def set_table_attributes(self, attributes: str) -> "Styler":
"""
Set the table attributes.
@@ -812,7 +849,7 @@ def set_table_attributes(self, attributes):
self.table_attributes = attributes
return self
- def export(self):
+ def export(self) -> List[Tuple[Callable, Tuple, Dict]]:
"""
Export the styles to applied to the current Styler.
@@ -828,7 +865,7 @@ def export(self):
"""
return self._todo
- def use(self, styles):
+ def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> "Styler":
"""
Set the styles on the current Styler.
@@ -850,7 +887,7 @@ def use(self, styles):
self._todo.extend(styles)
return self
- def set_uuid(self, uuid):
+ def set_uuid(self, uuid: str) -> "Styler":
"""
Set the uuid for a Styler.
@@ -865,7 +902,7 @@ def set_uuid(self, uuid):
self.uuid = uuid
return self
- def set_caption(self, caption):
+ def set_caption(self, caption: str) -> "Styler":
"""
Set the caption on a Styler.
@@ -880,7 +917,7 @@ def set_caption(self, caption):
self.caption = caption
return self
- def set_table_styles(self, table_styles):
+ def set_table_styles(self, table_styles) -> "Styler":
"""
Set the table styles on a Styler.
@@ -927,7 +964,7 @@ def set_na_rep(self, na_rep: str) -> "Styler":
self.na_rep = na_rep
return self
- def hide_index(self):
+ def hide_index(self) -> "Styler":
"""
Hide any indices from rendering.
@@ -940,7 +977,7 @@ def hide_index(self):
self.hidden_index = True
return self
- def hide_columns(self, subset):
+ def hide_columns(self, subset) -> "Styler":
"""
Hide columns from rendering.
@@ -966,10 +1003,10 @@ def hide_columns(self, subset):
# -----------------------------------------------------------------------
@staticmethod
- def _highlight_null(v, null_color):
+ def _highlight_null(v, null_color: str) -> str:
return f"background-color: {null_color}" if pd.isna(v) else ""
- def highlight_null(self, null_color="red"):
+ def highlight_null(self, null_color: str = "red") -> "Styler":
"""
Shade the background ``null_color`` for missing values.
@@ -987,14 +1024,14 @@ def highlight_null(self, null_color="red"):
def background_gradient(
self,
cmap="PuBu",
- low=0,
- high=0,
- axis=0,
+ low: float = 0,
+ high: float = 0,
+ axis: Optional[Axis] = 0,
subset=None,
- text_color_threshold=0.408,
+ text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
- ):
+ ) -> "Styler":
"""
Color the background in a gradient style.
@@ -1069,9 +1106,9 @@ def background_gradient(
def _background_gradient(
s,
cmap="PuBu",
- low=0,
- high=0,
- text_color_threshold=0.408,
+ low: float = 0,
+ high: float = 0,
+ text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
):
@@ -1095,7 +1132,7 @@ def _background_gradient(
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.to_numpy(dtype=float)))
- def relative_luminance(rgba):
+ def relative_luminance(rgba) -> float:
"""
Calculate relative luminance of a color.
@@ -1117,7 +1154,7 @@ def relative_luminance(rgba):
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
- def css(rgba):
+ def css(rgba) -> str:
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
@@ -1131,7 +1168,7 @@ def css(rgba):
columns=s.columns,
)
- def set_properties(self, subset=None, **kwargs):
+ def set_properties(self, subset=None, **kwargs) -> "Styler":
"""
Method to set one or more non-data dependent properties or each cell.
@@ -1157,7 +1194,14 @@ def set_properties(self, subset=None, **kwargs):
return self.applymap(f, subset=subset)
@staticmethod
- def _bar(s, align, colors, width=100, vmin=None, vmax=None):
+ def _bar(
+ s,
+ align: str,
+ colors: List[str],
+ width: float = 100,
+ vmin: Optional[float] = None,
+ vmax: Optional[float] = None,
+ ):
"""
Draw bar chart in dataframe cells.
"""
@@ -1175,7 +1219,7 @@ def _bar(s, align, colors, width=100, vmin=None, vmax=None):
normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
- def css_bar(start, end, color):
+ def css_bar(start: float, end: float, color: str) -> str:
"""
Generate CSS code to draw a bar from start to end.
"""
@@ -1212,13 +1256,13 @@ def css(x):
def bar(
self,
subset=None,
- axis=0,
+ axis: Optional[Axis] = 0,
color="#d65f5f",
- width=100,
- align="left",
- vmin=None,
- vmax=None,
- ):
+ width: float = 100,
+ align: str = "left",
+ vmin: Optional[float] = None,
+ vmax: Optional[float] = None,
+ ) -> "Styler":
"""
Draw bar chart in the cell backgrounds.
@@ -1293,7 +1337,9 @@ def bar(
return self
- def highlight_max(self, subset=None, color="yellow", axis=0):
+ def highlight_max(
+ self, subset=None, color: str = "yellow", axis: Optional[Axis] = 0
+ ) -> "Styler":
"""
Highlight the maximum by shading the background.
@@ -1313,7 +1359,9 @@ def highlight_max(self, subset=None, color="yellow", axis=0):
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
- def highlight_min(self, subset=None, color="yellow", axis=0):
+ def highlight_min(
+ self, subset=None, color: str = "yellow", axis: Optional[Axis] = 0
+ ) -> "Styler":
"""
Highlight the minimum by shading the background.
@@ -1335,7 +1383,13 @@ def highlight_min(self, subset=None, color="yellow", axis=0):
subset=subset, color=color, axis=axis, max_=False
)
- def _highlight_handler(self, subset=None, color="yellow", axis=None, max_=True):
+ def _highlight_handler(
+ self,
+ subset=None,
+ color: str = "yellow",
+ axis: Optional[Axis] = None,
+ max_: bool = True,
+ ) -> "Styler":
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(
self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_
@@ -1343,7 +1397,9 @@ def _highlight_handler(self, subset=None, color="yellow", axis=None, max_=True):
return self
@staticmethod
- def _highlight_extrema(data, color="yellow", max_=True):
+ def _highlight_extrema(
+ data: FrameOrSeries, color: str = "yellow", max_: bool = True
+ ):
"""
Highlight the min or max in a Series or DataFrame.
"""
@@ -1388,7 +1444,7 @@ class MyStyler(cls):
return MyStyler
- def pipe(self, func, *args, **kwargs):
+ def pipe(self, func: Callable, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
@@ -1460,7 +1516,7 @@ def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
-def _is_visible(idx_row, idx_col, lengths):
+def _is_visible(idx_row, idx_col, lengths) -> bool:
"""
Index -> {(idx_row, idx_col): bool}).
"""
@@ -1510,7 +1566,9 @@ def _get_level_lengths(index, hidden_elements=None):
return non_zero_lengths
-def _maybe_wrap_formatter(formatter, na_rep: Optional[str]):
+def _maybe_wrap_formatter(
+ formatter: Union[Callable, str], na_rep: Optional[str]
+) -> Callable:
if isinstance(formatter, str):
formatter_func = lambda x: formatter.format(x)
elif callable(formatter):
diff --git a/pandas/io/formats/templates/html.tpl b/pandas/io/formats/templates/html.tpl
index 15feafcea6864..97bfda9af089d 100644
--- a/pandas/io/formats/templates/html.tpl
+++ b/pandas/io/formats/templates/html.tpl
@@ -14,7 +14,7 @@
{% block before_cellstyle %}{% endblock before_cellstyle %}
{% block cellstyle %}
{%- for s in cellstyle %}
- #T_{{uuid}}{{s.selector}} {
+ {%- for selector in s.selectors -%}{%- if not loop.first -%},{%- endif -%}#T_{{uuid}}{{selector}}{%- endfor -%} {
{% for p,val in s.props %}
{{p}}: {{val}};
{% endfor %}
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 69ebc470fba6f..405bf27cac02d 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -64,9 +64,9 @@ def read_gbq(
when getting user credentials.
.. _local webserver flow:
- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
- http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
diff --git a/pandas/io/html.py b/pandas/io/html.py
index eafcca0e85bb3..c676bfb1f0c74 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -591,9 +591,14 @@ def _setup_build_doc(self):
def _build_doc(self):
from bs4 import BeautifulSoup
- return BeautifulSoup(
- self._setup_build_doc(), features="html5lib", from_encoding=self.encoding
- )
+ bdoc = self._setup_build_doc()
+ if isinstance(bdoc, bytes) and self.encoding is not None:
+ udoc = bdoc.decode(self.encoding)
+ from_encoding = None
+ else:
+ udoc = bdoc
+ from_encoding = self.encoding
+ return BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)
def _build_xpath_expr(attrs) -> str:
@@ -899,8 +904,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
f"The flavor {flav} failed to parse your input. "
"Since you passed a non-rewindable file "
"object, we can't rewind it to try "
- "another parser. Try read_html() with a "
- "different flavor."
+ "another parser. Try read_html() with a different flavor."
)
retained = caught
@@ -982,7 +986,7 @@ def read_html(
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
- <http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
+ <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::
attrs = {'asdf': 'table'}
@@ -991,7 +995,7 @@ def read_html(
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
- <http://www.w3.org/TR/html-markup/table.html>`__. It contains the
+ <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 12ce5e4a62d24..204807b55c877 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -439,8 +439,17 @@ def read_json(
Not applicable for ``orient='table'``.
convert_dates : bool or list of str, default True
- List of columns to parse for dates. If True, then try to parse
- datelike columns. A column label is datelike if
+ If True then default datelike columns may be converted (depending on
+ keep_default_dates).
+ If False, no dates will be converted.
+ If a list of column names, then those columns will be converted and
+ default datelike columns may also be converted (depending on
+ keep_default_dates).
+
+ keep_default_dates : bool, default True
+ If parsing dates (convert_dates is not False), then try to parse the
+ default datelike columns.
+ A column label is datelike if
* it ends with ``'_at'``,
@@ -452,9 +461,6 @@ def read_json(
* it is ``'date'``.
- keep_default_dates : bool, default True
- If parsing dates, then parse the default datelike columns.
-
numpy : bool, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
@@ -936,7 +942,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
if (new_data == data).all():
data = new_data
result = True
- except (TypeError, ValueError):
+ except (TypeError, ValueError, OverflowError):
pass
# coerce ints to 64
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index c0596c984575a..b638bdc0bc1eb 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -116,7 +116,7 @@ def _json_normalize(
meta: Optional[Union[str, List[Union[str, List[str]]]]] = None,
meta_prefix: Optional[str] = None,
record_prefix: Optional[str] = None,
- errors: Optional[str] = "raise",
+ errors: str = "raise",
sep: str = ".",
max_level: Optional[int] = None,
) -> "DataFrame":
@@ -317,8 +317,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
meta_val = np.nan
else:
raise KeyError(
- "Try running with "
- "errors='ignore' as key "
+ "Try running with errors='ignore' as key "
f"{e} is not always present"
)
meta_vals[key].append(meta_val)
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 5f23b95c10f8e..4e42533ca2744 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -1,7 +1,7 @@
"""
Table Schema builders
-http://specs.frictionlessdata.io/json-table-schema/
+https://specs.frictionlessdata.io/json-table-schema/
"""
import warnings
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 3a686a1a3b122..98f2eb3929b59 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -32,8 +32,7 @@ def get_engine(engine: str) -> "BaseImpl":
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
- "pyarrow or fastparquet is required for parquet "
- "support"
+ "pyarrow or fastparquet is required for parquet support"
)
if engine == "pyarrow":
@@ -52,7 +51,7 @@ def validate_dataframe(df: DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
- if df.columns.inferred_type not in {"string", "unicode", "empty"}:
+ if df.columns.inferred_type not in {"string", "empty"}:
raise ValueError("parquet must have string column names")
# index level names must be strings
@@ -156,8 +155,7 @@ def write(
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
- "partition_cols. Use partition_cols for "
- "partitioning data"
+ "partition_cols. Use partition_cols for partitioning data"
)
elif "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index b4eb2fb1411d0..84a8b5b2a94fe 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -612,8 +612,7 @@ def parser_f(
if delim_whitespace and delimiter != default_sep:
raise ValueError(
"Specified a delimiter with both sep and "
- "delim_whitespace=True; you can only "
- "specify one."
+ "delim_whitespace=True; you can only specify one."
)
if engine is not None:
@@ -907,8 +906,8 @@ def _get_options_with_defaults(self, engine):
pass
else:
raise ValueError(
- f"The {repr(argname)} option is not supported with the"
- f" {repr(engine)} engine"
+ f"The {repr(argname)} option is not supported with the "
+ f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
@@ -968,8 +967,7 @@ def _clean_options(self, options, engine):
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
- r"different from '\s+' are "
- "interpreted as regex)"
+ r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
@@ -1000,8 +998,7 @@ def _clean_options(self, options, engine):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
- "and the 'c' engine does not support "
- "such quotechars"
+ "and the 'c' engine does not support such quotechars"
)
engine = "python"
@@ -1119,9 +1116,8 @@ def _make_engine(self, engine="c"):
klass = FixedWidthFieldParser
else:
raise ValueError(
- f"Unknown engine: {engine} (valid options are "
- '"c", "python", or '
- '"python-fwf")'
+ f"Unknown engine: {engine} (valid options "
+ 'are "c", "python", or "python-fwf")'
)
self._engine = klass(self.f, **self.options)
@@ -1230,8 +1226,7 @@ def _validate_usecols_names(usecols, names):
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
- "Usecols do not match columns, "
- f"columns expected but not found: {missing}"
+ f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
@@ -1309,7 +1304,7 @@ def _validate_usecols_arg(usecols):
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
- if usecols_dtype not in ("empty", "integer", "string", "unicode"):
+ if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
@@ -1325,8 +1320,7 @@ def _validate_parse_dates_arg(parse_dates):
that is the case.
"""
msg = (
- "Only booleans, lists, and "
- "dictionaries are accepted "
+ "Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
@@ -1680,8 +1674,7 @@ def _convert_to_ndarrays(
warnings.warn(
(
"Both a converter and dtype were specified "
- f"for column {c} - only the converter will "
- "be used"
+ f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
@@ -1826,8 +1819,7 @@ def _cast_types(self, values, cast_type, column):
except NotImplementedError:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
- "_from_sequence_of_strings in order "
- "to be used in parser methods"
+ "_from_sequence_of_strings in order to be used in parser methods"
)
else:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d61d1cf7f0257..c1e12887b0150 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -58,7 +58,7 @@
concat,
isna,
)
-from pandas.core.arrays.categorical import Categorical
+from pandas.core.arrays import Categorical, DatetimeArray, PeriodArray
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.indexes.api import ensure_index
@@ -413,8 +413,8 @@ def read_hdf(
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError(
- "key must be provided when HDF5 file "
- "contains multiple datasets."
+ "key must be provided when HDF5 "
+ "file contains multiple datasets."
)
key = candidate_only_group._v_pathname
return store.select(
@@ -1240,8 +1240,7 @@ def append_to_multiple(
if v is None:
if remain_key is not None:
raise ValueError(
- "append_to_multiple can only have one value in d that "
- "is None"
+ "append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
@@ -2313,8 +2312,7 @@ def validate_attr(self, append):
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
raise ValueError(
- "appended items dtype do not match existing "
- "items dtype in table!"
+ "appended items dtype do not match existing items dtype in table!"
)
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
@@ -2474,6 +2472,7 @@ class Fixed:
"""
pandas_kind: str
+ format_type: str = "fixed" # GH#30962 needed by dask
obj_type: Type[Union[DataFrame, Series]]
ndim: int
encoding: str
@@ -2658,7 +2657,8 @@ def _get_index_factory(self, klass):
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
- result = DatetimeIndex._simple_new(values.values, name=None, freq=freq)
+ dta = DatetimeArray._simple_new(values.values, freq=freq)
+ result = DatetimeIndex._simple_new(dta, name=None)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
@@ -2667,7 +2667,8 @@ def f(values, freq=None, tz=None):
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
- return PeriodIndex._simple_new(values, name=None, freq=freq)
+ parr = PeriodArray._simple_new(values, freq=freq)
+ return PeriodIndex._simple_new(parr, name=None)
return f
@@ -2680,14 +2681,12 @@ def validate_read(self, columns, where):
if columns is not None:
raise TypeError(
"cannot pass a column specification when reading "
- "a Fixed format store. this store must be "
- "selected in its entirety"
+ "a Fixed format store. this store must be selected in its entirety"
)
if where is not None:
raise TypeError(
"cannot pass a where specification when reading "
- "from a Fixed format store. this store must be "
- "selected in its entirety"
+ "from a Fixed format store. this store must be selected in its entirety"
)
@property
@@ -2908,8 +2907,7 @@ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None)
if is_categorical_dtype(value):
raise NotImplementedError(
- "Cannot store a category dtype in "
- "a HDF5 dataset that uses format="
+ "Cannot store a category dtype in a HDF5 dataset that uses format="
'"fixed". Use format="table".'
)
if not empty_array:
@@ -3132,6 +3130,7 @@ class Table(Fixed):
"""
pandas_kind = "wide_table"
+ format_type: str = "table" # GH#30962 needed by dask
table_type: str
levels = 1
is_table = True
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index bb5bce96bc64b..40fea0aaf0d07 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -13,8 +13,7 @@ ctypedef unsigned short uint16_t
# algorithm. It is partially documented here:
#
# https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
-cdef const uint8_t[:] rle_decompress(int result_length,
- const uint8_t[:] inbuff):
+cdef const uint8_t[:] rle_decompress(int result_length, const uint8_t[:] inbuff):
cdef:
uint8_t control_byte, x
@@ -33,7 +32,7 @@ cdef const uint8_t[:] rle_decompress(int result_length,
raise ValueError("Unexpected non-zero end_of_first_byte")
nbytes = <int>(inbuff[ipos]) + 64
ipos += 1
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = inbuff[ipos]
rpos += 1
ipos += 1
@@ -42,20 +41,20 @@ cdef const uint8_t[:] rle_decompress(int result_length,
nbytes = end_of_first_byte * 16
nbytes += <int>(inbuff[ipos])
ipos += 1
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = inbuff[ipos]
rpos += 1
ipos += 1
elif control_byte == 0x60:
nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
ipos += 1
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = 0x20
rpos += 1
elif control_byte == 0x70:
nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
ipos += 1
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = 0x00
rpos += 1
elif control_byte == 0x80:
@@ -86,22 +85,22 @@ cdef const uint8_t[:] rle_decompress(int result_length,
nbytes = end_of_first_byte + 3
x = inbuff[ipos]
ipos += 1
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = x
rpos += 1
elif control_byte == 0xD0:
nbytes = end_of_first_byte + 2
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = 0x40
rpos += 1
elif control_byte == 0xE0:
nbytes = end_of_first_byte + 2
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = 0x20
rpos += 1
elif control_byte == 0xF0:
nbytes = end_of_first_byte + 2
- for i in range(nbytes):
+ for _ in range(nbytes):
result[rpos] = 0x00
rpos += 1
else:
@@ -117,8 +116,7 @@ cdef const uint8_t[:] rle_decompress(int result_length,
# rdc_decompress decompresses data using the Ross Data Compression algorithm:
#
# http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
-cdef const uint8_t[:] rdc_decompress(int result_length,
- const uint8_t[:] inbuff):
+cdef const uint8_t[:] rdc_decompress(int result_length, const uint8_t[:] inbuff):
cdef:
uint8_t cmd
@@ -233,8 +231,7 @@ cdef class Parser:
int subheader_pointer_length
int current_page_type
bint is_little_endian
- const uint8_t[:] (*decompress)(int result_length,
- const uint8_t[:] inbuff)
+ const uint8_t[:] (*decompress)(int result_length, const uint8_t[:] inbuff)
object parser
def __init__(self, object parser):
@@ -267,8 +264,7 @@ cdef class Parser:
elif column_types[j] == b's':
self.column_types[j] = column_type_string
else:
- raise ValueError("unknown column type: "
- f"{self.parser.columns[j].ctype}")
+ raise ValueError(f"unknown column type: {self.parser.columns[j].ctype}")
# compression
if parser.compression == const.rle_compression:
@@ -288,15 +284,14 @@ cdef class Parser:
bint done
int i
- for i in range(nrows):
+ for _ in range(nrows):
done = self.readline()
if done:
break
# update the parser
self.parser._current_row_on_page_index = self.current_row_on_page_index
- self.parser._current_row_in_chunk_index =\
- self.current_row_in_chunk_index
+ self.parser._current_row_in_chunk_index = self.current_row_in_chunk_index
self.parser._current_row_in_file_index = self.current_row_in_file_index
cdef bint read_next_page(self):
@@ -317,9 +312,9 @@ cdef class Parser:
self.current_page_type = self.parser._current_page_type
self.current_page_block_count = self.parser._current_page_block_count
self.current_page_data_subheader_pointers_len = len(
- self.parser._current_page_data_subheader_pointers)
- self.current_page_subheaders_count =\
- self.parser._current_page_subheaders_count
+ self.parser._current_page_data_subheader_pointers
+ )
+ self.current_page_subheaders_count = self.parser._current_page_subheaders_count
cdef readline(self):
@@ -357,19 +352,18 @@ cdef class Parser:
return False
elif (self.current_page_type == page_mix_types_0 or
self.current_page_type == page_mix_types_1):
- align_correction = (bit_offset + subheader_pointers_offset +
- self.current_page_subheaders_count *
- subheader_pointer_length)
+ align_correction = (
+ bit_offset
+ + subheader_pointers_offset
+ + self.current_page_subheaders_count * subheader_pointer_length
+ )
align_correction = align_correction % 8
offset = bit_offset + align_correction
offset += subheader_pointers_offset
- offset += (self.current_page_subheaders_count *
- subheader_pointer_length)
+ offset += self.current_page_subheaders_count * subheader_pointer_length
offset += self.current_row_on_page_index * self.row_length
- self.process_byte_array_with_data(offset,
- self.row_length)
- mn = min(self.parser.row_count,
- self.parser._mix_page_row_count)
+ self.process_byte_array_with_data(offset, self.row_length)
+ mn = min(self.parser.row_count, self.parser._mix_page_row_count)
if self.current_row_on_page_index == mn:
done = self.read_next_page()
if done:
@@ -377,11 +371,12 @@ cdef class Parser:
return False
elif self.current_page_type & page_data_type == page_data_type:
self.process_byte_array_with_data(
- bit_offset + subheader_pointers_offset +
- self.current_row_on_page_index * self.row_length,
- self.row_length)
- flag = (self.current_row_on_page_index ==
- self.current_page_block_count)
+ bit_offset
+ + subheader_pointers_offset
+ + self.current_row_on_page_index * self.row_length,
+ self.row_length,
+ )
+ flag = self.current_row_on_page_index == self.current_page_block_count
if flag:
done = self.read_next_page()
if done:
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index f917477b81489..9b40778dbcfdf 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -459,8 +459,7 @@ def _process_columnsize_subheader(self, offset, length):
if self.col_count_p1 + self.col_count_p2 != self.column_count:
print(
f"Warning: column count mismatch ({self.col_count_p1} + "
- f"{self.col_count_p2} != "
- f"{self.column_count})\n"
+ f"{self.col_count_p2} != {self.column_count})\n"
)
# Unknown purpose
@@ -672,8 +671,7 @@ def _read_next_page(self):
self.close()
msg = (
"failed to read complete page from file (read "
- f"{len(self._cached_page):d} of "
- f"{self._page_length:d} bytes)"
+ f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
)
raise ValueError(msg)
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 56ebb583bc2f9..27d56d4ede403 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -49,8 +49,7 @@ def read_sas(
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
- "than a string name, you must specify "
- "a format string"
+ "than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index f4527994db0d2..58fed0d18dd4a 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -977,8 +977,7 @@ def _sqlalchemy_type(self, col):
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
- "written as integer values (ns frequency) to the "
- "database.",
+ "written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
@@ -1413,8 +1412,7 @@ def _get_valid_sqlite_name(name):
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
- "In pandas versions < 0.14, spaces were converted to "
- "underscores."
+ "In pandas versions < 0.14, spaces were converted to underscores."
)
@@ -1528,8 +1526,7 @@ def _sql_type_name(self, col):
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
- "written as integer values (ns frequency) to the "
- "database.",
+ "written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index b216ee80c3940..06bf906be7093 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -7,15 +7,16 @@
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
-http://www.statsmodels.org/devel/
+https://www.statsmodels.org/devel/
"""
from collections import abc
import datetime
-from io import BytesIO
+from io import BytesIO, IOBase
import os
+from pathlib import Path
import struct
import sys
-from typing import Any
+from typing import Any, AnyStr, BinaryIO, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from dateutil.relativedelta import relativedelta
@@ -23,6 +24,7 @@
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
+from pandas._typing import FilePathOrBuffer, Label
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
@@ -42,14 +44,16 @@
to_timedelta,
)
from pandas.core.frame import DataFrame
+from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import get_filepath_or_buffer, stringify_path
_version_error = (
- "Version of given Stata file is not 104, 105, 108, "
- "111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
- "115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)"
+ "Version of given Stata file is {version}. pandas supports importing "
+ "versions 104, 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
+ "114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
+ "and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
@@ -158,51 +162,18 @@
"""
-@Appender(_read_stata_doc)
-def read_stata(
- filepath_or_buffer,
- convert_dates=True,
- convert_categoricals=True,
- index_col=None,
- convert_missing=False,
- preserve_dtypes=True,
- columns=None,
- order_categoricals=True,
- chunksize=None,
- iterator=False,
-):
-
- reader = StataReader(
- filepath_or_buffer,
- convert_dates=convert_dates,
- convert_categoricals=convert_categoricals,
- index_col=index_col,
- convert_missing=convert_missing,
- preserve_dtypes=preserve_dtypes,
- columns=columns,
- order_categoricals=order_categoricals,
- chunksize=chunksize,
- )
-
- if iterator or chunksize:
- data = reader
- else:
- try:
- data = reader.read()
- finally:
- reader.close()
- return data
-
-
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
-def _stata_elapsed_date_to_datetime_vec(dates, fmt):
+# TODO: Add typing. As of January 2020 it is not possible to type this function since
+# mypy doesn't understand that a Series and an int can be combined using mathematical
+# operations. (+, -).
+def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
- Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
+ Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
@@ -245,9 +216,6 @@ def _stata_elapsed_date_to_datetime_vec(dates, fmt):
half-years since 1960h1 yearly
date - ty
years since 0000
-
- If you don't have pandas with datetime support, then you can't do
- milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
@@ -255,7 +223,7 @@ def _stata_elapsed_date_to_datetime_vec(dates, fmt):
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
- def convert_year_month_safe(year, month):
+ def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
@@ -270,7 +238,7 @@ def convert_year_month_safe(year, month):
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
- def convert_year_days_safe(year, days):
+ def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
@@ -285,7 +253,7 @@ def convert_year_days_safe(year, days):
]
return Series(value, index=index)
- def convert_delta_safe(base, deltas, unit):
+ def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
@@ -346,16 +314,16 @@ def convert_delta_safe(base, deltas, unit):
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
- month = (dates % 4) * 3 + 1
- conv_dates = convert_year_month_safe(year, month)
+ quarter_month = (dates % 4) * 3 + 1
+ conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
- month = np.ones_like(dates)
- conv_dates = convert_year_month_safe(year, month)
+ first_month = np.ones_like(dates)
+ conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
@@ -365,9 +333,9 @@ def convert_delta_safe(base, deltas, unit):
return conv_dates
-def _datetime_to_stata_elapsed_vec(dates, fmt):
+def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
- Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
+ Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
@@ -385,21 +353,26 @@ def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
- delta = dates - stata_epoch
- d["delta"] = delta.values.astype(np.int64) // 1000 # microseconds
+ time_delta = dates - stata_epoch
+ d["delta"] = time_delta.values.astype(np.int64) // 1000 # microseconds
if days or year:
- dates = DatetimeIndex(dates)
- d["year"], d["month"] = dates.year, dates.month
+ # ignore since mypy reports that DatetimeIndex has no year/month
+ date_index = DatetimeIndex(dates)
+ d["year"] = date_index.year # type: ignore
+ d["month"] = date_index.month # type: ignore
if days:
- days = dates.astype(np.int64) - to_datetime(
+ days_in_ns = dates.astype(np.int64) - to_datetime(
d["year"], format="%Y"
).astype(np.int64)
- d["days"] = days // NS_PER_DAY
+ d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates.values - stata_epoch
- f = lambda x: US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
+
+ def f(x: datetime.timedelta) -> float:
+ return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
+
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
@@ -407,8 +380,11 @@ def parse_dates_safe(dates, delta=False, year=False, days=False):
d["year"] = year_month.values // 100
d["month"] = year_month.values - d["year"] * 100
if days:
- f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
- v = np.vectorize(f)
+
+ def g(x: datetime.datetime) -> int:
+ return (x - datetime.datetime(x.year, 1, 1)).days
+
+ v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
@@ -505,7 +481,7 @@ class InvalidColumnName(Warning):
"""
-def _cast_to_stata_types(data):
+def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
@@ -599,13 +575,13 @@ class StataValueLabel:
Parameters
----------
- catarray : Categorical
+ catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
- def __init__(self, catarray, encoding="latin-1"):
+ def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
@@ -614,10 +590,10 @@ def __init__(self, catarray, encoding="latin-1"):
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
- self.text_len = np.int32(0)
- self.off = []
- self.val = []
- self.txt = []
+ self.text_len = 0
+ self.off: List[int] = []
+ self.val: List[int] = []
+ self.txt: List[bytes] = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
@@ -639,8 +615,7 @@ def __init__(self, catarray, encoding="latin-1"):
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
- "have a combined length less than 32,000 "
- "characters."
+ "have a combined length less than 32,000 characters."
)
# Ensure int32
@@ -650,13 +625,7 @@ def __init__(self, catarray, encoding="latin-1"):
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
- def _encode(self, s):
- """
- Python 3 compatibility shim
- """
- return s.encode(self._encoding)
-
- def generate_value_label(self, byteorder):
+ def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labals.
@@ -678,7 +647,7 @@ def generate_value_label(self, byteorder):
bio.write(struct.pack(byteorder + "i", self.len))
# labname
- labname = self.labname[:32].encode(encoding)
+ labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
@@ -716,19 +685,12 @@ class StataMissingValue:
Parameters
----------
- value : int8, int16, int32, float32 or float64
+ value : {int, float}
The Stata missing value code
- Attributes
- ----------
- string : string
- String representation of the Stata missing value
- value : int8, int16, int32, float32 or float64
- The original encoded missing value
-
Notes
-----
- More information: <http://www.stata.com/help.cgi?missing>
+ More information: <https://www.stata.com/help.cgi?missing>
Integer missing values make the code '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
@@ -755,7 +717,7 @@ class StataMissingValue:
"""
# Construct a dictionary of missing values
- MISSING_VALUES = {}
+ MISSING_VALUES: Dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
@@ -766,21 +728,21 @@ class StataMissingValue:
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
- value = struct.unpack("<f", float32_base)[0]
- MISSING_VALUES[value] = "."
+ key = struct.unpack("<f", float32_base)[0]
+ MISSING_VALUES[key] = "."
if i > 0:
- MISSING_VALUES[value] += chr(96 + i)
- int_value = struct.unpack("<i", struct.pack("<f", value))[0] + increment
+ MISSING_VALUES[key] += chr(96 + i)
+ int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
- value = struct.unpack("<d", float64_base)[0]
- MISSING_VALUES[value] = "."
+ key = struct.unpack("<d", float64_base)[0]
+ MISSING_VALUES[key] = "."
if i > 0:
- MISSING_VALUES[value] += chr(96 + i)
- int_value = struct.unpack("q", struct.pack("<d", value))[0] + increment
+ MISSING_VALUES[key] += chr(96 + i)
+ int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
@@ -791,19 +753,35 @@ class StataMissingValue:
"float64": struct.unpack("<d", float64_base)[0],
}
- def __init__(self, value):
+ def __init__(self, value: Union[int, float]):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
- string = property(
- lambda self: self._str,
- doc="The Stata representation of the missing value: '.', '.a'..'.z'",
- )
- value = property(
- lambda self: self._value, doc="The binary representation of the missing value."
- )
+ @property
+ def string(self) -> str:
+ """
+ The Stata representation of the missing value: '.', '.a'..'.z'
+
+ Returns
+ -------
+ str
+ The representation of the missing value.
+ """
+ return self._str
+
+ @property
+ def value(self) -> Union[int, float]:
+ """
+ The binary representation of the missing value.
+
+ Returns
+ -------
+ {int, float}
+ The binary representation of the missing value.
+ """
+ return self._value
def __str__(self) -> str:
return self.string
@@ -819,7 +797,7 @@ def __eq__(self, other: Any) -> bool:
)
@classmethod
- def get_base_missing_value(cls, dtype):
+ def get_base_missing_value(cls, dtype: np.dtype) -> Union[int, float]:
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES["int8"]
elif dtype == np.int16:
@@ -1004,18 +982,18 @@ class StataReader(StataParser, abc.Iterator):
def __init__(
self,
- path_or_buf,
- convert_dates=True,
- convert_categoricals=True,
- index_col=None,
- convert_missing=False,
- preserve_dtypes=True,
- columns=None,
- order_categoricals=True,
- chunksize=None,
+ path_or_buf: FilePathOrBuffer,
+ convert_dates: bool = True,
+ convert_categoricals: bool = True,
+ index_col: Optional[str] = None,
+ convert_missing: bool = False,
+ preserve_dtypes: bool = True,
+ columns: Optional[Sequence[str]] = None,
+ order_categoricals: bool = True,
+ chunksize: Optional[int] = None,
):
super().__init__()
- self.col_sizes = ()
+ self.col_sizes: List[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
@@ -1026,7 +1004,7 @@ def __init__(
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
- self._encoding = None
+ self._encoding = ""
self._chunksize = chunksize
# State variables for the file
@@ -1046,7 +1024,7 @@ def __init__(
if isinstance(path_or_buf, (str, bytes)):
self.path_or_buf = open(path_or_buf, "rb")
- else:
+ elif isinstance(path_or_buf, IOBase):
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
self.path_or_buf = BytesIO(contents)
@@ -1054,22 +1032,22 @@ def __init__(
self._read_header()
self._setup_dtype()
- def __enter__(self):
+ def __enter__(self) -> "StataReader":
""" enter context manager """
return self
- def __exit__(self, exc_type, exc_value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
""" exit context manager """
self.close()
- def close(self):
+ def close(self) -> None:
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
- def _set_encoding(self):
+ def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
@@ -1078,10 +1056,10 @@ def _set_encoding(self):
else:
self._encoding = "utf-8"
- def _read_header(self):
+ def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
- self._read_new_header(first_char)
+ self._read_new_header()
else:
self._read_old_header(first_char)
@@ -1090,12 +1068,12 @@ def _read_header(self):
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
- def _read_new_header(self, first_char):
- # The first part of the header is common to 117 and 118.
+ def _read_new_header(self) -> None:
+ # The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
- raise ValueError(_version_error)
+ raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
@@ -1167,15 +1145,17 @@ def _read_new_header(self, first_char):
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
- def _get_dtypes(self, seek_vartypes):
+ def _get_dtypes(
+ self, seek_vartypes: int
+ ) -> Tuple[List[Union[int, str]], List[Union[int, np.dtype]]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
- for i in range(self.nvar)
+ for _ in range(self.nvar)
]
- def f(typ):
+ def f(typ: int) -> Union[int, str]:
if typ <= 2045:
return typ
try:
@@ -1185,7 +1165,7 @@ def f(typ):
typlist = [f(x) for x in raw_typlist]
- def f(typ):
+ def g(typ: int) -> Union[str, np.dtype]:
if typ <= 2045:
return str(typ)
try:
@@ -1193,20 +1173,17 @@ def f(typ):
except KeyError:
raise ValueError(f"cannot convert stata dtype [{typ}]")
- dtyplist = [f(x) for x in raw_typlist]
+ dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
- def _get_varlist(self):
- if self.format_version == 117:
- b = 33
- elif self.format_version >= 118:
- b = 129
-
- return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
+ def _get_varlist(self) -> List[str]:
+ # 33 in order formats, 129 in formats 118 and 119
+ b = 33 if self.format_version < 118 else 129
+ return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
- def _get_fmtlist(self):
+ def _get_fmtlist(self) -> List[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
@@ -1216,40 +1193,40 @@ def _get_fmtlist(self):
else:
b = 7
- return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
+ return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
- def _get_lbllist(self):
+ def _get_lbllist(self) -> List[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
- return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
+ return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
- def _get_variable_labels(self):
+ def _get_variable_labels(self) -> List[str]:
if self.format_version >= 118:
vlblist = [
- self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)
+ self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
- self._decode(self.path_or_buf.read(81)) for i in range(self.nvar)
+ self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
- self._decode(self.path_or_buf.read(32)) for i in range(self.nvar)
+ self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
- def _get_nobs(self):
+ def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
- def _get_data_label(self):
+ def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
@@ -1261,7 +1238,7 @@ def _get_data_label(self):
else:
return self._decode(self.path_or_buf.read(32))
- def _get_time_stamp(self):
+ def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
@@ -1273,9 +1250,9 @@ def _get_time_stamp(self):
else:
raise ValueError()
- def _get_seek_variable_labels(self):
+ def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
- self.path_or_buf.read(8) # <variable_lables>, throw away
+ self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
@@ -1285,10 +1262,10 @@ def _get_seek_variable_labels(self):
else:
raise ValueError()
- def _read_old_header(self, first_char):
+ def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
- raise ValueError(_version_error)
+ raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
@@ -1305,7 +1282,7 @@ def _read_old_header(self, first_char):
# descriptors
if self.format_version > 108:
- typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)]
+ typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
@@ -1329,11 +1306,11 @@ def _read_old_header(self, first_char):
if self.format_version > 108:
self.varlist = [
- self._decode(self.path_or_buf.read(33)) for i in range(self.nvar)
+ self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
- self._decode(self.path_or_buf.read(9)) for i in range(self.nvar)
+ self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
@@ -1371,26 +1348,27 @@ def _read_old_header(self, first_char):
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
- def _setup_dtype(self):
+ def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
- dtype = [] # Convert struct data types to numpy data type
+ dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
- dtype.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
+ dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
- dtype.append(("s" + str(i), "S" + str(typ)))
- dtype = np.dtype(dtype)
- self._dtype = dtype
+ dtypes.append(("s" + str(i), "S" + str(typ)))
+ self._dtype = np.dtype(dtypes)
return self._dtype
- def _calcsize(self, fmt):
- return type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt)
+ def _calcsize(self, fmt: Union[int, str]) -> int:
+ if isinstance(fmt, int):
+ return fmt
+ return struct.calcsize(self.byteorder + fmt)
- def _decode(self, s):
+ def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
@@ -1407,24 +1385,25 @@ def _decode(self, s):
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
- def _read_value_labels(self):
+ def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
- self.value_label_dict = dict()
+ self.value_label_dict: Dict[str, Dict[Union[float, int], str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
+ assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
- self.value_label_dict = dict()
+ self.value_label_dict = {}
while True:
if self.format_version >= 117:
@@ -1460,7 +1439,7 @@ def _read_value_labels(self):
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
- def _read_strls(self):
+ def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
@@ -1475,23 +1454,26 @@ def _read_strls(self):
# Only tested on little endian file on little endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
- buf = buf[0:v_size] + buf[4 : 12 - v_size]
+ buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
- buf = buf[0:v_size] + buf[4 + v_size :]
+ buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
- va = va[0:-1].decode(self._encoding)
- # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
- self.GSO[str(v_o)] = va
+ decoded_va = va[0:-1].decode(self._encoding)
+ else:
+ # Stata says typ 129 can be binary, so use str
+ decoded_va = str(va)
+ # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
+ self.GSO[str(v_o)] = decoded_va
- def __next__(self):
+ def __next__(self) -> DataFrame:
return self.read(nrows=self._chunksize or 1)
- def get_chunk(self, size=None):
+ def get_chunk(self, size: Optional[int] = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
@@ -1511,15 +1493,15 @@ def get_chunk(self, size=None):
@Appender(_read_method_doc)
def read(
self,
- nrows=None,
- convert_dates=None,
- convert_categoricals=None,
- index_col=None,
- convert_missing=None,
- preserve_dtypes=None,
- columns=None,
- order_categoricals=None,
- ):
+ nrows: Optional[int] = None,
+ convert_dates: Optional[bool] = None,
+ convert_categoricals: Optional[bool] = None,
+ index_col: Optional[str] = None,
+ convert_missing: Optional[bool] = None,
+ preserve_dtypes: Optional[bool] = None,
+ columns: Optional[Sequence[str]] = None,
+ order_categoricals: Optional[bool] = None,
+ ) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
@@ -1553,6 +1535,7 @@ def read(
self._read_strls()
# Read data
+ assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
@@ -1672,7 +1655,7 @@ def any_startswith(x: str) -> bool:
return data
- def _do_convert_missing(self, data, convert_missing):
+ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
@@ -1688,7 +1671,6 @@ def _do_convert_missing(self, data, convert_missing):
continue
if convert_missing: # Replacement follows Stata notation
-
missing_loc = np.argwhere(missing._ndarray_values)
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=np.object)
@@ -1706,12 +1688,12 @@ def _do_convert_missing(self, data, convert_missing):
replacements[colname] = replacement
if replacements:
columns = data.columns
- replacements = DataFrame(replacements)
- data = concat([data.drop(replacements.columns, 1), replacements], 1)
- data = data[columns]
+ replacement_df = DataFrame(replacements)
+ replaced = concat([data.drop(replacement_df.columns, 1), replacement_df], 1)
+ data = replaced[columns]
return data
- def _insert_strls(self, data):
+ def _insert_strls(self, data: DataFrame) -> DataFrame:
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
@@ -1721,7 +1703,7 @@ def _insert_strls(self, data):
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
- def _do_select_columns(self, data, columns):
+ def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
if not self._column_selector_set:
column_set = set(columns)
@@ -1729,9 +1711,10 @@ def _do_select_columns(self, data, columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
+ joined = ", ".join(list(unmatched))
raise ValueError(
- "The following columns were not found in the "
- "Stata data set: " + ", ".join(list(unmatched))
+ "The following columns were not "
+ f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
@@ -1753,9 +1736,13 @@ def _do_select_columns(self, data, columns):
return data[columns]
+ @staticmethod
def _do_convert_categoricals(
- self, data, value_label_dict, lbllist, order_categoricals
- ):
+ data: DataFrame,
+ value_label_dict: Dict[str, Dict[Union[float, int], str]],
+ lbllist: Sequence[str],
+ order_categoricals: bool,
+ ) -> DataFrame:
"""
Converts categorical columns to Categorical type.
"""
@@ -1775,8 +1762,8 @@ def _do_convert_categoricals(
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
- repeats = list(vc.index[vc > 1])
- repeats = "-" * 80 + "\n" + "\n".join(repeats)
+ repeated_cats = list(vc.index[vc > 1])
+ repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
# GH 25772
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
@@ -1791,21 +1778,21 @@ def _do_convert_categoricals(
"""
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
- cat_data = Series(cat_data, index=data.index)
- cat_converted_data.append((col, cat_data))
+ cat_series = Series(cat_data, index=data.index)
+ cat_converted_data.append((col, cat_series))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_dict(dict(cat_converted_data))
return data
@property
- def data_label(self):
+ def data_label(self) -> str:
"""
Return data label of Stata file.
"""
return self._data_label
- def variable_labels(self):
+ def variable_labels(self) -> Dict[str, str]:
"""
Return variable labels as a dict, associating each variable name
with corresponding label.
@@ -1816,7 +1803,7 @@ def variable_labels(self):
"""
return dict(zip(self.varlist, self._variable_labels))
- def value_labels(self):
+ def value_labels(self) -> Dict[str, Dict[Union[float, int], str]]:
"""
Return a dict, associating each variable name a dict, associating
each value its corresponding label.
@@ -1831,7 +1818,43 @@ def value_labels(self):
return self.value_label_dict
-def _open_file_binary_write(fname):
+@Appender(_read_stata_doc)
+def read_stata(
+ filepath_or_buffer: FilePathOrBuffer,
+ convert_dates: bool = True,
+ convert_categoricals: bool = True,
+ index_col: Optional[str] = None,
+ convert_missing: bool = False,
+ preserve_dtypes: bool = True,
+ columns: Optional[Sequence[str]] = None,
+ order_categoricals: bool = True,
+ chunksize: Optional[int] = None,
+ iterator: bool = False,
+) -> Union[DataFrame, StataReader]:
+
+ reader = StataReader(
+ filepath_or_buffer,
+ convert_dates=convert_dates,
+ convert_categoricals=convert_categoricals,
+ index_col=index_col,
+ convert_missing=convert_missing,
+ preserve_dtypes=preserve_dtypes,
+ columns=columns,
+ order_categoricals=order_categoricals,
+ chunksize=chunksize,
+ )
+
+ if iterator or chunksize:
+ return reader
+
+ try:
+ data = reader.read()
+ finally:
+ reader.close()
+ return data
+
+
+def _open_file_binary_write(fname: FilePathOrBuffer) -> Tuple[BinaryIO, bool]:
"""
Open a binary file or no-op if file-like.
@@ -1847,12 +1870,15 @@ def _open_file_binary_write(fname):
True if the file was created, otherwise False
"""
if hasattr(fname, "write"):
- # if 'b' not in fname.mode:
- return fname, False
- return open(fname, "wb"), True
+ # See https://github.com/python/mypy/issues/1424 for hasattr challenges
+ return fname, False # type: ignore
+ elif isinstance(fname, (str, Path)):
+ return open(fname, "wb"), True
+ else:
+ raise TypeError("fname must be a binary file, buffer or path-like.")
-def _set_endianness(endianness):
+def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
@@ -1861,7 +1887,7 @@ def _set_endianness(endianness):
raise ValueError(f"Endianness {endianness} not understood")
-def _pad_bytes(name, length):
+def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
Take a char string and pads it with null bytes until it's length chars.
"""
@@ -1870,7 +1896,7 @@ def _pad_bytes(name, length):
return name + "\x00" * (length - len(name))
-def _convert_datetime_to_stata_type(fmt):
+def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
@@ -1895,7 +1921,7 @@ def _convert_datetime_to_stata_type(fmt):
raise NotImplementedError(f"Format {fmt} not implemented")
-def _maybe_convert_to_int_keys(convert_dates, varlist):
+def _maybe_convert_to_int_keys(convert_dates: Dict, varlist: List[Label]) -> Dict:
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
@@ -1909,7 +1935,7 @@ def _maybe_convert_to_int_keys(convert_dates, varlist):
return new_dict
-def _dtype_to_stata_type(dtype, column):
+def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
@@ -1945,7 +1971,9 @@ def _dtype_to_stata_type(dtype, column):
raise NotImplementedError(f"Data type {dtype} not supported.")
-def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, force_strl=False):
+def _dtype_to_default_stata_fmt(
+ dtype, column: Series, dta_version: int = 114, force_strl: bool = False
+) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
@@ -2058,14 +2086,14 @@ class StataWriter(StataParser):
def __init__(
self,
- fname,
- data,
- convert_dates=None,
- write_index=True,
- byteorder=None,
- time_stamp=None,
- data_label=None,
- variable_labels=None,
+ fname: FilePathOrBuffer,
+ data: DataFrame,
+ convert_dates: Optional[Dict[Label, str]] = None,
+ write_index: bool = True,
+ byteorder: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ data_label: Optional[str] = None,
+ variable_labels: Optional[Dict[Label, str]] = None,
):
super().__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
@@ -2082,21 +2110,30 @@ def __init__(
self._byteorder = _set_endianness(byteorder)
self._fname = stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
- self._converted_names = {}
+ self._converted_names: Dict[Label, str] = {}
+ self._file: Optional[BinaryIO] = None
- def _write(self, to_write):
+ def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
- self._file.write(to_write.encode(self._encoding or self._default_encoding))
+ assert self._file is not None
+ self._file.write(to_write.encode(self._encoding))
- def _prepare_categoricals(self, data):
+ def _write_bytes(self, value: bytes) -> None:
+ """
+ Helper to assert file is open before writing.
+ """
+ assert self._file is not None
+ self._file.write(value)
+
+ def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
- self._value_labels = []
+ self._value_labels: List[StataValueLabel] = []
if not any(is_cat):
return data
@@ -2131,7 +2168,7 @@ def _prepare_categoricals(self, data):
data_formatted.append((col, data[col]))
return DataFrame.from_dict(dict(data_formatted))
- def _replace_nans(self, data):
+ def _replace_nans(self, data: DataFrame) -> DataFrame:
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
@@ -2146,11 +2183,11 @@ def _replace_nans(self, data):
return data
- def _update_strl_names(self):
+ def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
pass
- def _validate_variable_name(self, name):
+ def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
@@ -2180,7 +2217,7 @@ def _validate_variable_name(self, name):
name = name.replace(c, "_")
return name
- def _check_column_names(self, data):
+ def _check_column_names(self, data: DataFrame) -> DataFrame:
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
@@ -2193,8 +2230,8 @@ def _check_column_names(self, data):
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
- converted_names = {}
- columns = list(data.columns)
+ converted_names: Dict[Label, str] = {}
+ columns: List[Label] = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
@@ -2210,7 +2247,7 @@ def _check_column_names(self, data):
name = "_" + name
# Variable name may not start with a number
- if name[0] >= "0" and name[0] <= "9":
+ if "0" <= name[0] <= "9":
name = "_" + name
name = name[: min(len(name), 32)]
@@ -2226,7 +2263,7 @@ def _check_column_names(self, data):
columns[j] = name
- data.columns = columns
+ data.columns = Index(columns)
# Check date conversion, and fix key if needed
if self._convert_dates:
@@ -2238,11 +2275,6 @@ def _check_column_names(self, data):
if converted_names:
conversion_warning = []
for orig_name, name in converted_names.items():
- # need to possibly encode the orig name if its unicode
- try:
- orig_name = orig_name.encode("utf-8")
- except (UnicodeDecodeError, AttributeError):
- pass
msg = f"{orig_name} -> {name}"
conversion_warning.append(msg)
@@ -2254,21 +2286,23 @@ def _check_column_names(self, data):
return data
- def _set_formats_and_types(self, dtypes):
- self.typlist = []
- self.fmtlist = []
+ def _set_formats_and_types(self, dtypes: Series) -> None:
+ self.fmtlist: List[str] = []
+ self.typlist: List[int] = []
for col, dtype in dtypes.items():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, self.data[col]))
- def _prepare_pandas(self, data):
+ def _prepare_pandas(self, data: DataFrame) -> None:
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
- data = data.reset_index()
+ temp = data.reset_index()
+ if isinstance(temp, DataFrame):
+ data = temp
# Ensure column names are strings
data = self._check_column_names(data)
@@ -2311,9 +2345,10 @@ def _prepare_pandas(self, data):
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
- self.fmtlist[key] = self._convert_dates[key]
+ if isinstance(key, int):
+ self.fmtlist[key] = self._convert_dates[key]
- def _encode_strings(self):
+ def _encode_strings(self) -> None:
"""
Encode strings in dta-specific encoding
@@ -2332,7 +2367,7 @@ def _encode_strings(self):
dtype = column.dtype
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
- if not ((inferred_dtype in ("string", "unicode")) or len(column) == 0):
+ if not ((inferred_dtype == "string") or len(column) == 0):
col = column.name
raise ValueError(
f"""\
@@ -2350,7 +2385,7 @@ def _encode_strings(self):
):
self.data[col] = encoded
- def write_file(self):
+ def write_file(self) -> None:
self._file, self._own_file = _open_file_binary_write(self._fname)
try:
self._write_header(data_label=self._data_label, time_stamp=self._time_stamp)
@@ -2363,8 +2398,8 @@ def write_file(self):
self._write_variable_labels()
self._write_expansion_fields()
self._write_characteristics()
- self._prepare_data()
- self._write_data()
+ records = self._prepare_data()
+ self._write_data(records)
self._write_strls()
self._write_value_labels()
self._write_file_close_tag()
@@ -2373,7 +2408,8 @@ def write_file(self):
self._close()
if self._own_file:
try:
- os.unlink(self._fname)
+ if isinstance(self._fname, (str, Path)):
+ os.unlink(self._fname)
except OSError:
warnings.warn(
f"This save was not successful but {self._fname} could not "
@@ -2384,7 +2420,7 @@ def write_file(self):
else:
self._close()
- def _close(self):
+ def _close(self) -> None:
"""
Close the file if it was created by the writer.
@@ -2394,6 +2430,7 @@ def _close(self):
(if supported)
"""
# Some file-like objects might not support flush
+ assert self._file is not None
try:
self._file.flush()
except AttributeError:
@@ -2401,34 +2438,38 @@ def _close(self):
if self._own_file:
self._file.close()
- def _write_map(self):
+ def _write_map(self) -> None:
"""No-op, future compatibility"""
pass
- def _write_file_close_tag(self):
+ def _write_file_close_tag(self) -> None:
"""No-op, future compatibility"""
pass
- def _write_characteristics(self):
+ def _write_characteristics(self) -> None:
"""No-op, future compatibility"""
pass
- def _write_strls(self):
+ def _write_strls(self) -> None:
"""No-op, future compatibility"""
pass
- def _write_expansion_fields(self):
+ def _write_expansion_fields(self) -> None:
"""Write 5 zeros for expansion fields"""
self._write(_pad_bytes("", 5))
- def _write_value_labels(self):
+ def _write_value_labels(self) -> None:
for vl in self._value_labels:
- self._file.write(vl.generate_value_label(self._byteorder))
+ self._write_bytes(vl.generate_value_label(self._byteorder))
- def _write_header(self, data_label=None, time_stamp=None):
+ def _write_header(
+ self,
+ data_label: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ ) -> None:
byteorder = self._byteorder
# ds_format - just use 114
- self._file.write(struct.pack("b", 114))
+ self._write_bytes(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
@@ -2436,14 +2477,16 @@ def _write_header(self, data_label=None, time_stamp=None):
# unused
self._write("\x00")
# number of vars, 2 bytes
- self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
+ self._write_bytes(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
- self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
+ self._write_bytes(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
- self._file.write(self._null_terminate(_pad_bytes("", 80)))
+ self._write_bytes(self._null_terminate_bytes(_pad_bytes("", 80)))
else:
- self._file.write(self._null_terminate(_pad_bytes(data_label[:80], 80)))
+ self._write_bytes(
+ self._null_terminate_bytes(_pad_bytes(data_label[:80], 80))
+ )
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
@@ -2472,43 +2515,43 @@ def _write_header(self, data_label=None, time_stamp=None):
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
- self._file.write(self._null_terminate(ts))
+ self._write_bytes(self._null_terminate_bytes(ts))
- def _write_variable_types(self):
+ def _write_variable_types(self) -> None:
for typ in self.typlist:
- self._file.write(struct.pack("B", typ))
+ self._write_bytes(struct.pack("B", typ))
- def _write_varnames(self):
+ def _write_varnames(self) -> None:
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
- name = self._null_terminate(name, True)
+ name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
- def _write_sortlist(self):
+ def _write_sortlist(self) -> None:
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (self.nvar + 1))
self._write(srtlist)
- def _write_formats(self):
+ def _write_formats(self) -> None:
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
- def _write_value_label_names(self):
+ def _write_value_label_names(self) -> None:
# lbllist, 33*nvar, char array
for i in range(self.nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
- name = self._null_terminate(name, True)
+ name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
- def _write_variable_labels(self):
+ def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes("", 81)
@@ -2532,11 +2575,11 @@ def _write_variable_labels(self):
else:
self._write(blank)
- def _convert_strls(self, data):
+ def _convert_strls(self, data: DataFrame) -> DataFrame:
"""No-op, future compatibility"""
return data
- def _prepare_data(self):
+ def _prepare_data(self) -> np.recarray:
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
@@ -2566,23 +2609,21 @@ def _prepare_data(self):
dtype = dtype.newbyteorder(self._byteorder)
dtypes[col] = dtype
- self.data = data.to_records(index=False, column_dtypes=dtypes)
+ return data.to_records(index=False, column_dtypes=dtypes)
- def _write_data(self):
- data = self.data
- self._file.write(data.tobytes())
-
- def _null_terminate(self, s, as_string=False):
- null_byte = "\x00"
- s += null_byte
-
- if not as_string:
- s = s.encode(self._encoding)
+ def _write_data(self, records: np.recarray) -> None:
+ self._write_bytes(records.tobytes())
+ @staticmethod
+ def _null_terminate_str(s: str) -> str:
+ s += "\x00"
return s
+ def _null_terminate_bytes(self, s: str) -> bytes:
+ return self._null_terminate_str(s).encode(self._encoding)
-def _dtype_to_stata_type_117(dtype, column, force_strl):
+
+def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
@@ -2624,7 +2665,7 @@ def _dtype_to_stata_type_117(dtype, column, force_strl):
raise NotImplementedError(f"Data type {dtype} not supported.")
-def _pad_bytes_new(name, length):
+def _pad_bytes_new(name: Union[str, bytes], length: int) -> bytes:
"""
Takes a bytes instance and pads it with null bytes until it's length chars.
"""
@@ -2644,7 +2685,7 @@ class StataStrLWriter:
----------
df : DataFrame
DataFrame to convert
- columns : list
+ columns : Sequence[str]
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
@@ -2662,7 +2703,13 @@ class StataStrLWriter:
characters.
"""
- def __init__(self, df, columns, version=117, byteorder=None):
+ def __init__(
+ self,
+ df: DataFrame,
+ columns: Sequence[str],
+ version: int = 117,
+ byteorder: Optional[str] = None,
+ ):
if version not in (117, 118, 119):
raise ValueError("Only dta versions 117, 118 and 119 supported")
self._dta_ver = version
@@ -2689,13 +2736,13 @@ def __init__(self, df, columns, version=117, byteorder=None):
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
- def _convert_key(self, key):
+ def _convert_key(self, key: Tuple[int, int]) -> int:
v, o = key
return v + self._o_offet * o
- def generate_table(self):
+ def generate_table(self) -> Tuple[Dict[str, Tuple[int, int]], DataFrame]:
"""
- Generates the GSO lookup table for the DataFRame
+ Generates the GSO lookup table for the DataFrame
Returns
-------
@@ -2745,7 +2792,7 @@ def generate_table(self):
return gso_table, gso_df
- def generate_blob(self, gso_table):
+ def generate_blob(self, gso_table: Dict[str, Tuple[int, int]]) -> bytes:
"""
Generates the binary blob of GSOs that is written to the dta file.
@@ -2888,18 +2935,20 @@ class StataWriter117(StataWriter):
def __init__(
self,
- fname,
- data,
- convert_dates=None,
- write_index=True,
- byteorder=None,
- time_stamp=None,
- data_label=None,
- variable_labels=None,
- convert_strl=None,
+ fname: FilePathOrBuffer,
+ data: DataFrame,
+ convert_dates: Optional[Dict[Label, str]] = None,
+ write_index: bool = True,
+ byteorder: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ data_label: Optional[str] = None,
+ variable_labels: Optional[Dict[Label, str]] = None,
+ convert_strl: Optional[Sequence[Label]] = None,
):
- # Shallow copy since convert_strl might be modified later
- self._convert_strl = [] if convert_strl is None else convert_strl[:]
+ # Copy to new list since convert_strl might be modified later
+ self._convert_strl: List[Label] = []
+ if convert_strl is not None:
+ self._convert_strl.extend(convert_strl)
super().__init__(
fname,
@@ -2911,42 +2960,47 @@ def __init__(
data_label=data_label,
variable_labels=variable_labels,
)
- self._map = None
- self._strl_blob = None
+ self._map: Dict[str, int] = {}
+ self._strl_blob = b""
@staticmethod
- def _tag(val, tag):
+ def _tag(val: Union[str, bytes], tag: str) -> bytes:
"""Surround val with <tag></tag>"""
if isinstance(val, str):
val = bytes(val, "utf-8")
return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8")
- def _update_map(self, tag):
+ def _update_map(self, tag: str) -> None:
"""Update map location for tag with file position"""
+ assert self._file is not None
self._map[tag] = self._file.tell()
- def _write_header(self, data_label=None, time_stamp=None):
+ def _write_header(
+ self,
+ data_label: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ ) -> None:
"""Write the file header"""
byteorder = self._byteorder
- self._file.write(bytes("<stata_dta>", "utf-8"))
+ self._write_bytes(bytes("<stata_dta>", "utf-8"))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes(str(self._dta_version), "utf-8"), "release"))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder"))
- # number of vars, 2 bytes
- assert self.nvar < 2 ** 16
- bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), "K"))
+ # number of vars, 2 bytes in 117 and 118, 4 byte in 119
+ nvar_type = "H" if self._dta_version <= 118 else "I"
+ bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), "K"))
# 117 uses 4 bytes, 118 uses 8
nobs_size = "I" if self._dta_version == 117 else "Q"
bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), "N"))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ""
- label = label.encode(self._encoding)
+ encoded_label = label.encode(self._encoding)
label_size = "B" if self._dta_version == 117 else "H"
- label_len = struct.pack(byteorder + label_size, len(label))
- label = label_len + label
- bio.write(self._tag(label, "label"))
+ label_len = struct.pack(byteorder + label_size, len(encoded_label))
+ encoded_label = label_len + encoded_label
+ bio.write(self._tag(encoded_label, "label"))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
@@ -2975,16 +3029,17 @@ def _write_header(self, data_label=None, time_stamp=None):
+ time_stamp.strftime(" %Y %H:%M")
)
# '\x11' added due to inspection of Stata file
- ts = b"\x11" + bytes(ts, "utf-8")
- bio.write(self._tag(ts, "timestamp"))
+ stata_ts = b"\x11" + bytes(ts, "utf-8")
+ bio.write(self._tag(stata_ts, "timestamp"))
bio.seek(0)
- self._file.write(self._tag(bio.read(), "header"))
+ self._write_bytes(self._tag(bio.read(), "header"))
- def _write_map(self):
+ def _write_map(self) -> None:
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
- if self._map is None:
+ assert self._file is not None
+ if not self._map:
self._map = dict(
(
("stata_data", 0),
@@ -3009,42 +3064,43 @@ def _write_map(self):
for val in self._map.values():
bio.write(struct.pack(self._byteorder + "Q", val))
bio.seek(0)
- self._file.write(self._tag(bio.read(), "map"))
+ self._write_bytes(self._tag(bio.read(), "map"))
- def _write_variable_types(self):
+ def _write_variable_types(self) -> None:
self._update_map("variable_types")
bio = BytesIO()
for typ in self.typlist:
bio.write(struct.pack(self._byteorder + "H", typ))
bio.seek(0)
- self._file.write(self._tag(bio.read(), "variable_types"))
+ self._write_bytes(self._tag(bio.read(), "variable_types"))
- def _write_varnames(self):
+ def _write_varnames(self) -> None:
self._update_map("varnames")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vn_len = 32 if self._dta_version == 117 else 128
for name in self.varlist:
- name = self._null_terminate(name, True)
+ name = self._null_terminate_str(name)
name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1)
bio.write(name)
bio.seek(0)
- self._file.write(self._tag(bio.read(), "varnames"))
+ self._write_bytes(self._tag(bio.read(), "varnames"))
- def _write_sortlist(self):
+ def _write_sortlist(self) -> None:
self._update_map("sortlist")
- self._file.write(self._tag(b"\x00\00" * (self.nvar + 1), "sortlist"))
+ sort_size = 2 if self._dta_version < 119 else 4
+ self._write_bytes(self._tag(b"\x00" * sort_size * (self.nvar + 1), "sortlist"))
- def _write_formats(self):
+ def _write_formats(self) -> None:
self._update_map("formats")
bio = BytesIO()
fmt_len = 49 if self._dta_version == 117 else 57
for fmt in self.fmtlist:
bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len))
bio.seek(0)
- self._file.write(self._tag(bio.read(), "formats"))
+ self._write_bytes(self._tag(bio.read(), "formats"))
- def _write_value_label_names(self):
+ def _write_value_label_names(self) -> None:
self._update_map("value_label_names")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
@@ -3054,13 +3110,13 @@ def _write_value_label_names(self):
name = "" # default name
if self._is_col_cat[i]:
name = self.varlist[i]
- name = self._null_terminate(name, True)
- name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1)
- bio.write(name)
+ name = self._null_terminate_str(name)
+ encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1)
+ bio.write(encoded_name)
bio.seek(0)
- self._file.write(self._tag(bio.read(), "value_label_names"))
+ self._write_bytes(self._tag(bio.read(), "value_label_names"))
- def _write_variable_labels(self):
+ def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
self._update_map("variable_labels")
bio = BytesIO()
@@ -3072,7 +3128,7 @@ def _write_variable_labels(self):
for _ in range(self.nvar):
bio.write(blank)
bio.seek(0)
- self._file.write(self._tag(bio.read(), "variable_labels"))
+ self._write_bytes(self._tag(bio.read(), "variable_labels"))
return
for col in self.data:
@@ -3092,31 +3148,27 @@ def _write_variable_labels(self):
else:
bio.write(blank)
bio.seek(0)
- self._file.write(self._tag(bio.read(), "variable_labels"))
+ self._write_bytes(self._tag(bio.read(), "variable_labels"))
- def _write_characteristics(self):
+ def _write_characteristics(self) -> None:
self._update_map("characteristics")
- self._file.write(self._tag(b"", "characteristics"))
+ self._write_bytes(self._tag(b"", "characteristics"))
- def _write_data(self):
+ def _write_data(self, records) -> None:
self._update_map("data")
- data = self.data
- self._file.write(b"<data>")
- self._file.write(data.tobytes())
- self._file.write(b"</data>")
+ self._write_bytes(b"<data>")
+ self._write_bytes(records.tobytes())
+ self._write_bytes(b"</data>")
- def _write_strls(self):
+ def _write_strls(self) -> None:
self._update_map("strls")
- strls = b""
- if self._strl_blob is not None:
- strls = self._strl_blob
- self._file.write(self._tag(strls, "strls"))
+ self._write_bytes(self._tag(self._strl_blob, "strls"))
- def _write_expansion_fields(self):
+ def _write_expansion_fields(self) -> None:
"""No-op in dta 117+"""
pass
- def _write_value_labels(self):
+ def _write_value_labels(self) -> None:
self._update_map("value_labels")
bio = BytesIO()
for vl in self._value_labels:
@@ -3124,14 +3176,14 @@ def _write_value_labels(self):
lab = self._tag(lab, "lbl")
bio.write(lab)
bio.seek(0)
- self._file.write(self._tag(bio.read(), "value_labels"))
+ self._write_bytes(self._tag(bio.read(), "value_labels"))
- def _write_file_close_tag(self):
+ def _write_file_close_tag(self) -> None:
self._update_map("stata_data_close")
- self._file.write(bytes("</stata_dta>", "utf-8"))
+ self._write_bytes(bytes("</stata_dta>", "utf-8"))
self._update_map("end-of-file")
- def _update_strl_names(self):
+ def _update_strl_names(self) -> None:
"""Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules"""
# Update convert_strl if names changed
@@ -3140,7 +3192,7 @@ def _update_strl_names(self):
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
- def _convert_strls(self, data):
+ def _convert_strls(self, data: DataFrame) -> DataFrame:
"""Convert columns to StrLs if either very large or in the
convert_strl variable"""
convert_cols = [
@@ -3156,7 +3208,7 @@ def _convert_strls(self, data):
self._strl_blob = ssw.generate_blob(tab)
return data
- def _set_formats_and_types(self, dtypes):
+ def _set_formats_and_types(self, dtypes: Series) -> None:
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.items():
@@ -3173,13 +3225,14 @@ def _set_formats_and_types(self, dtypes):
)
-class StataWriter118(StataWriter117):
+class StataWriterUTF8(StataWriter117):
"""
- A class for writing Stata binary dta files in Stata 15 format (118)
+ Stata binary dta file writing in Stata 15 (118) and 16 (119) formats
- DTA 118 format files support unicode string data (both fixed and strL)
- format. Unicode is also supported in value labels, variable labels and
- the dataset label.
+ DTA 118 and 119 format files support unicode string data (both fixed
+ and strL) format. Unicode is also supported in value labels, variable
+ labels and the dataset label. Format 119 is automatically used if the
+ file contains more than 32,767 variables.
.. versionadded:: 1.0.0
@@ -3192,34 +3245,38 @@ class StataWriter118(StataWriter117):
is written.
data : DataFrame
Input to save
- convert_dates : dict
+ convert_dates : dict, default None
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
- write_index : bool
+ write_index : bool, default True
Write the index to Stata dataset.
- byteorder : str
+ byteorder : str, default None
Can be ">", "<", "little", or "big". default is `sys.byteorder`
- time_stamp : datetime
+ time_stamp : datetime, default None
A datetime to use as file creation date. Default is the current time
- data_label : str
+ data_label : str, default None
A label for the data set. Must be 80 characters or smaller.
- variable_labels : dict
+ variable_labels : dict, default None
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
- convert_strl : list
+ convert_strl : list, default None
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
+ version : int, default None
+ The dta version to use. By default, uses the size of data to determine
+ the version. 118 is used if data.shape[1] <= 32767, and 119 is used
+ for storing larger DataFrames.
Returns
-------
- StataWriter118
+ StataWriterUTF8
The instance has a write_file method, which will write the file to the
given `fname`.
@@ -3238,24 +3295,60 @@ class StataWriter118(StataWriter117):
--------
Using Unicode data and column names
- >>> from pandas.io.stata import StataWriter118
+ >>> from pandas.io.stata import StataWriterUTF8
>>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])
- >>> writer = StataWriter118('./data_file.dta', data)
+ >>> writer = StataWriterUTF8('./data_file.dta', data)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],
... columns=['strls'])
- >>> writer = StataWriter118('./data_file_with_long_strings.dta', data,
- ... convert_strl=['strls'])
+ >>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,
+ ... convert_strl=['strls'])
>>> writer.write_file()
"""
_encoding = "utf-8"
- _dta_version = 118
- def _validate_variable_name(self, name):
+ def __init__(
+ self,
+ fname: FilePathOrBuffer,
+ data: DataFrame,
+ convert_dates: Optional[Dict[Label, str]] = None,
+ write_index: bool = True,
+ byteorder: Optional[str] = None,
+ time_stamp: Optional[datetime.datetime] = None,
+ data_label: Optional[str] = None,
+ variable_labels: Optional[Dict[Label, str]] = None,
+ convert_strl: Optional[Sequence[Label]] = None,
+ version: Optional[int] = None,
+ ):
+ if version is None:
+ version = 118 if data.shape[1] <= 32767 else 119
+ elif version not in (118, 119):
+ raise ValueError("version must be either 118 or 119.")
+ elif version == 118 and data.shape[1] > 32767:
+ raise ValueError(
+ "You must use version 119 for data sets containing more than"
+ "32,767 variables"
+ )
+
+ super().__init__(
+ fname,
+ data,
+ convert_dates=convert_dates,
+ write_index=write_index,
+ byteorder=byteorder,
+ time_stamp=time_stamp,
+ data_label=data_label,
+ variable_labels=variable_labels,
+ convert_strl=convert_strl,
+ )
+ # Override version set in StataWriter117 init
+ self._dta_version = version
+
+ def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
@@ -3272,7 +3365,7 @@ def _validate_variable_name(self, name):
Notes
-----
- Stata 118 support most unicode characters. The only limatation is in
+ Stata 118+ support most unicode characters. The only limitation is in
the ascii range where the characters supported are a-z, A-Z, 0-9 and _.
"""
# High code points appear to be acceptable
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index dd907457f7c32..139e0f2bbad8b 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -374,7 +374,6 @@ def hist_frame(
<class 'numpy.ndarray'>
"""
-
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
@@ -386,6 +385,45 @@ def hist_frame(
"""
+_bar_or_line_doc = """
+ Parameters
+ ----------
+ x : label or position, optional
+ Allows plotting of one column versus another. If not specified,
+ the index of the DataFrame is used.
+ y : label or position, optional
+ Allows plotting of one column versus another. If not specified,
+ all numerical columns are used.
+ color : str, array_like, or dict, optional
+ The color for each of the DataFrame's columns. Possible values are:
+
+ - A single color string referred to by name, RGB or RGBA code,
+ for instance 'red' or '#a98d19'.
+
+ - A sequence of color strings referred to by name, RGB or RGBA
+ code, which will be used for each column recursively. For
+ instance ['green','yellow'] each column's %(kind)s will be filled in
+ green or yellow, alternatively.
+
+ - A dict of the form {column name : color}, so that each column will be
+ colored accordingly. For example, if your columns are called `a` and
+ `b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for
+ column `a` in green and %(kind)ss for column `b` in red.
+
+ .. versionadded:: 1.1.0
+
+ **kwargs
+ Additional keyword arguments are documented in
+ :meth:`DataFrame.plot`.
+
+ Returns
+ -------
+ matplotlib.axes.Axes or np.ndarray of them
+ An ndarray is returned with one :class:`matplotlib.axes.Axes`
+ per column when ``subplots=True``.
+"""
+
+
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
@@ -847,31 +885,10 @@ def __call__(self, *args, **kwargs):
return plot_backend.plot(data, kind=kind, **kwargs)
- def line(self, x=None, y=None, **kwargs):
- """
- Plot Series or DataFrame as lines.
-
- This function is useful to plot lines using DataFrame's values
- as coordinates.
-
- Parameters
- ----------
- x : int or str, optional
- Columns to use for the horizontal axis.
- Either the location or the label of the columns to be used.
- By default, it will use the DataFrame indices.
- y : int, str, or list of them, optional
- The values to be plotted.
- Either the location or the label of the columns to be used.
- By default, it will use the remaining DataFrame numeric columns.
- **kwargs
- Keyword arguments to pass on to :meth:`DataFrame.plot`.
-
- Returns
- -------
- :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
- Return an ndarray when ``subplots=True``.
+ __call__.__doc__ = __doc__
+ @Appender(
+ """
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
@@ -906,6 +923,16 @@ def line(self, x=None, y=None, **kwargs):
>>> type(axes)
<class 'numpy.ndarray'>
+ .. plot::
+ :context: close-figs
+
+ Let's repeat the same example, but specifying colors for
+ each column (in this case, for each animal).
+
+ >>> axes = df.plot.line(
+ ... subplots=True, color={"pig": "pink", "horse": "#742802"}
+ ... )
+
.. plot::
:context: close-figs
@@ -914,36 +941,20 @@ def line(self, x=None, y=None, **kwargs):
>>> lines = df.plot.line(x='pig', y='horse')
"""
- return self(kind="line", x=x, y=y, **kwargs)
-
- def bar(self, x=None, y=None, **kwargs):
+ )
+ @Substitution(kind="line")
+ @Appender(_bar_or_line_doc)
+ def line(self, x=None, y=None, **kwargs):
"""
- Vertical bar plot.
-
- A bar plot is a plot that presents categorical data with
- rectangular bars with lengths proportional to the values that they
- represent. A bar plot shows comparisons among discrete categories. One
- axis of the plot shows the specific categories being compared, and the
- other axis represents a measured value.
-
- Parameters
- ----------
- x : label or position, optional
- Allows plotting of one column versus another. If not specified,
- the index of the DataFrame is used.
- y : label or position, optional
- Allows plotting of one column versus another. If not specified,
- all numerical columns are used.
- **kwargs
- Additional keyword arguments are documented in
- :meth:`DataFrame.plot`.
+ Plot Series or DataFrame as lines.
- Returns
- -------
- matplotlib.axes.Axes or np.ndarray of them
- An ndarray is returned with one :class:`matplotlib.axes.Axes`
- per column when ``subplots=True``.
+ This function is useful to plot lines using DataFrame's values
+ as coordinates.
+ """
+ return self(kind="line", x=x, y=y, **kwargs)
+ @Appender(
+ """
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
@@ -985,6 +996,17 @@ def bar(self, x=None, y=None, **kwargs):
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
+ If you don't like the default colours, you can specify how you'd
+ like each column to be colored.
+
+ .. plot::
+ :context: close-figs
+
+ >>> axes = df.plot.bar(
+ ... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}
+ ... )
+ >>> axes[1].legend(loc=2) # doctest: +SKIP
+
Plot a single column.
.. plot::
@@ -998,32 +1020,24 @@ def bar(self, x=None, y=None, **kwargs):
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
+ """
+ )
+ @Substitution(kind="bar")
+ @Appender(_bar_or_line_doc)
+ def bar(self, x=None, y=None, **kwargs):
"""
- return self(kind="bar", x=x, y=y, **kwargs)
-
- def barh(self, x=None, y=None, **kwargs):
- """
- Make a horizontal bar plot.
+ Vertical bar plot.
- A horizontal bar plot is a plot that presents quantitative data with
+ A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
+ """
+ return self(kind="bar", x=x, y=y, **kwargs)
- Parameters
- ----------
- x : label or position, default DataFrame.index
- Column to be used for categories.
- y : label or position, default All numeric columns in dataframe
- Columns to be plotted from the DataFrame.
- **kwargs
- Keyword arguments to pass on to :meth:`DataFrame.plot`.
-
- Returns
- -------
- :class:`matplotlib.axes.Axes` or numpy.ndarray of them
-
+ @Appender(
+ """
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
@@ -1053,6 +1067,13 @@ def barh(self, x=None, y=None, **kwargs):
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
+ We can specify colors for each column
+
+ .. plot::
+ :context: close-figs
+
+ >>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
+
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
@@ -1078,6 +1099,19 @@ def barh(self, x=None, y=None, **kwargs):
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
+ """
+ )
+ @Substitution(kind="bar")
+ @Appender(_bar_or_line_doc)
+ def barh(self, x=None, y=None, **kwargs):
+ """
+ Make a horizontal bar plot.
+
+ A horizontal bar plot is a plot that presents quantitative data with
+ rectangular bars with lengths proportional to the values that they
+ represent. A bar plot shows comparisons among discrete categories. One
+ axis of the plot shows the specific categories being compared, and the
+ other axis represents a measured value.
"""
return self(kind="barh", x=x, y=y, **kwargs)
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 5b37ebb42aecc..a1035fd0823bb 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -421,8 +421,7 @@ def __call__(self):
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
"MillisecondLocator estimated to generate "
- f"{estimate:d} ticks from {dmin} to {dmax}: "
- "exceeds Locator.MAXTICKS"
+ f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS"
f"* 2 ({self.MAXTICKS * 2:d}) "
)
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 2d68bb46a8ada..de09460bb833d 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -726,7 +726,10 @@ def _apply_style_colors(self, colors, kwds, col_num, label):
has_color = "color" in kwds or self.colormap is not None
nocolor_style = style is None or re.match("[a-z]+", style) is None
if (has_color or self.subplots) and nocolor_style:
- kwds["color"] = colors[col_num % len(colors)]
+ if isinstance(colors, dict):
+ kwds["color"] = colors[label]
+ else:
+ kwds["color"] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds="color"):
@@ -1347,6 +1350,8 @@ def _make_plot(self):
kwds = self.kwds.copy()
if self._is_series:
kwds["color"] = colors
+ elif isinstance(colors, dict):
+ kwds["color"] = colors[label]
else:
kwds["color"] = colors[i % ncolors]
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index f8b2c7ab123d0..d54fc73b495ba 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -318,8 +318,7 @@ def hist_series(
if "figure" in kwds:
raise ValueError(
"Cannot pass 'figure' when using the "
- "'by' argument, since a new 'Figure' instance "
- "will be created"
+ "'by' argument, since a new 'Figure' instance will be created"
)
axes = _grouped_hist(
self,
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index fd69265b18a5b..7990bff4f517c 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -27,7 +27,11 @@ def _get_standard_colors(
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
- colors = list(color) if is_list_like(color) else color
+ colors = (
+ list(color)
+ if is_list_like(color) and not isinstance(color, dict)
+ else color
+ )
else:
if color_type == "default":
# need to call list() on the result to copy so we don't
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index dd4034a97f58e..d7732c86911b8 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -190,8 +190,7 @@ def _subplots(
if sharex or sharey:
warnings.warn(
"When passing multiple axes, sharex and sharey "
- "are ignored. These settings must be specified "
- "when creating axes",
+ "are ignored. These settings must be specified when creating axes",
UserWarning,
stacklevel=4,
)
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index ccd42d3940431..1369adcd80269 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -149,7 +149,7 @@ def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
influence of all dimensions.
More info available at the `original article
- <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
+ <https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 8b897524cb053..406d5f055797d 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -1,6 +1,9 @@
+import subprocess
import sys
from typing import List
+import pytest
+
import pandas as pd
from pandas import api, compat
import pandas._testing as tm
@@ -311,3 +314,18 @@ def test_util_testing_deprecated_direct(self):
assert "pandas.util.testing is deprecated" in str(m[0].message)
assert "pandas.testing instead" in str(m[0].message)
+
+ def test_util_in_top_level(self):
+ # in a subprocess to avoid import caching issues
+ out = subprocess.check_output(
+ [
+ sys.executable,
+ "-c",
+ "import pandas; pandas.util.testing.assert_series_equal",
+ ],
+ stderr=subprocess.STDOUT,
+ ).decode()
+ assert "pandas.util.testing is deprecated" in out
+
+ with pytest.raises(AttributeError, match="foo"):
+ pd.util.foo
diff --git a/pandas/tests/arithmetic/test_array_ops.py b/pandas/tests/arithmetic/test_array_ops.py
new file mode 100644
index 0000000000000..d8aaa3183a1c6
--- /dev/null
+++ b/pandas/tests/arithmetic/test_array_ops.py
@@ -0,0 +1,21 @@
+import operator
+
+import numpy as np
+import pytest
+
+import pandas._testing as tm
+from pandas.core.ops.array_ops import na_logical_op
+
+
+def test_na_logical_op_2d():
+ left = np.arange(8).reshape(4, 2)
+ right = left.astype(object)
+ right[0, 0] = np.nan
+
+ # Check that we fall back to the vec_binop branch
+ with pytest.raises(TypeError, match="unsupported operand type"):
+ operator.or_(left, right)
+
+ result = na_logical_op(left, right, operator.or_)
+ expected = right
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index f55e2b98ee912..7c0f94001d306 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -135,10 +135,11 @@ def test_div_td64arr(self, left, box_cls):
result = right // left
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "Cannot divide"
+ with pytest.raises(TypeError, match=msg):
left / right
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
left // right
# TODO: de-duplicate with test_numeric_arr_mul_tdscalar
@@ -187,7 +188,8 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
result = three_days / index
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "cannot use operands with types dtype"
+ with pytest.raises(TypeError, match=msg):
index / three_days
@pytest.mark.parametrize(
@@ -205,13 +207,19 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
)
def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
left = tm.box_expected(numeric_idx, box)
- with pytest.raises(TypeError):
+ msg = (
+ "unsupported operand type|"
+ "Addition/subtraction of integers and integer-arrays|"
+ "Instead of adding/subtracting|"
+ "cannot use operands with types dtype"
+ )
+ with pytest.raises(TypeError, match=msg):
left + other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other + left
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
left - other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other - left
@pytest.mark.parametrize(
@@ -229,13 +237,18 @@ def test_add_sub_datetimelike_invalid(self, numeric_idx, other, box):
# NullFrequencyError instead of TypeError so is excluded.
left = tm.box_expected(numeric_idx, box)
- with pytest.raises(TypeError):
+ msg = (
+ "unsupported operand type|"
+ "Cannot (add|subtract) NaT (to|from) ndarray|"
+ "Addition/subtraction of integers and integer-arrays"
+ )
+ with pytest.raises(TypeError, match=msg):
left + other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other + left
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
left - other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other - left
@@ -607,14 +620,16 @@ def test_mul_index(self, numeric_idx):
def test_mul_datelike_raises(self, numeric_idx):
idx = numeric_idx
- with pytest.raises(TypeError):
+ msg = "cannot perform __rmul__ with this index type"
+ with pytest.raises(TypeError, match=msg):
idx * pd.date_range("20130101", periods=5)
def test_mul_size_mismatch_raises(self, numeric_idx):
idx = numeric_idx
- with pytest.raises(ValueError):
+ msg = "operands could not be broadcast together"
+ with pytest.raises(ValueError, match=msg):
idx * idx[0:3]
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
@pytest.mark.parametrize("op", [operator.pow, ops.rpow])
@@ -792,10 +807,11 @@ def test_series_frame_radd_bug(self):
# really raise this time
now = pd.Timestamp.now().to_pydatetime()
- with pytest.raises(TypeError):
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
now + ts
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
ts + now
# TODO: This came from series.test.test_operators, needs cleanup
@@ -816,7 +832,8 @@ def test_datetime64_with_index(self):
result = ser - ser.index
tm.assert_series_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "cannot subtract period"
+ with pytest.raises(TypeError, match=msg):
# GH#18850
result = ser - ser.index.to_period()
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index 799ef3492e53f..c0cb522b516ab 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -1,6 +1,7 @@
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for object dtype
+import datetime
from decimal import Decimal
import operator
@@ -137,7 +138,13 @@ def test_objarr_radd_str_invalid(self, dtype, data, box_with_array):
ser = Series(data, dtype=dtype)
ser = tm.box_expected(ser, box_with_array)
- with pytest.raises(TypeError):
+ msg = (
+ "can only concatenate str|"
+ "did not contain a loop with signature matching types|"
+ "unsupported operand type|"
+ "must be str"
+ )
+ with pytest.raises(TypeError, match=msg):
"foo_" + ser
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
@@ -149,9 +156,10 @@ def test_objarr_add_invalid(self, op, box_with_array):
obj_ser.name = "objects"
obj_ser = tm.box_expected(obj_ser, box)
- with pytest.raises(Exception):
+ msg = "can only concatenate str|unsupported operand type|must be str"
+ with pytest.raises(Exception, match=msg):
op(obj_ser, 1)
- with pytest.raises(Exception):
+ with pytest.raises(Exception, match=msg):
op(obj_ser, np.array(1, dtype=np.int64))
# TODO: Moved from tests.series.test_operators; needs cleanup
@@ -275,13 +283,15 @@ def test_add(self):
def test_sub_fail(self):
index = tm.makeStringIndex(100)
- with pytest.raises(TypeError):
+
+ msg = "unsupported operand type|Cannot broadcast"
+ with pytest.raises(TypeError, match=msg):
index - "a"
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index - index
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index - index.tolist()
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index.tolist() - index
def test_sub_object(self):
@@ -295,10 +305,11 @@ def test_sub_object(self):
result = index - pd.Index([Decimal(1), Decimal(1)])
tm.assert_index_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
index - "foo"
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
index - np.array([2, "foo"])
def test_rsub_object(self):
@@ -312,8 +323,54 @@ def test_rsub_object(self):
result = np.array([Decimal(2), Decimal(2)]) - index
tm.assert_index_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
"foo" - index
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
np.array([True, pd.Timestamp.now()]) - index
+
+
+class MyIndex(pd.Index):
+ # Simple index subclass that tracks ops calls.
+
+ _calls: int
+
+ @classmethod
+ def _simple_new(cls, values, name=None, dtype=None):
+ result = object.__new__(cls)
+ result._data = values
+ result._index_data = values
+ result._name = name
+ result._calls = 0
+
+ return result._reset_identity()
+
+ def __add__(self, other):
+ self._calls += 1
+ return self._simple_new(self._index_data)
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+
+@pytest.mark.parametrize(
+ "other",
+ [
+ [datetime.timedelta(1), datetime.timedelta(2)],
+ [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)],
+ [pd.Period("2000"), pd.Period("2001")],
+ ["a", "b"],
+ ],
+ ids=["timedelta", "datetime", "period", "object"],
+)
+def test_index_ops_defer_to_unknown_subclasses(other):
+ # https://github.com/pandas-dev/pandas/issues/31109
+ values = np.array(
+ [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object
+ )
+ a = MyIndex._simple_new(values)
+ other = pd.Index(other)
+ result = other + a
+ assert isinstance(result, MyIndex)
+ assert a._calls == 1
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 158da37aa7239..abdeb1b30b626 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -48,7 +48,8 @@ def test_compare_timedelta64_zerodim(self, box_with_array):
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
- with pytest.raises(TypeError):
+ msg = "Invalid comparison between dtype"
+ with pytest.raises(TypeError, match=msg):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
@@ -442,7 +443,8 @@ def test_addition_ops(self):
tdi[0:1] + dti
# random indexes
- with pytest.raises(TypeError):
+ msg = "Addition/subtraction of integers and integer-arrays"
+ with pytest.raises(TypeError, match=msg):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
@@ -604,6 +606,7 @@ def test_tdi_add_timestamp_nat_masking(self):
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
+ # TODO: Make raised error message more informative and test
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime):
@@ -700,13 +703,14 @@ def test_timedelta_ops_with_missing_values(self):
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
- with pytest.raises(TypeError):
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
s1 + np.nan
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
np.nan + s1
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
s1 - np.nan
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
-np.nan + s1
actual = s1 + pd.NaT
@@ -738,9 +742,10 @@ def test_timedelta_ops_with_missing_values(self):
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
- with pytest.raises(TypeError):
+ msg = "cannot subtract a datelike from|unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
df1 + np.nan
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
@@ -957,7 +962,8 @@ def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
- with pytest.raises(TypeError):
+ msg = "cannot subtract a datelike"
+ with pytest.raises(TypeError, match=msg):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
@@ -969,7 +975,8 @@ def test_tdi_sub_dt64_array(self, box_with_array):
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
- with pytest.raises(TypeError):
+ msg = "cannot subtract a datelike from"
+ with pytest.raises(TypeError, match=msg):
tdi - dtarr
# TimedeltaIndex.__rsub__
@@ -1025,7 +1032,8 @@ def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
- with pytest.raises(TypeError):
+ msg = "cannot subtract|unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
tdi - pi
# FIXME: don't leave commented-out
@@ -1034,9 +1042,9 @@ def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
# pi - tdi
# GH#13078 subtraction of Period scalar not supported
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
tdi - pi[0]
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
pi[0] - tdi
@pytest.mark.parametrize(
@@ -1499,16 +1507,17 @@ def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
- with pytest.raises(TypeError):
+ msg = "has incorrect type|cannot add the type MonthEnd"
+ with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
@@ -1533,7 +1542,8 @@ def test_td64arr_add_sub_object_array(self, box_with_array):
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "unsupported operand type|cannot subtract a datelike"
+ with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(warn):
tdarr - other
@@ -1588,7 +1598,8 @@ def test_td64arr_mul_int(self, box_with_array):
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
- with pytest.raises(TypeError):
+ msg = "argument must be an integer|cannot use operands with types dtype"
+ with pytest.raises(TypeError, match=msg):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
@@ -1777,12 +1788,13 @@ def test_tdarr_div_length_mismatch(self, box_with_array):
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
+ msg = "Cannot divide vectors|Unable to coerce to Series"
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
rng / other
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
other / rng
# ------------------------------------------------------------------
@@ -1908,7 +1920,8 @@ def test_td64arr_mod_int(self, box_with_array):
result = tdarr % 2
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = "Cannot divide int by"
+ with pytest.raises(TypeError, match=msg):
2 % tdarr
if box_with_array is pd.DataFrame:
@@ -1957,15 +1970,21 @@ def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
- with pytest.raises(TypeError):
+ msg = (
+ "cannot use operands with types dtype|"
+ "Cannot multiply with unequal lengths|"
+ "Unable to coerce to Series"
+ )
+ with pytest.raises(TypeError, match=msg):
idx * idx[:3]
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
- with pytest.raises(TypeError):
+ msg = "cannot use operands with types dtype"
+ with pytest.raises(TypeError, match=msg):
idx * idx
# ------------------------------------------------------------------
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 52640044565fc..835aa87a7c21b 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -90,6 +90,21 @@ def test_isin_empty(empty):
tm.assert_numpy_array_equal(expected, result)
+def test_diff():
+ s = pd.Series([1, 2, 3], dtype="category")
+ with tm.assert_produces_warning(FutureWarning):
+ result = s.diff()
+ expected = pd.Series([np.nan, 1, 1])
+ tm.assert_series_equal(result, expected)
+
+ expected = expected.to_frame(name="A")
+ df = s.to_frame(name="A")
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.diff()
+
+ tm.assert_frame_equal(result, expected)
+
+
class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
@@ -111,7 +126,7 @@ def test_take_bounds(self, allow_fill):
if allow_fill:
msg = "indices are out-of-bounds"
else:
- msg = "index 4 is out of bounds for size 3"
+ msg = "index 4 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
cat.take([4, 5], allow_fill=allow_fill)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 70a23e9748dd1..cfba3da354d44 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -605,6 +605,6 @@ def test_constructor_imaginary(self):
@pytest.mark.skipif(_np_version_under1p16, reason="Skipping for NumPy <1.16")
def test_constructor_string_and_tuples(self):
# GH 21416
- c = pd.Categorical(["c", ("a", "b"), ("b", "a"), "c"])
+ c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
expected_index = pd.Index([("a", "b"), ("b", "a"), "c"])
assert c.categories.equals(expected_index)
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 211bf091ee17d..8889f45a84237 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -77,7 +77,7 @@ def test_fillna_iterable_category(self, named):
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
- cat = Categorical([Point(0, 0), Point(0, 1), None])
+ cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 8643e7f6f89c1..0c830c65e0f8b 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -97,8 +97,8 @@ def test_comparisons(self):
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
msg = (
- "Cannot compare a Categorical for op __gt__ with type"
- r" <class 'numpy\.ndarray'>"
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
@@ -265,8 +265,8 @@ def test_comparisons(self, data, reverse, base):
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = (
- "Cannot compare a Categorical for op __gt__ with type"
- r" <class 'numpy\.ndarray'>"
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py
index f66c327e9967d..9e164a250cdb1 100644
--- a/pandas/tests/arrays/categorical/test_warnings.py
+++ b/pandas/tests/arrays/categorical/test_warnings.py
@@ -14,6 +14,16 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; c = Categorical([])"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # GH 31324 newer jedi version raises Deprecation warning
+ import jedi
+
+ if jedi.__version__ < "0.16.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("c.", 1))
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index e046d87780bb4..35eda4a0ec5bc 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -152,7 +152,7 @@ def test_arrow_array():
assert result.equals(expected)
# unsupported conversions
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match="Not supported to convert IntervalArray"):
pa.array(intervals, type="float64")
with pytest.raises(TypeError, match="different 'subtype'"):
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 76442a63ccb0f..bf7d275e4ff7b 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -388,6 +388,14 @@ def test_mixed_array_comparison(self, kind):
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
+ def test_xor(self):
+ s = SparseArray([True, True, False, False])
+ t = SparseArray([True, False, True, False])
+ result = s ^ t
+ sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32"))
+ expected = SparseArray([False, True, True], sparse_index=sp_index)
+ tm.assert_sp_array_equal(result, expected)
+
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
@@ -468,6 +476,14 @@ def test_invert(fill_value):
expected = SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
+ result = ~pd.Series(sparray)
+ expected = pd.Series(expected)
+ tm.assert_series_equal(result, expected)
+
+ result = ~pd.DataFrame({"A": sparray})
+ expected = pd.DataFrame({"A": expected})
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 33e68f029922e..5e2f14af341ab 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -194,6 +194,25 @@ def test_constructor_raises():
with pytest.raises(ValueError, match="sequence of strings"):
pd.arrays.StringArray(np.array([]))
+ with pytest.raises(ValueError, match="strings or pandas.NA"):
+ pd.arrays.StringArray(np.array(["a", np.nan], dtype=object))
+
+ with pytest.raises(ValueError, match="strings or pandas.NA"):
+ pd.arrays.StringArray(np.array(["a", None], dtype=object))
+
+ with pytest.raises(ValueError, match="strings or pandas.NA"):
+ pd.arrays.StringArray(np.array(["a", pd.NaT], dtype=object))
+
+
+@pytest.mark.parametrize("copy", [True, False])
+def test_from_sequence_no_mutate(copy):
+ a = np.array(["a", np.nan], dtype=object)
+ original = a.copy()
+ result = pd.arrays.StringArray._from_sequence(a, copy=copy)
+ expected = pd.arrays.StringArray(np.array(["a", pd.NA], dtype=object))
+ tm.assert_extension_array_equal(result, expected)
+ tm.assert_numpy_array_equal(a, original)
+
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.xfail(reason="Not implemented StringArray.sum")
diff --git a/pandas/tests/arrays/test_boolean.py b/pandas/tests/arrays/test_boolean.py
index cc8d0cdcb518d..cb9b07db4a0df 100644
--- a/pandas/tests/arrays/test_boolean.py
+++ b/pandas/tests/arrays/test_boolean.py
@@ -251,6 +251,22 @@ def test_coerce_to_numpy_array():
np.array(arr, dtype="bool")
+def test_to_boolean_array_from_strings():
+ result = BooleanArray._from_sequence_of_strings(
+ np.array(["True", "False", np.nan], dtype=object)
+ )
+ expected = BooleanArray(
+ np.array([True, False, False]), np.array([False, False, True])
+ )
+
+ tm.assert_extension_array_equal(result, expected)
+
+
+def test_to_boolean_array_from_strings_invalid_string():
+ with pytest.raises(ValueError, match="cannot be cast"):
+ BooleanArray._from_sequence_of_strings(["donkey"])
+
+
def test_repr():
df = pd.DataFrame({"A": pd.array([True, False, None], dtype="boolean")})
expected = " A\n0 True\n1 False\n2 <NA>"
@@ -455,6 +471,24 @@ def test_ufunc_reduce_raises(values):
np.add.reduce(a)
+class TestUnaryOps:
+ def test_invert(self):
+ a = pd.array([True, False, None], dtype="boolean")
+ expected = pd.array([False, True, None], dtype="boolean")
+ tm.assert_extension_array_equal(~a, expected)
+
+ expected = pd.Series(expected, index=["a", "b", "c"], name="name")
+ result = ~pd.Series(a, index=["a", "b", "c"], name="name")
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame({"A": a, "B": [True, False, False]}, index=["a", "b", "c"])
+ result = ~df
+ expected = pd.DataFrame(
+ {"A": expected, "B": [False, True, True]}, index=["a", "b", "c"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+
class TestLogicalOps(BaseOpsUtil):
def test_numpy_scalars_ok(self, all_logical_operators):
a = pd.array([True, False, None], dtype="boolean")
@@ -879,3 +913,19 @@ def test_value_counts_na():
result = arr.value_counts(dropna=True)
expected = pd.Series([1, 1], index=[True, False], dtype="Int64")
tm.assert_series_equal(result, expected)
+
+
+def test_diff():
+ a = pd.array(
+ [True, True, False, False, True, None, True, None, False], dtype="boolean"
+ )
+ result = pd.core.algorithms.diff(a, 1)
+ expected = pd.array(
+ [None, False, True, False, True, None, None, None, None], dtype="boolean"
+ )
+ tm.assert_extension_array_equal(result, expected)
+
+ s = pd.Series(a)
+ result = s.diff()
+ expected = pd.Series(expected)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index fa45db93c6102..87b825c8c27bd 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -65,8 +65,8 @@ def test_compare_len1_raises(self):
# to the case where one has length-1, which numpy would broadcast
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
- idx = self.index_cls._simple_new(data, freq="D")
- arr = self.array_cls(idx)
+ idx = self.array_cls._simple_new(data, freq="D")
+ arr = self.index_cls(idx)
with pytest.raises(ValueError, match="Lengths must match"):
arr == arr[:1]
@@ -79,8 +79,8 @@ def test_take(self):
data = np.arange(100, dtype="i8") * 24 * 3600 * 10 ** 9
np.random.shuffle(data)
- idx = self.index_cls._simple_new(data, freq="D")
- arr = self.array_cls(idx)
+ arr = self.array_cls._simple_new(data, freq="D")
+ idx = self.index_cls._simple_new(arr)
takers = [1, 4, 94]
result = arr.take(takers)
@@ -97,8 +97,7 @@ def test_take(self):
def test_take_fill(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
- idx = self.index_cls._simple_new(data, freq="D")
- arr = self.array_cls(idx)
+ arr = self.array_cls._simple_new(data, freq="D")
result = arr.take([-1, 1], allow_fill=True, fill_value=None)
assert result[0] is pd.NaT
@@ -121,7 +120,9 @@ def test_take_fill(self):
def test_concat_same_type(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
- idx = self.index_cls._simple_new(data, freq="D").insert(0, pd.NaT)
+ arr = self.array_cls._simple_new(data, freq="D")
+ idx = self.index_cls(arr)
+ idx = idx.insert(0, pd.NaT)
arr = self.array_cls(idx)
result = arr._concat_same_type([arr[:-1], arr[1:], arr])
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 5608ab5fbd9db..a59ed429cc404 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -331,25 +331,19 @@ def test_searchsorted_tzawareness_compat(self, index):
pd.Timestamp.now().to_period("D"),
],
)
- @pytest.mark.parametrize(
- "index",
- [
- True,
- pytest.param(
- False,
- marks=pytest.mark.xfail(
- reason="Raises ValueError instead of TypeError", raises=ValueError
- ),
- ),
- ],
- )
+ @pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = DatetimeArray(data, freq="D")
if index:
arr = pd.Index(arr)
- msg = "searchsorted requires compatible dtype or scalar"
+ msg = "|".join(
+ [
+ "searchsorted requires compatible dtype or scalar",
+ "Unexpected type for 'value'",
+ ]
+ )
with pytest.raises(TypeError, match=msg):
arr.searchsorted(other)
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 0c8980c43c370..cc81ae4504dd8 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -363,24 +363,26 @@ def test_divide_by_zero(self, zero, negative):
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
- a = pd.array([0, 1, None, 2], dtype="Int64")
+ a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
- expected = pd.array([1, 1, 1, 1], dtype="Int64")
+ expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
- expected = pd.array([0, 1, None, 2], dtype="Int64")
+ expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
- expected = pd.array([None, 1, None, None], dtype="Int64")
+ expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
- expected = np.array([np.nan, 1, np.nan, np.nan], dtype="float64")
+ expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
+ a = a[1:] # Can't raise integers to negative powers.
+
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
@@ -678,6 +680,13 @@ def test_astype_str(self):
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
+ def test_astype_boolean(self):
+ # https://github.com/pandas-dev/pandas/issues/31102
+ a = pd.array([1, 0, -1, 2, None], dtype="Int64")
+ result = a.astype("boolean")
+ expected = pd.array([True, False, True, True, None], dtype="boolean")
+ tm.assert_extension_array_equal(result, expected)
+
def test_frame_repr(data_missing):
@@ -933,6 +942,8 @@ def test_astype_nansafe():
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
+# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
+@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
@@ -1050,6 +1061,36 @@ def test_value_counts_na():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
+@pytest.mark.parametrize("right", [True, False])
+@pytest.mark.parametrize("include_lowest", [True, False])
+def test_cut(bins, right, include_lowest):
+ a = np.random.randint(0, 10, size=50).astype(object)
+ a[::2] = np.nan
+ result = pd.cut(
+ pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
+ )
+ expected = pd.cut(a, bins, right=right, include_lowest=include_lowest)
+ tm.assert_categorical_equal(result, expected)
+
+
+def test_array_setitem_nullable_boolean_mask():
+ # GH 31446
+ ser = pd.Series([1, 2], dtype="Int64")
+ result = ser.where(ser > 1)
+ expected = pd.Series([pd.NA, 2], dtype="Int64")
+ tm.assert_series_equal(result, expected)
+
+
+def test_array_setitem():
+ # GH 31446
+ arr = pd.Series([1, 2], dtype="Int64").array
+ arr[arr > 1] = 1
+
+ expected = pd.array([1, 1], dtype="Int64")
+ tm.assert_extension_array_equal(arr, expected)
+
+
# TODO(jreback) - these need testing / are broken
# shift
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 62cb4766171a4..c86b4f71ee592 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -154,25 +154,19 @@ def test_setitem_objects(self, obj):
pd.Timestamp.now().to_period("D"),
],
)
- @pytest.mark.parametrize(
- "index",
- [
- True,
- pytest.param(
- False,
- marks=pytest.mark.xfail(
- reason="Raises ValueError instead of TypeError", raises=ValueError
- ),
- ),
- ],
- )
+ @pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = TimedeltaArray(data, freq="D")
if index:
arr = pd.Index(arr)
- msg = "searchsorted requires compatible dtype or scalar"
+ msg = "|".join(
+ [
+ "searchsorted requires compatible dtype or scalar",
+ "Unexpected type for 'value'",
+ ]
+ )
with pytest.raises(TypeError, match=msg):
arr.searchsorted(other)
diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py
index 0b7274399aafc..e27b5c307cd99 100644
--- a/pandas/tests/base/test_constructors.py
+++ b/pandas/tests/base/test_constructors.py
@@ -53,13 +53,16 @@ def test_invalid_delegation(self):
delegate = self.Delegate(self.Delegator())
- with pytest.raises(TypeError):
+ msg = "You cannot access the property foo"
+ with pytest.raises(TypeError, match=msg):
delegate.foo
- with pytest.raises(TypeError):
+ msg = "The property foo cannot be set"
+ with pytest.raises(TypeError, match=msg):
delegate.foo = 5
- with pytest.raises(TypeError):
+ msg = "You cannot access the property foo"
+ with pytest.raises(TypeError, match=msg):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
@@ -85,8 +88,8 @@ class T(NoNewAttributesMixin):
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
-
- with pytest.raises(AttributeError):
+ msg = "You cannot add any new attribute"
+ with pytest.raises(AttributeError, match=msg):
t.b = "test"
assert not hasattr(t, "b")
@@ -129,7 +132,8 @@ def test_constructor_datetime_outofbound(self, a, klass):
# datetime64[non-ns] raise error, other cases result in object dtype
# and preserve original data
if a.dtype.kind == "M":
- with pytest.raises(pd.errors.OutOfBoundsDatetime):
+ msg = "Out of bounds"
+ with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
klass(a)
else:
result = klass(a)
@@ -138,5 +142,6 @@ def test_constructor_datetime_outofbound(self, a, klass):
# Explicit dtype specified
# Forced conversion fails for all -> all cases raise error
- with pytest.raises(pd.errors.OutOfBoundsDatetime):
+ msg = "Out of bounds"
+ with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
klass(a, dtype="datetime64[ns]")
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py
index 07a15d0619bb6..46fd1551e6170 100644
--- a/pandas/tests/base/test_conversion.py
+++ b/pandas/tests/base/test_conversion.py
@@ -303,7 +303,8 @@ def test_array(array, attr, index_or_series):
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
- with pytest.raises(ValueError, match="MultiIndex"):
+ msg = "MultiIndex has no single backing array"
+ with pytest.raises(ValueError, match=msg):
idx.array
@@ -429,11 +430,11 @@ def test_to_numpy_na_value_numpy_dtype(container, values, dtype, na_value, expec
def test_to_numpy_kwargs_raises():
# numpy
s = pd.Series([1, 2, 3])
- match = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=match):
+ msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
# extension
s = pd.Series([1, 2, 3], dtype="Int64")
- with pytest.raises(TypeError, match=match):
+ with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index 2693eb12dda71..08ec57bd69ad4 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -123,11 +123,11 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False):
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
+ msg = "no attribute"
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
-
- with pytest.raises(err):
+ with pytest.raises(err, match=msg):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
@@ -211,9 +211,10 @@ def test_none_comparison(self):
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
- with pytest.raises(TypeError):
+ msg = "Invalid comparison"
+ with pytest.raises(TypeError, match=msg):
None > o
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
o > None
else:
result = None > o
@@ -235,7 +236,8 @@ def test_ndarray_compat_properties(self):
for p in ["flags", "strides", "itemsize", "base", "data"]:
assert not hasattr(o, p)
- with pytest.raises(ValueError):
+ msg = "can only convert an array of size 1 to a Python scalar"
+ with pytest.raises(ValueError, match=msg):
o.item() # len > 1
assert o.ndim == 1
@@ -438,7 +440,8 @@ def test_value_counts_bins(self, index_or_series):
s = klass(s_values)
# bins
- with pytest.raises(TypeError):
+ msg = "bins argument only works with numeric data"
+ with pytest.raises(TypeError, match=msg):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
@@ -857,7 +860,8 @@ def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
- with pytest.raises(ValueError):
+ msg = "expected type bool"
+ with pytest.raises(ValueError, match=msg):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
@@ -870,9 +874,11 @@ def test_getitem(self):
assert i[-1] == i[9]
- with pytest.raises(IndexError):
+ msg = "index 20 is out of bounds for axis 0 with size 10"
+ with pytest.raises(IndexError, match=msg):
i[20]
- with pytest.raises(IndexError):
+ msg = "single positional indexer is out-of-bounds"
+ with pytest.raises(IndexError, match=msg):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 7f68abb92ba43..a240e6cef5930 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -274,9 +274,9 @@ def check_operands(left, right, cmp_op):
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = f"lhs {cmp1} rhs"
msg = (
- r"only list-like( or dict-like)? objects are allowed to be"
- r" passed to (DataFrame\.)?isin\(\), you passed a"
- r" (\[|')bool(\]|')|"
+ r"only list-like( or dict-like)? objects are allowed to be "
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
+ r"(\[|')bool(\]|')|"
"argument of type 'bool' is not iterable"
)
if cmp1 in ("in", "not in") and not is_list_like(rhs):
@@ -408,9 +408,9 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
ex = f"~(lhs {cmp1} rhs)"
msg = (
- r"only list-like( or dict-like)? objects are allowed to be"
- r" passed to (DataFrame\.)?isin\(\), you passed a"
- r" (\[|')float(\]|')|"
+ r"only list-like( or dict-like)? objects are allowed to be "
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
+ r"(\[|')float(\]|')|"
"argument of type 'float' is not iterable"
)
if is_scalar(rhs) and cmp1 in skip_these:
@@ -573,45 +573,39 @@ def test_series_negate(self):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
- def test_frame_pos(self):
+ @pytest.mark.parametrize(
+ "lhs",
+ [
+ # Float
+ DataFrame(randn(5, 2)),
+ # Int
+ DataFrame(randint(5, size=(5, 2))),
+ # bool doesn't work with numexpr but works elsewhere
+ DataFrame(rand(5, 2) > 0.5),
+ ],
+ )
+ def test_frame_pos(self, lhs):
expr = self.ex("+")
-
- # float
- lhs = DataFrame(randn(5, 2))
expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- tm.assert_frame_equal(expect, result)
- # int
- lhs = DataFrame(randint(5, size=(5, 2)))
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- tm.assert_frame_equal(expect, result)
-
- # bool doesn't work with numexpr but works elsewhere
- lhs = DataFrame(rand(5, 2) > 0.5)
- expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(expect, result)
- def test_series_pos(self):
+ @pytest.mark.parametrize(
+ "lhs",
+ [
+ # Float
+ Series(randn(5)),
+ # Int
+ Series(randint(5, size=5)),
+ # bool doesn't work with numexpr but works elsewhere
+ Series(rand(5) > 0.5),
+ ],
+ )
+ def test_series_pos(self, lhs):
expr = self.ex("+")
-
- # float
- lhs = Series(randn(5))
- expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- tm.assert_series_equal(expect, result)
-
- # int
- lhs = Series(randint(5, size=5))
expect = lhs
- result = pd.eval(expr, engine=self.engine, parser=self.parser)
- tm.assert_series_equal(expect, result)
- # bool doesn't work with numexpr but works elsewhere
- lhs = Series(rand(5) > 0.5)
- expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_series_equal(expect, result)
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index ce925891f62c0..097e83d93ee71 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -668,7 +668,8 @@ def test__get_dtype(input_param, result):
(None, "Cannot deduce dtype from null object"),
(1, "data type not understood"),
(1.2, "data type not understood"),
- ("random string", 'data type "random string" not understood'),
+ # numpy dev changed from double-quotes to single quotes
+ ("random string", "data type [\"']random string[\"'] not understood"),
(pd.DataFrame([1, 2]), "data type not understood"),
],
)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index fddd6239df309..a599a086ae92b 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -244,11 +244,12 @@ def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]")
- msg = "^Cannot construct a 'DatetimeTZDtype'"
- with pytest.raises(TypeError, match=msg):
+ msg = "'construct_from_string' expects a string, got <class 'list'>"
+ with pytest.raises(TypeError, match=re.escape(msg)):
# list instead of string
DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
+ msg = "^Cannot construct a 'DatetimeTZDtype'"
with pytest.raises(TypeError, match=msg):
# non-nano unit
DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]")
@@ -547,9 +548,9 @@ def test_construction_from_string(self):
@pytest.mark.parametrize("string", [0, 3.14, ("a", "b"), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
- msg = "a string needs to be passed, got type"
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(TypeError, match=re.escape(msg)):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize("string", ["foo", "foo[int64]", "IntervalA"])
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index d022b0e97877a..48f9262ad3486 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1114,11 +1114,16 @@ def test_is_string_array(self):
assert lib.is_string_array(np.array(["foo", "bar"]))
assert not lib.is_string_array(
- np.array(["foo", "bar", np.nan], dtype=object), skipna=False
+ np.array(["foo", "bar", pd.NA], dtype=object), skipna=False
)
assert lib.is_string_array(
+ np.array(["foo", "bar", pd.NA], dtype=object), skipna=True
+ )
+ # NaN is not valid for string array, just NA
+ assert not lib.is_string_array(
np.array(["foo", "bar", np.nan], dtype=object), skipna=True
)
+
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
@@ -1341,9 +1346,11 @@ def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
- assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.0)
+ assert is_scalar(1)
+ assert is_scalar(complex(2))
+ assert is_scalar(float("NaN"))
assert is_scalar(np.nan)
assert is_scalar("foobar")
assert is_scalar(b"foobar")
@@ -1352,6 +1359,7 @@ def test_is_scalar_builtin_scalars(self):
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
+ assert is_scalar(pd.NA)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
@@ -1366,6 +1374,7 @@ def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.0))
assert is_scalar(np.int32(1))
+ assert is_scalar(np.complex64(2))
assert is_scalar(np.object_("foobar"))
assert is_scalar(np.str_("foobar"))
assert is_scalar(np.unicode_("foobar"))
@@ -1405,6 +1414,21 @@ def test_is_scalar_pandas_containers(self):
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
+ def test_is_scalar_number(self):
+ # Number() is not recognied by PyNumber_Check, so by extension
+ # is not recognized by is_scalar, but instances of non-abstract
+ # subclasses are.
+
+ class Numeric(Number):
+ def __init__(self, value):
+ self.value = value
+
+ def __int__(self):
+ return self.value
+
+ num = Numeric(1)
+ assert is_scalar(num)
+
def test_datetimeindex_from_empty_datetime64_array():
for unit in ["ms", "us", "ns"]:
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index b0e5a6f85feeb..b67ca4cfab83d 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -7,6 +7,7 @@
"""
import copy
import itertools
+from typing import Type
import numpy as np
import pyarrow as pa
@@ -29,14 +30,7 @@ class ArrowBoolDtype(ExtensionDtype):
na_value = pa.NULL
@classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
-
- @classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> Type["ArrowBoolArray"]:
"""
Return the array type associated with this dtype.
@@ -46,7 +40,8 @@ def construct_array_type(cls):
"""
return ArrowBoolArray
- def _is_boolean(self):
+ @property
+ def _is_boolean(self) -> bool:
return True
@@ -59,14 +54,7 @@ class ArrowStringDtype(ExtensionDtype):
na_value = pa.NULL
@classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
-
- @classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> Type["ArrowStringArray"]:
"""
Return the array type associated with this dtype.
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 090df35bd94c9..e2b6ea0304f6a 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -49,7 +49,12 @@ class TestMyDtype(BaseDtypeTests):
from .io import BaseParsingTests # noqa
from .methods import BaseMethodsTests # noqa
from .missing import BaseMissingTests # noqa
-from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa
+from .ops import ( # noqa
+ BaseArithmeticOpsTests,
+ BaseComparisonOpsTests,
+ BaseOpsUtil,
+ BaseUnaryOpsTests,
+)
from .printing import BasePrintingTests # noqa
from .reduce import ( # noqa
BaseBooleanReduceTests,
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index b6c12b5844086..b01867624cb16 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -105,3 +105,10 @@ def test_construct_from_string_another_type_raises(self, dtype):
msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'"
with pytest.raises(TypeError, match=msg):
type(dtype).construct_from_string("another_type")
+
+ def test_construct_from_string_wrong_type_raises(self, dtype):
+ with pytest.raises(
+ TypeError,
+ match="'construct_from_string' expects a string, got <class 'int'>",
+ ):
+ type(dtype).construct_from_string(0)
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index dc1f62c4c97c5..8615a8df22dcc 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -97,6 +97,15 @@ def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
+ def test_getitem_empty(self, data):
+ # Indexing with empty list
+ result = data[[]]
+ assert len(result) == 0
+ assert isinstance(result, type(data))
+
+ expected = data[np.array([], dtype="int64")]
+ self.assert_extension_array_equal(result, expected)
+
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
@@ -152,7 +161,12 @@ def test_getitem_boolean_array_mask(self, data):
def test_getitem_boolean_array_mask_raises(self, data):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:2] = pd.NA
- with pytest.raises(ValueError):
+
+ msg = (
+ "Cannot mask with a boolean indexer containing NA values|"
+ "cannot mask with array containing NA / NaN values"
+ )
+ with pytest.raises(ValueError, match=msg):
data[mask]
s = pd.Series(data)
@@ -160,6 +174,38 @@ def test_getitem_boolean_array_mask_raises(self, data):
with pytest.raises(ValueError):
s[mask]
+ @pytest.mark.parametrize(
+ "idx",
+ [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
+ ids=["list", "integer-array", "numpy-array"],
+ )
+ def test_getitem_integer_array(self, data, idx):
+ result = data[idx]
+ assert len(result) == 3
+ assert isinstance(result, type(data))
+ expected = data.take([0, 1, 2])
+ self.assert_extension_array_equal(result, expected)
+
+ expected = pd.Series(expected)
+ result = pd.Series(data)[idx]
+ self.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "idx",
+ [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
+ ids=["list", "integer-array"],
+ )
+ def test_getitem_integer_with_missing_raises(self, data, idx):
+ msg = "Cannot index with an integer indexer containing NA values"
+ with pytest.raises(ValueError, match=msg):
+ data[idx]
+
+ # TODO this raises KeyError about labels not found (it tries label-based)
+ # import pandas._testing as tm
+ # s = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
+ # with pytest.raises(ValueError, match=msg):
+ # s[idx]
+
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
@@ -245,7 +291,9 @@ def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
- array = data_missing._from_sequence([na, fill_value, na])
+ array = data_missing._from_sequence(
+ [na, fill_value, na], dtype=data_missing.dtype
+ )
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
@@ -293,10 +341,12 @@ def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
- array = data_missing._from_sequence([na, valid])
+ array = data_missing._from_sequence([na, valid], dtype=data_missing.dtype)
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
- expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
+ expected = pd.Series(
+ data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype)
+ )
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 1e427c6319cab..4a84a21084de2 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -1,6 +1,10 @@
+import operator
+
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_bool_dtype
+
import pandas as pd
import pandas._testing as tm
from pandas.core.sorting import nargsort
@@ -231,6 +235,32 @@ def test_container_shift(self, data, frame, periods, indices):
compare(result, expected)
+ @pytest.mark.parametrize("periods", [1, -2])
+ def test_diff(self, data, periods):
+ data = data[:5]
+ if is_bool_dtype(data.dtype):
+ op = operator.xor
+ else:
+ op = operator.sub
+ try:
+ # does this array implement ops?
+ op(data, data)
+ except Exception:
+ pytest.skip(f"{type(data)} does not support diff")
+ s = pd.Series(data)
+ result = s.diff(periods)
+ expected = pd.Series(op(data, data.shift(periods)))
+ self.assert_series_equal(result, expected)
+
+ df = pd.DataFrame({"A": data, "B": [1.0] * 5})
+ result = df.diff(periods)
+ if periods == 1:
+ b = [np.nan, 0, 0, 0, 0]
+ else:
+ b = [0, 0, 0, np.nan, np.nan]
+ expected = pd.DataFrame({"A": expected, "B": b})
+ self.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize(
"periods, indices",
[[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],
@@ -261,6 +291,11 @@ def test_shift_fill_value(self, data):
expected = data.take([2, 3, 0, 0])
self.assert_extension_array_equal(result, expected)
+ def test_not_hashable(self, data):
+ # We are in general mutable, so not hashable
+ with pytest.raises(TypeError, match="unhashable type"):
+ hash(data)
+
def test_hash_pandas_object_works(self, data, as_frame):
# https://github.com/pandas-dev/pandas/issues/23066
data = pd.Series(data)
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 20d06ef2e5647..0609f19c8e0c3 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -168,3 +168,11 @@ def test_direct_arith_with_series_returns_not_implemented(self, data):
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement __eq__")
+
+
+class BaseUnaryOpsTests(BaseOpsUtil):
+ def test_invert(self, data):
+ s = pd.Series(data, name="name")
+ result = ~s
+ expected = pd.Series(~data, name="name")
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 0bb8aede6298c..e0ca603aaa0ed 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -4,6 +4,7 @@
import pytest
import pandas as pd
+from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
@@ -195,3 +196,14 @@ def test_setitem_preserves_views(self, data):
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
+
+ def test_setitem_nullable_mask(self, data):
+ # GH 31446
+ # TODO: there is some issue with PandasArray, therefore,
+ # TODO: skip the setitem test for now, and fix it later
+ if data.dtype != PandasDtype("object"):
+ arr = data[:5]
+ expected = data.take([0, 0, 0, 3, 4])
+ mask = pd.array([True, True, True, False, False])
+ arr[mask] = data[0]
+ self.assert_extension_array_equal(expected, arr)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 85bd5f7a33fe1..2614d8c72c342 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -2,6 +2,7 @@
import numbers
import random
import sys
+from typing import Type
import numpy as np
@@ -10,6 +11,7 @@
import pandas as pd
from pandas.api.extensions import no_default, register_extension_dtype
from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
+from pandas.core.indexers import check_array_indexer
@register_extension_dtype
@@ -26,7 +28,7 @@ def __repr__(self) -> str:
return f"DecimalDtype(context={self.context})"
@classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> Type["DecimalArray"]:
"""
Return the array type associated with this dtype.
@@ -36,15 +38,8 @@ def construct_array_type(cls):
"""
return DecimalArray
- @classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
-
@property
- def _is_numeric(self):
+ def _is_numeric(self) -> bool:
return True
@@ -116,14 +111,7 @@ def __getitem__(self, item):
return self._data[item]
else:
# array, slice.
- if pd.api.types.is_list_like(item):
- if not pd.api.types.is_array_like(item):
- item = pd.array(item)
- dtype = item.dtype
- if pd.api.types.is_bool_dtype(dtype):
- item = pd.api.indexers.check_bool_array_indexer(self, item)
- elif pd.api.types.is_integer_dtype(dtype):
- item = np.asarray(item, dtype="int")
+ item = pd.api.indexers.check_array_indexer(self, item)
return type(self)(self._data[item])
def take(self, indexer, allow_fill=False, fill_value=None):
@@ -151,6 +139,8 @@ def __setitem__(self, key, value):
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
+
+ key = check_array_indexer(self, key)
self._data[key] = value
def __len__(self) -> int:
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 17bc2773aad19..9e741bb7f267c 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -16,6 +16,7 @@
import random
import string
import sys
+from typing import Type
import numpy as np
@@ -29,7 +30,7 @@ class JSONDtype(ExtensionDtype):
na_value = UserDict()
@classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> Type["JSONArray"]:
"""
Return the array type associated with this dtype.
@@ -39,13 +40,6 @@ def construct_array_type(cls):
"""
return JSONArray
- @classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
-
class JSONArray(ExtensionArray):
dtype = JSONDtype()
@@ -82,11 +76,8 @@ def __getitem__(self, item):
# slice
return type(self)(self.data[item])
else:
- if not pd.api.types.is_array_like(item):
- item = pd.array(item)
- dtype = item.dtype
- if pd.api.types.is_bool_dtype(dtype):
- item = pd.api.indexers.check_bool_array_indexer(self, item)
+ item = pd.api.indexers.check_array_indexer(self, item)
+ if pd.api.types.is_bool_dtype(item.dtype):
return self._from_sequence([x for x, m in zip(self, item) if m])
# integer
return type(self)([self.data[i] for i in item])
@@ -113,6 +104,11 @@ def __setitem__(self, key, value):
def __len__(self) -> int:
return len(self.data)
+ def __array__(self, dtype=None):
+ if dtype is None:
+ dtype = object
+ return np.asarray(self.data, dtype=dtype)
+
@property
def nbytes(self) -> int:
return sys.getsizeof(self.data)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 4d3145109e3c2..dc03a1f1dcf72 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -163,10 +163,6 @@ def test_unstack(self, data, index):
# this matches otherwise
return super().test_unstack(data, index)
- @pytest.mark.xfail(reason="Inconsistent sizes.")
- def test_transpose(self, data):
- super().test_transpose(data)
-
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
index 6dd00ad3b06ba..7c1da5e8102e2 100644
--- a/pandas/tests/extension/list/array.py
+++ b/pandas/tests/extension/list/array.py
@@ -6,6 +6,7 @@
import numbers
import random
import string
+from typing import Type
import numpy as np
@@ -21,7 +22,7 @@ class ListDtype(ExtensionDtype):
na_value = np.nan
@classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> Type["ListArray"]:
"""
Return the array type associated with this dtype.
@@ -31,13 +32,6 @@ def construct_array_type(cls):
"""
return ListArray
- @classmethod
- def construct_from_string(cls, string):
- if string == cls.name:
- return cls()
- else:
- raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
-
class ListArray(ExtensionArray):
dtype = ListDtype()
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index a7ce0fb097599..0c6b187eac1fc 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -327,7 +327,9 @@ def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
# override parent function to cast to bool for min/max
- if op_name in ("min", "max") and not pd.isna(expected):
+ if np.isnan(expected):
+ expected = pd.NA
+ elif op_name in ("min", "max"):
expected = bool(expected)
tm.assert_almost_equal(result, expected)
@@ -340,6 +342,10 @@ class TestPrinting(base.BasePrintingTests):
pass
+class TestUnaryOps(base.BaseUnaryOpsTests):
+ pass
+
+
# TODO parsing not yet supported
# class TestParsing(base.BaseParsingTests):
# pass
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index afb8412f12ea9..f55ec75b47dfa 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -19,6 +19,7 @@
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
+import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
@@ -233,7 +234,14 @@ class TestGroupby(base.BaseGroupbyTests):
class TestNumericReduce(base.BaseNumericReduceTests):
- pass
+ def check_reduce(self, s, op_name, skipna):
+ # overwrite to ensure pd.NA is tested instead of np.nan
+ # https://github.com/pandas-dev/pandas/issues/30958
+ result = getattr(s, op_name)(skipna=skipna)
+ expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
+ if np.isnan(expected):
+ expected = pd.NA
+ tm.assert_almost_equal(result, expected)
class TestBooleanReduce(base.BaseBooleanReduceTests):
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 7db38f41d4573..8a820c8746857 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -248,6 +248,10 @@ def test_repeat(self, data, repeats, as_series, use_numpy):
# Fails creating expected
super().test_repeat(data, repeats, as_series, use_numpy)
+ @pytest.mark.xfail(reason="PandasArray.diff may fail on dtype")
+ def test_diff(self, data, periods):
+ return super().test_diff(data, periods)
+
@skip_nested
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py
index 5de38915f04c1..a29c193676db2 100644
--- a/pandas/tests/frame/indexing/test_categorical.py
+++ b/pandas/tests/frame/indexing/test_categorical.py
@@ -354,6 +354,16 @@ def test_functions_no_warnings(self):
df.value, range(0, 105, 10), right=False, labels=labels
)
+ def test_setitem_single_row_categorical(self):
+ # GH 25495
+ df = DataFrame({"Alpha": ["a"], "Numeric": [0]})
+ categories = pd.Categorical(df["Alpha"], categories=["a", "b", "c"])
+ df.loc[:, "Alpha"] = categories
+
+ result = df["Alpha"]
+ expected = Series(categories, index=df.index, name="Alpha")
+ tm.assert_series_equal(result, expected)
+
def test_loc_indexing_preserves_index_category_dtype(self):
# GH 15166
df = DataFrame(
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 33c0e92845484..cbb9dd09bbede 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -447,8 +447,8 @@ def test_setitem(self, float_frame):
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
msg = (
- r"\"None of \[Float64Index\(\[.*dtype='float64'\)\] are in the"
- r" \[columns\]\""
+ r"\"None of \[Float64Index\(\[.*dtype='float64'\)\] are in the "
+ r"\[columns\]\""
)
with pytest.raises(KeyError, match=msg):
float_frame[np.random.randn(len(float_frame) + 1)] = 1
@@ -1039,9 +1039,9 @@ def test_getitem_setitem_float_labels(self):
# positional slicing only via iloc!
msg = (
- "cannot do slice indexing on"
- r" <class 'pandas\.core\.indexes\.numeric\.Float64Index'> with"
- r" these indexers \[1.0\] of <class 'float'>"
+ "cannot do slice indexing on "
+ r"<class 'pandas\.core\.indexes\.numeric\.Float64Index'> with "
+ r"these indexers \[1.0\] of <class 'float'>"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
@@ -2179,7 +2179,7 @@ def test_type_error_multiindex(self):
dg = df.pivot_table(index="i", columns="c", values=["x", "y"])
with pytest.raises(TypeError, match="is an invalid key"):
- str(dg[:, 0])
+ dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index d128a51f4b390..9fc3629e794e2 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -50,6 +50,10 @@ def test_append_series_dict(self):
)
tm.assert_frame_equal(result, expected.loc[:, result.columns])
+ msg = "Can only append a dict if ignore_index=True"
+ with pytest.raises(TypeError, match=msg):
+ df.append(series.to_dict())
+
# can append when name set
row = df.loc[4]
row.name = 5
diff --git a/pandas/tests/frame/methods/test_asof.py b/pandas/tests/frame/methods/test_asof.py
index 0291be0a4083e..e2b417972638e 100644
--- a/pandas/tests/frame/methods/test_asof.py
+++ b/pandas/tests/frame/methods/test_asof.py
@@ -143,3 +143,16 @@ def test_time_zone_aware_index(self, stamp, expected):
result = df.asof(stamp)
tm.assert_series_equal(result, expected)
+
+ def test_is_copy(self, date_range_frame):
+ # GH-27357, GH-30784: ensure the result of asof is an actual copy and
+ # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
+ df = date_range_frame
+ N = 50
+ df.loc[15:30, "A"] = np.nan
+ dates = date_range("1/1/1990", periods=N * 3, freq="25s")
+
+ result = df.asof(dates)
+
+ with tm.assert_produces_warning(None):
+ result["C"] = 1
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index 251563e51e15a..127233ed2713e 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -253,52 +253,19 @@ def test_describe_tz_values(self, tz_naive_fixture):
expected = DataFrame(
{
- "s1": [
- 5,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- 2,
- 1.581139,
- 0,
- 1,
- 2,
- 3,
- 4,
- ],
+ "s1": [5, 2, 0, 1, 2, 3, 4, 1.581139],
"s2": [
5,
- 5,
- s2.value_counts().index[0],
- 1,
+ Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
+ s2[1],
+ s2[2],
+ s2[3],
end.tz_localize(tz),
np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
],
},
- index=[
- "count",
- "unique",
- "top",
- "freq",
- "first",
- "last",
- "mean",
- "std",
- "min",
- "25%",
- "50%",
- "75%",
- "max",
- ],
+ index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
index 43c25f4c05c2d..ffdb6d41ebda5 100644
--- a/pandas/tests/frame/methods/test_diff.py
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -15,7 +15,7 @@ def test_diff(self, datetime_frame):
)
# int dtype
- a = 10000000000000000
+ a = 10_000_000_000_000_000
b = a + 1
s = Series([a, b])
diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py
index 54a3affdc3024..d0181f0309af1 100644
--- a/pandas/tests/frame/methods/test_to_records.py
+++ b/pandas/tests/frame/methods/test_to_records.py
@@ -235,7 +235,7 @@ def test_to_records_with_categorical(self):
# Check that bad types raise
(
dict(index=False, column_dtypes={"A": "int32", "B": "foo"}),
- (TypeError, 'data type "foo" not understood'),
+ (TypeError, "data type [\"']foo[\"'] not understood"),
),
],
)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 910230c737a2a..25b2997eb088f 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -823,6 +823,16 @@ def test_sum_bool(self, float_frame):
bools.sum(1)
bools.sum(0)
+ def test_sum_mixed_datetime(self):
+ # GH#30886
+ df = pd.DataFrame(
+ {"A": pd.date_range("2000", periods=4), "B": [1, 2, 3, 4]}
+ ).reindex([2, 3, 4])
+ result = df.sum()
+
+ expected = pd.Series({"B": 7.0})
+ tm.assert_series_equal(result, expected)
+
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 9263409f7a7f8..9de5d6fe16a0d 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -382,8 +382,8 @@ def test_swapaxes(self):
tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = (
- "No axis named 2 for object type"
- r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
+ "No axis named 2 for object type "
+ r"<class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
)
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 659b55756c4b6..c6eacf2bbcd84 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -332,6 +332,21 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname):
class TestFrameFlexArithmetic:
+ def test_floordiv_axis0(self):
+ # make sure we df.floordiv(ser, axis=0) matches column-wise result
+ arr = np.arange(3)
+ ser = pd.Series(arr)
+ df = pd.DataFrame({"A": ser, "B": ser})
+
+ result = df.floordiv(ser, axis=0)
+
+ expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
+
+ tm.assert_frame_equal(result, expected)
+
+ result2 = df.floordiv(ser.values, axis=0)
+ tm.assert_frame_equal(result2, expected)
+
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ea1e339f44d93..7b1a9d8ff6ae3 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -412,6 +412,12 @@ def test_constructor_dict_order_insertion(self):
expected = DataFrame(data=d, columns=list("ba"))
tm.assert_frame_equal(frame, expected)
+ def test_constructor_dict_nan_key_and_columns(self):
+ # GH 16894
+ result = pd.DataFrame({np.nan: [1, 2], 2: [2, 3]}, columns=[np.nan, 2])
+ expected = pd.DataFrame([[1, 2], [2, 3]], columns=[np.nan, 2])
+ tm.assert_frame_equal(result, expected)
+
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
@@ -1854,9 +1860,9 @@ def check(df):
# No NaN found -> error
if len(indexer) == 0:
msg = (
- "cannot do label indexing on"
- r" <class 'pandas\.core\.indexes\.range\.RangeIndex'>"
- r" with these indexers \[nan\] of <class 'float'>"
+ "cannot do label indexing on "
+ r"<class 'pandas\.core\.indexes\.range\.RangeIndex'> "
+ r"with these indexers \[nan\] of <class 'float'>"
)
with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
@@ -2433,6 +2439,24 @@ def test_datetime_date_tuple_columns_from_dict(self):
expected = DataFrame([0, 1, 2], columns=pd.Index(pd.Series([tup])))
tm.assert_frame_equal(result, expected)
+ def test_construct_with_two_categoricalindex_series(self):
+ # GH 14600
+ s1 = pd.Series(
+ [39, 6, 4], index=pd.CategoricalIndex(["female", "male", "unknown"])
+ )
+ s2 = pd.Series(
+ [2, 152, 2, 242, 150],
+ index=pd.CategoricalIndex(["f", "female", "m", "male", "unknown"]),
+ )
+ result = pd.DataFrame([s1, s2])
+ expected = pd.DataFrame(
+ np.array(
+ [[np.nan, 39.0, np.nan, 6.0, 4.0], [2.0, 152.0, 2.0, 242.0, 150.0]]
+ ),
+ columns=["f", "female", "m", "male", "unknown"],
+ )
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 06bb040224455..966f0d416676c 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -897,15 +897,15 @@ def test_astype_to_incorrect_datetimelike(self, unit):
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
- r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
- r" \[timedelta64\[{}\]\]"
+ r"cannot astype a datetimelike from \[datetime64\[ns\]\] to "
+ r"\[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
- r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
- r" \[datetime64\[{}\]\]"
+ r"cannot astype a timedelta from \[timedelta64\[ns\]\] to "
+ r"\[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
@@ -1072,6 +1072,27 @@ def test_str_to_small_float_conversion_type(self):
expected = pd.DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")]
+ )
+ def test_convert_dtypes(self, convert_integer, expected):
+ # Specific types are tested in tests/series/test_dtypes.py
+ # Just check that it works for DataFrame here
+ df = pd.DataFrame(
+ {
+ "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
+ "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
+ }
+ )
+ result = df.convert_dtypes(True, True, convert_integer, False)
+ expected = pd.DataFrame(
+ {
+ "a": pd.Series([1, 2, 3], dtype=expected),
+ "b": pd.Series(["x", "y", "z"], dtype="string"),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameDatetimeWithTZ:
def test_interleave(self, timezone_frame):
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 2e6759cb1a238..ae0516dd29a1f 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -670,8 +670,8 @@ def test_fillna_invalid_value(self, float_frame):
float_frame.fillna((1, 2))
# frame with series
msg = (
- '"value" parameter must be a scalar, dict or Series, but you'
- ' passed a "DataFrame"'
+ '"value" parameter must be a scalar, dict or Series, but you '
+ 'passed a "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
float_frame.iloc[:, 0].fillna(float_frame)
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index c727cb398d53e..162f3c114fa5d 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -61,6 +61,27 @@ def test_invert(self, float_frame):
tm.assert_frame_equal(-(df < 0), ~(df < 0))
+ def test_invert_mixed(self):
+ shape = (10, 5)
+ df = pd.concat(
+ [
+ pd.DataFrame(np.zeros(shape, dtype="bool")),
+ pd.DataFrame(np.zeros(shape, dtype=int)),
+ ],
+ axis=1,
+ ignore_index=True,
+ )
+ result = ~df
+ expected = pd.concat(
+ [
+ pd.DataFrame(np.ones(shape, dtype="bool")),
+ pd.DataFrame(-np.ones(shape, dtype=int)),
+ ],
+ axis=1,
+ ignore_index=True,
+ )
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize(
"df",
[
@@ -843,10 +864,10 @@ def test_alignment_non_pandas(self):
]:
tm.assert_series_equal(
- align(df, val, "index"), Series([1, 2, 3], index=df.index)
+ align(df, val, "index")[1], Series([1, 2, 3], index=df.index)
)
tm.assert_series_equal(
- align(df, val, "columns"), Series([1, 2, 3], index=df.columns)
+ align(df, val, "columns")[1], Series([1, 2, 3], index=df.columns)
)
# length mismatch
@@ -861,10 +882,11 @@ def test_alignment_non_pandas(self):
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
- align(df, val, "index"), DataFrame(val, index=df.index, columns=df.columns)
+ align(df, val, "index")[1],
+ DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
- align(df, val, "columns"),
+ align(df, val, "columns")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 05bdec4a3a4d2..49e6fe4940e18 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -164,13 +164,13 @@ def test_repr_column_name_unicode_truncation_bug(self):
"Id": [7117434],
"StringCol": (
"Is it possible to modify drop plot code"
- " so that the output graph is displayed "
+ "so that the output graph is displayed "
"in iphone simulator, Is it possible to "
"modify drop plot code so that the "
"output graph is \xe2\x80\xa8displayed "
"in iphone simulator.Now we are adding "
- "the CSV file externally. I want to Call"
- " the File through the code.."
+ "the CSV file externally. I want to Call "
+ "the File through the code.."
),
}
)
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 56a0c8cf4f5bd..b3af5a7b7317e 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -336,6 +336,80 @@ def test_unstack_fill_frame_categorical(self):
)
tm.assert_frame_equal(result, expected)
+ def test_unstack_tuplename_in_multiindex(self):
+ # GH 19966
+ idx = pd.MultiIndex.from_product(
+ [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
+ )
+ df = pd.DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
+ result = df.unstack(("A", "a"))
+
+ expected = pd.DataFrame(
+ [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
+ columns=pd.MultiIndex.from_tuples(
+ [
+ ("d", "a"),
+ ("d", "b"),
+ ("d", "c"),
+ ("e", "a"),
+ ("e", "b"),
+ ("e", "c"),
+ ],
+ names=[None, ("A", "a")],
+ ),
+ index=pd.Index([1, 2, 3], name=("B", "b")),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "unstack_idx, expected_values, expected_index, expected_columns",
+ [
+ (
+ ("A", "a"),
+ [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
+ pd.MultiIndex.from_tuples(
+ [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
+ ),
+ pd.MultiIndex.from_tuples(
+ [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
+ names=[None, ("A", "a")],
+ ),
+ ),
+ (
+ (("A", "a"), "B"),
+ [[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
+ pd.Index([3, 4], name="C"),
+ pd.MultiIndex.from_tuples(
+ [
+ ("d", "a", 1),
+ ("d", "a", 2),
+ ("d", "b", 1),
+ ("d", "b", 2),
+ ("e", "a", 1),
+ ("e", "a", 2),
+ ("e", "b", 1),
+ ("e", "b", 2),
+ ],
+ names=[None, ("A", "a"), "B"],
+ ),
+ ),
+ ],
+ )
+ def test_unstack_mixed_type_name_in_multiindex(
+ self, unstack_idx, expected_values, expected_index, expected_columns
+ ):
+ # GH 19966
+ idx = pd.MultiIndex.from_product(
+ [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
+ )
+ df = pd.DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
+ result = df.unstack(unstack_idx)
+
+ expected = pd.DataFrame(
+ expected_values, columns=expected_columns, index=expected_index,
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(
@@ -424,8 +498,8 @@ def test_stack_mixed_levels(self):
# When mixed types are passed and the ints are not level
# names, raise
msg = (
- "level should contain all level names or all level numbers, not"
- " a mixture of the two"
+ "level should contain all level names or all level numbers, not "
+ "a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 4a436d70dc48f..a2e7dc527c4b8 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -557,3 +557,17 @@ def strech(row):
result = df.apply(lambda x: [1, 2, 3], axis=1)
assert not isinstance(result, tm.SubclassedDataFrame)
tm.assert_series_equal(result, expected)
+
+ def test_subclassed_numeric_reductions(self, all_numeric_reductions):
+ # GH 25596
+
+ df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
+ result = getattr(df, all_numeric_reductions)()
+ assert isinstance(result, tm.SubclassedSeries)
+
+ def test_subclassed_boolean_reductions(self, all_boolean_reductions):
+ # GH 25596
+
+ df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
+ result = getattr(df, all_boolean_reductions)()
+ assert isinstance(result, tm.SubclassedSeries)
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 10a1e09a09bf8..7645c6b4cf709 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -446,6 +446,15 @@ def test_sample_upsampling_without_replacement(self):
with pytest.raises(ValueError, match=msg):
df.sample(frac=2, replace=False)
+ def test_sample_is_copy(self):
+ # GH-27357, GH-30784: ensure the result of sample is an actual copy and
+ # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
+ df = pd.DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
+ df2 = df.sample(3)
+
+ with tm.assert_produces_warning(None):
+ df2["d"] = 1
+
def test_size_compat(self):
# GH8846
# size property should be defined
@@ -548,9 +557,6 @@ def test_validate_bool_args(self):
with pytest.raises(ValueError):
super(DataFrame, df).drop("a", axis=1, inplace=value)
- with pytest.raises(ValueError):
- super(DataFrame, df).sort_index(inplace=value)
-
with pytest.raises(ValueError):
super(DataFrame, df)._consolidate(inplace=value)
@@ -820,18 +826,23 @@ def test_take_invalid_kwargs(self):
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
- def test_depr_take_kwarg_is_copy(self):
+ @pytest.mark.parametrize("is_copy", [True, False])
+ def test_depr_take_kwarg_is_copy(self, is_copy):
# GH 27357
df = DataFrame({"A": [1, 2, 3]})
msg = (
"is_copy is deprecated and will be removed in a future version. "
- "take will always return a copy in the future."
+ "'take' always returns a copy, so there is no need to specify this."
)
with tm.assert_produces_warning(FutureWarning) as w:
- df.take([0, 1], is_copy=True)
+ df.take([0, 1], is_copy=is_copy)
assert w[0].message.args[0] == msg
+ s = Series([1, 2, 3])
+ with tm.assert_produces_warning(FutureWarning):
+ s.take([0, 1], is_copy=is_copy)
+
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 0b72a61ed84de..2d31996a8a964 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -10,7 +10,6 @@
from pandas import DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
-from pandas.core.groupby.generic import _make_unique, _maybe_mangle_lambdas
from pandas.core.groupby.grouper import Grouping
@@ -361,6 +360,23 @@ def test_func_duplicates_raises():
df.groupby("A").agg(["min", "min"])
+@pytest.mark.parametrize(
+ "index",
+ [
+ pd.CategoricalIndex(list("abc")),
+ pd.interval_range(0, 3),
+ pd.period_range("2020", periods=3, freq="D"),
+ pd.MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
+ ],
+)
+def test_agg_index_has_complex_internals(index):
+ # GH 31223
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
+ result = df.groupby("group").agg({"value": Series.nunique})
+ expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
+ tm.assert_frame_equal(result, expected)
+
+
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = pd.Series([1, 2, 3, 4])
@@ -631,42 +647,44 @@ def test_lambda_named_agg(func):
tm.assert_frame_equal(result, expected)
-class TestLambdaMangling:
- def test_maybe_mangle_lambdas_passthrough(self):
- assert _maybe_mangle_lambdas("mean") == "mean"
- assert _maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
- # don't mangel single lambda.
- assert _maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
-
- def test_maybe_mangle_lambdas_listlike(self):
- aggfuncs = [lambda x: 1, lambda x: 2]
- result = _maybe_mangle_lambdas(aggfuncs)
- assert result[0].__name__ == "<lambda_0>"
- assert result[1].__name__ == "<lambda_1>"
- assert aggfuncs[0](None) == result[0](None)
- assert aggfuncs[1](None) == result[1](None)
-
- def test_maybe_mangle_lambdas(self):
- func = {"A": [lambda x: 0, lambda x: 1]}
- result = _maybe_mangle_lambdas(func)
- assert result["A"][0].__name__ == "<lambda_0>"
- assert result["A"][1].__name__ == "<lambda_1>"
-
- def test_maybe_mangle_lambdas_args(self):
- func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
- result = _maybe_mangle_lambdas(func)
- assert result["A"][0].__name__ == "<lambda_0>"
- assert result["A"][1].__name__ == "<lambda_1>"
-
- assert func["A"][0](0, 1) == (0, 1, 1)
- assert func["A"][0](0, 1, 2) == (0, 1, 2)
- assert func["A"][0](0, 2, b=3) == (0, 2, 3)
-
- def test_maybe_mangle_lambdas_named(self):
- func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
- result = _maybe_mangle_lambdas(func)
- assert result == func
+def test_aggregate_mixed_types():
+ # GH 16916
+ df = pd.DataFrame(
+ data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
+ )
+ df["grouping"] = ["group 1", "group 1", 2]
+ result = df.groupby("grouping").aggregate(lambda x: x.tolist())
+ expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
+ expected = pd.DataFrame(
+ expected_data,
+ index=Index([2, "group 1"], dtype="object", name="grouping"),
+ columns=Index(["X", "Y", "Z"], dtype="object"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+@pytest.mark.xfail(reason="Not implemented.")
+def test_aggregate_udf_na_extension_type():
+ # https://github.com/pandas-dev/pandas/pull/31359
+ # This is currently failing to cast back to Int64Dtype.
+ # The presence of the NA causes two problems
+ # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
+ # 2. The presence of an NA forces object type, so the non-NA values is
+ # a Python int rather than a NumPy int64. Python ints aren't
+ # instances of numpy.int64.
+ def aggfunc(x):
+ if all(x > 2):
+ return 1
+ else:
+ return pd.NA
+
+ df = pd.DataFrame({"A": pd.array([1, 2, 3])})
+ result = df.groupby([1, 1, 2]).agg(aggfunc)
+ expected = pd.DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
+ tm.assert_frame_equal(result, expected)
+
+
+class TestLambdaMangling:
def test_basic(self):
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
@@ -784,48 +802,3 @@ def test_agg_multiple_lambda(self):
weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
)
tm.assert_frame_equal(result2, expected)
-
- @pytest.mark.parametrize(
- "order, expected_reorder",
- [
- (
- [
- ("height", "<lambda>"),
- ("height", "max"),
- ("weight", "max"),
- ("height", "<lambda>"),
- ("weight", "<lambda>"),
- ],
- [
- ("height", "<lambda>_0"),
- ("height", "max"),
- ("weight", "max"),
- ("height", "<lambda>_1"),
- ("weight", "<lambda>"),
- ],
- ),
- (
- [
- ("col2", "min"),
- ("col1", "<lambda>"),
- ("col1", "<lambda>"),
- ("col1", "<lambda>"),
- ],
- [
- ("col2", "min"),
- ("col1", "<lambda>_0"),
- ("col1", "<lambda>_1"),
- ("col1", "<lambda>_2"),
- ],
- ),
- (
- [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
- [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
- ),
- ],
- )
- def test_make_unique(self, order, expected_reorder):
- # GH 27519, test if make_unique function reorders correctly
- result = _make_unique(order)
-
- assert result == expected_reorder
diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py
index 8901af7a90acc..ebac36c5f8c78 100644
--- a/pandas/tests/groupby/conftest.py
+++ b/pandas/tests/groupby/conftest.py
@@ -112,7 +112,7 @@ def reduction_func(request):
return request.param
-@pytest.fixture(params=transformation_kernels)
+@pytest.fixture(params=sorted(transformation_kernels))
def transformation_func(request):
"""yields the string names of all groupby transformation functions."""
return request.param
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 2f2f97f2cd993..c18ef73203914 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -467,6 +467,29 @@ def filt2(x):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("test_series", [True, False])
+def test_apply_with_duplicated_non_sorted_axis(test_series):
+ # GH 30667
+ df = pd.DataFrame(
+ [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2]
+ )
+ if test_series:
+ ser = df.set_index("Y")["X"]
+ result = ser.groupby(level=0).apply(lambda x: x)
+
+ # not expecting the order to remain the same for duplicated axis
+ result = result.sort_index()
+ expected = ser.sort_index()
+ tm.assert_series_equal(result, expected)
+ else:
+ result = df.groupby("Y").apply(lambda x: x)
+
+ # not expecting the order to remain the same for duplicated axis
+ result = result.sort_values("Y")
+ expected = df.sort_values("Y")
+ tm.assert_frame_equal(result, expected)
+
+
def test_apply_corner_cases():
# #535, can't use sliding iterator
@@ -752,3 +775,55 @@ def most_common_values(df):
["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId"
)
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("category", [False, True])
+def test_apply_multi_level_name(category):
+ # https://github.com/pandas-dev/pandas/issues/31068
+ b = [1, 2] * 5
+ if category:
+ b = pd.Categorical(b, categories=[1, 2, 3])
+ df = pd.DataFrame(
+ {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
+ ).set_index(["A", "B"])
+ result = df.groupby("B").apply(lambda x: x.sum())
+ expected = pd.DataFrame(
+ {"C": [20, 25], "D": [20, 25]}, index=pd.Index([1, 2], name="B")
+ )
+ tm.assert_frame_equal(result, expected)
+ assert df.index.names == ["A", "B"]
+
+
+def test_groupby_apply_datetime_result_dtypes():
+ # GH 14849
+ data = pd.DataFrame.from_records(
+ [
+ (pd.Timestamp(2016, 1, 1), "red", "dark", 1, "8"),
+ (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"),
+ (pd.Timestamp(2014, 1, 1), "blue", "bright", 3, "10"),
+ (pd.Timestamp(2013, 1, 1), "blue", "calm", 4, "potato"),
+ ],
+ columns=["observation", "color", "mood", "intensity", "score"],
+ )
+ result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes
+ expected = Series(
+ [np.dtype("datetime64[ns]"), np.object, np.object, np.int64, np.object],
+ index=["observation", "color", "mood", "intensity", "score"],
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "index",
+ [
+ pd.CategoricalIndex(list("abc")),
+ pd.interval_range(0, 3),
+ pd.period_range("2020", periods=3, freq="D"),
+ pd.MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
+ ],
+)
+def test_apply_index_has_complex_internals(index):
+ # GH 31248
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
+ result = df.groupby("group").apply(lambda x: x)
+ tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 9323946581a0d..1c2de8c8c223f 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1342,3 +1342,37 @@ def test_series_groupby_categorical_aggregation_getitem():
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "func, expected_values",
+ [(pd.Series.nunique, [1, 1, 2]), (pd.Series.count, [1, 2, 2])],
+)
+def test_groupby_agg_categorical_columns(func, expected_values):
+ # 31256
+ df = pd.DataFrame(
+ {
+ "id": [0, 1, 2, 3, 4],
+ "groups": [0, 1, 1, 2, 2],
+ "value": pd.Categorical([0, 0, 0, 0, 1]),
+ }
+ ).set_index("id")
+ result = df.groupby("groups").agg(func)
+
+ expected = pd.DataFrame(
+ {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_non_numeric():
+ df = pd.DataFrame(
+ {"A": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])}
+ )
+ expected = pd.DataFrame({"A": [2, 1]}, index=[1, 2])
+
+ result = df.groupby([1, 2, 1]).agg(pd.Series.nunique)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.groupby([1, 2, 1]).nunique()
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 7e374811d1960..b7d7124a3a5e5 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1952,6 +1952,13 @@ def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
tm.assert_frame_equal(result, expected)
+def test_ffill_missing_arguments():
+ # GH 14955
+ df = pd.DataFrame({"a": [1, 2], "b": [1, 1]})
+ with pytest.raises(ValueError, match="Must specify a fill"):
+ df.groupby("b").fillna()
+
+
def test_groupby_only_none_group():
# see GH21624
# this was crashing with "ValueError: Length of passed values is 1, index implies 0"
@@ -2030,3 +2037,23 @@ def test_groupby_list_level():
expected = pd.DataFrame(np.arange(0, 9).reshape(3, 3))
result = expected.groupby(level=[0]).mean()
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "max_seq_items, expected",
+ [
+ (5, "{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}"),
+ (4, "{0: [0], 1: [1], 2: [2], 3: [3], ...}"),
+ ],
+)
+def test_groups_repr_truncates(max_seq_items, expected):
+ # GH 1135
+ df = pd.DataFrame(np.random.randn(5, 1))
+ df["a"] = df.index
+
+ with pd.option_context("display.max_seq_items", max_seq_items):
+ result = df.groupby("a").groups.__repr__()
+ assert result == expected
+
+ result = df.groupby(np.array(df.a)).groups.__repr__()
+ assert result == expected
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 70ba21d89d22f..4273139b32828 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -388,7 +388,7 @@ def test_groupby_grouper_f_sanity_checked(self):
# if it fails on the elements, map tries it on the entire index as
# a sequence. That can yield invalid results that cause trouble
# down the line.
- # the surprise comes from using key[0:6] rather then str(key)[0:6]
+ # the surprise comes from using key[0:6] rather than str(key)[0:6]
# when the elements are Timestamp.
# the result is Index[0:6], very confusing.
@@ -725,10 +725,7 @@ def test_get_group(self):
g.get_group("foo")
with pytest.raises(ValueError, match=msg):
g.get_group(("foo"))
- msg = (
- "must supply a same-length tuple to get_group with multiple"
- " grouping keys"
- )
+ msg = "must supply a same-length tuple to get_group with multiple grouping keys"
with pytest.raises(ValueError, match=msg):
g.get_group(("foo", "bar", "baz"))
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 6c05c4038a829..8967ef06f50fb 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -317,6 +317,32 @@ def test_dispatch_transform(tsframe):
tm.assert_frame_equal(filled, expected)
+def test_transform_transformation_func(transformation_func):
+ # GH 30918
+ df = DataFrame(
+ {
+ "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"],
+ "B": [1, 2, np.nan, 3, 3, np.nan, 4],
+ }
+ )
+
+ if transformation_func in ["pad", "backfill", "tshift", "corrwith", "cumcount"]:
+ # These transformation functions are not yet covered in this test
+ pytest.xfail("See GH 31269 and GH 31270")
+ elif transformation_func == "fillna":
+ test_op = lambda x: x.transform("fillna", value=0)
+ mock_op = lambda x: x.fillna(value=0)
+ else:
+ test_op = lambda x: x.transform(transformation_func)
+ mock_op = lambda x: getattr(x, transformation_func)()
+
+ result = test_op(df.groupby("A"))
+ groups = [df[["B"]].iloc[:4], df[["B"]].iloc[4:6], df[["B"]].iloc[6:]]
+ expected = concat([mock_op(g) for g in groups])
+
+ tm.assert_frame_equal(result, expected)
+
+
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby("A")[["C", "D"]].transform(f)
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index e027641288bb9..d870259c2539b 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -976,8 +976,19 @@ def test_engine_type(self, dtype, engine_type):
assert np.issubdtype(ci.codes.dtype, dtype)
assert isinstance(ci._engine, engine_type)
- def test_getitem_2d_deprecated(self):
- # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable
- idx = self.create_index()
- with pytest.raises(ValueError, match="cannot mask with array containing NA"):
- idx[:, None]
+ @pytest.mark.parametrize(
+ "data, categories",
+ [
+ (list("abcbca"), list("cab")),
+ (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
+ ],
+ ids=["string", "interval"],
+ )
+ def test_map_str(self, data, categories, ordered_fixture):
+ # GH 31202 - override base class since we want to maintain categorical/ordered
+ index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
+ result = index.map(str)
+ expected = CategoricalIndex(
+ map(str, data), categories=map(str, categories), ordered=ordered_fixture
+ )
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index a16017b0e12c0..26d120619defc 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -6,6 +6,7 @@
from pandas._libs.tslib import iNaT
+from pandas.core.dtypes.common import is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
@@ -37,8 +38,8 @@ class Base:
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
- r"Index\(\.\.\.\) must be called with a collection of some"
- r" kind, None was passed|"
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ r"kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)"
)
@@ -301,6 +302,9 @@ def test_ensure_copied_data(self, indices):
index_type = type(indices)
result = index_type(indices.values, copy=True, **init_kwargs)
+ if is_datetime64tz_dtype(indices.dtype):
+ result = result.tz_localize("UTC").tz_convert(indices.tz)
+
tm.assert_index_equal(indices, result)
tm.assert_numpy_array_equal(
indices._ndarray_values, result._ndarray_values, check_same="copy"
@@ -464,6 +468,11 @@ def test_intersection_base(self, indices):
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
+ if is_datetime64tz_dtype(indices.dtype):
+ # The second.values below will drop tz, so the rest of this test
+ # is not applicable.
+ return
+
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
@@ -482,6 +491,11 @@ def test_union_base(self, indices):
union = first.union(second)
assert tm.equalContents(union, everything)
+ if is_datetime64tz_dtype(indices.dtype):
+ # The second.values below will drop tz, so the rest of this test
+ # is not applicable.
+ return
+
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
@@ -808,6 +822,13 @@ def test_map_dictlike(self, mapper):
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
+ def test_map_str(self):
+ # GH 31202
+ index = self.create_index()
+ result = index.map(str)
+ expected = Index([str(x) for x in index], dtype=object)
+ tm.assert_index_equal(result, expected)
+
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
@@ -883,3 +904,11 @@ def test_getitem_2d_deprecated(self):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
+
+ def test_contains_requires_hashable_raises(self):
+ idx = self.create_index()
+ with pytest.raises(TypeError, match="unhashable type"):
+ [] in idx
+
+ with pytest.raises(TypeError):
+ {} in idx._engine
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index e3e7ff4093b76..8e0366138f71e 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -1,7 +1,5 @@
-import numpy as np
import pytest
-import pandas as pd
import pandas._testing as tm
from pandas.core.indexes.api import Index, MultiIndex
@@ -9,6 +7,7 @@
"unicode": tm.makeUnicodeIndex(100),
"string": tm.makeStringIndex(100),
"datetime": tm.makeDateIndex(100),
+ "datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
"timedelta": tm.makeTimedeltaIndex(100),
"int": tm.makeIntIndex(100),
@@ -28,25 +27,3 @@
def indices(request):
# copy to avoid mutation, e.g. setting .name
return indices_dict[request.param].copy()
-
-
-@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
-def one(request):
- # zero-dim integer array behaves like an integer
- return request.param
-
-
-zeros = [
- box([0] * 5, dtype=dtype)
- for box in [pd.Index, np.array]
- for dtype in [np.int64, np.uint64, np.float64]
-]
-zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
-zeros.extend([0, 0.0])
-
-
-@pytest.fixture(params=zeros)
-def zero(request):
- # For testing division by (or of) zero for Index with length 5, this
- # gives several scalar-zeros and length-5 vector-zeros
- return request.param
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index ffe51dd1fb9f5..68285d41bda70 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -644,8 +644,8 @@ def test_constructor_dtype(self):
)
msg = (
- "cannot supply both a tz and a timezone-naive dtype"
- r" \(i\.e\. datetime64\[ns\]\)"
+ "cannot supply both a tz and a timezone-naive dtype "
+ r"\(i\.e\. datetime64\[ns\]\)"
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(idx, dtype="datetime64[ns]")
@@ -950,3 +950,17 @@ def test_datetimeindex_constructor_misc(self):
)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
+
+
+def test_timedelta_constructor_identity():
+ # Test for #30543
+ expected = pd.Timedelta(np.timedelta64(1, "s"))
+ result = pd.Timedelta(expected)
+ assert result is expected
+
+
+def test_timestamp_constructor_identity():
+ # Test for #30543
+ expected = pd.Timestamp("2017-01-01T12")
+ result = pd.Timestamp(expected)
+ assert result is expected
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 4c600e510790a..2f954117f48d7 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
import pandas._testing as tm
+from pandas.core.indexes.base import InvalidIndexError
from pandas.tseries.offsets import BDay, CDay
@@ -621,17 +622,21 @@ def test_get_value(self):
# specifically make sure we have test for np.datetime64 key
dti = pd.date_range("2016-01-01", periods=3)
- arr = np.arange(6, 8)
+ arr = np.arange(6, 9)
+ ser = pd.Series(arr, index=dti)
key = dti[1]
- result = dti.get_value(arr, key)
+ with pytest.raises(AttributeError, match="has no attribute '_values'"):
+ dti.get_value(arr, key)
+
+ result = dti.get_value(ser, key)
assert result == 7
- result = dti.get_value(arr, key.to_pydatetime())
+ result = dti.get_value(ser, key.to_pydatetime())
assert result == 7
- result = dti.get_value(arr, key.to_datetime64())
+ result = dti.get_value(ser, key.to_datetime64())
assert result == 7
def test_get_loc(self):
@@ -693,7 +698,7 @@ def test_get_loc(self):
with pytest.raises(KeyError, match="'foobar'"):
idx.get_loc("foobar")
- with pytest.raises(TypeError):
+ with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
idx.get_loc(slice(2))
idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
@@ -769,3 +774,14 @@ def test_get_loc_nat(self):
# GH#20464
index = DatetimeIndex(["1/3/2000", "NaT"])
assert index.get_loc(pd.NaT) == 1
+
+ assert index.get_loc(None) == 1
+
+ assert index.get_loc(np.nan) == 1
+
+ assert index.get_loc(pd.NA) == 1
+
+ assert index.get_loc(np.datetime64("NaT")) == 1
+
+ with pytest.raises(KeyError, match="NaT"):
+ index.get_loc(np.timedelta64("NaT"))
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index e30cc4449e01e..946d658e90132 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -142,6 +142,26 @@ def test_slice_year(self):
expected = slice(3288, 3653)
assert result == expected
+ @pytest.mark.parametrize(
+ "partial_dtime",
+ [
+ "2019",
+ "2019Q4",
+ "Dec 2019",
+ "2019-12-31",
+ "2019-12-31 23",
+ "2019-12-31 23:59",
+ ],
+ )
+ def test_slice_end_of_period_resolution(self, partial_dtime):
+ # GH#31064
+ dti = date_range("2019-12-31 23:59:55.999999999", periods=10, freq="s")
+
+ ser = pd.Series(range(10), index=dti)
+ result = ser[partial_dtime]
+ expected = ser.iloc[:5]
+ tm.assert_series_equal(result, expected)
+
def test_slice_quarter(self):
dti = date_range(freq="D", start=datetime(2000, 6, 1), periods=500)
diff --git a/pandas/tests/indexes/datetimes/test_snap.py b/pandas/tests/indexes/datetimes/test_snap.py
new file mode 100644
index 0000000000000..8baea9fe8341f
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/test_snap.py
@@ -0,0 +1,41 @@
+import pytest
+
+from pandas import DatetimeIndex, date_range
+import pandas._testing as tm
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+@pytest.mark.parametrize("tz", [None, "Asia/Shanghai", "Europe/Berlin"])
+@pytest.mark.parametrize("name", [None, "my_dti"])
+def test_dti_snap(name, tz):
+ dti = DatetimeIndex(
+ [
+ "1/1/2002",
+ "1/2/2002",
+ "1/3/2002",
+ "1/4/2002",
+ "1/5/2002",
+ "1/6/2002",
+ "1/7/2002",
+ ],
+ name=name,
+ tz=tz,
+ freq="D",
+ )
+
+ result = dti.snap(freq="W-MON")
+ expected = date_range("12/31/2001", "1/7/2002", name=name, tz=tz, freq="w-mon")
+ expected = expected.repeat([3, 4])
+ tm.assert_index_equal(result, expected)
+ assert result.tz == expected.tz
+ assert result.freq is None
+ assert expected.freq is None
+
+ result = dti.snap(freq="B")
+
+ expected = date_range("1/1/2002", "1/7/2002", name=name, tz=tz, freq="b")
+ expected = expected.repeat([1, 1, 1, 2, 2])
+ tm.assert_index_equal(result, expected)
+ assert result.tz == expected.tz
+ assert result.freq is None
+ assert expected.freq is None
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 1505ac1dff29c..cd8e8c3542cce 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -573,13 +573,7 @@ def test_dti_construction_ambiguous_endpoint(self, tz):
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
-
- if str(tz).startswith("dateutil"):
- # fixed ambiguous behavior
- # see GH#14621
- assert times[-1] == Timestamp("2013-10-27 01:00:00+0100", tz=tz, freq="H")
- else:
- assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
+ assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
@@ -587,12 +581,7 @@ def test_dti_construction_ambiguous_endpoint(self, tz):
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
- pytest.param(
- "dateutil/US/Pacific",
- "shift_backward",
- "2019-03-10 01:00",
- marks=pytest.mark.xfail(reason="GH 24329"),
- ),
+ ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index fe65653ba6545..7abf810e6bcfc 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1583,7 +1583,7 @@ def test_string_na_nat_conversion(self, cache):
for i in range(5):
x = series[i]
if isna(x):
- expected[i] = iNaT
+ expected[i] = pd.NaT
else:
expected[i] = to_datetime(x, cache=cache)
@@ -1874,7 +1874,7 @@ def test_parsers(self, date_str, expected, cache):
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
- result1, _, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
+ result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
@@ -1910,7 +1910,7 @@ def test_na_values_with_cache(
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
- result1, _, _ = parsing.parse_time_string("NaT")
+ result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
@@ -1986,7 +1986,7 @@ def test_parsers_dayfirst_yearfirst(self, cache):
)
assert dateutil_result == expected
- result1, _, _ = parsing.parse_time_string(
+ result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
@@ -2016,7 +2016,7 @@ def test_parsers_timestring(self, cache):
}
for date_str, (exp_now, exp_def) in cases.items():
- result1, _, _ = parsing.parse_time_string(date_str)
+ result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 2b1742d58b77e..c94af6c0d533e 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -67,7 +67,7 @@ def test_astype_cannot_cast(self, index, dtype):
index.astype(dtype)
def test_astype_invalid_dtype(self, index):
- msg = 'data type "fake_dtype" not understood'
+ msg = "data type [\"']fake_dtype[\"'] not understood"
with pytest.raises(TypeError, match=msg):
index.astype("fake_dtype")
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 13a45df743cf5..837c124db2bed 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -164,7 +164,7 @@ def test_generic_errors(self, constructor):
constructor(dtype="int64", **filler)
# invalid dtype
- msg = 'data type "invalid" not understood'
+ msg = "data type [\"']invalid[\"'] not understood"
with pytest.raises(TypeError, match=msg):
constructor(dtype="invalid", **filler)
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 1bfc58733a110..87b72f702e2aa 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -312,6 +312,18 @@ def test_get_indexer_non_unique_with_int_and_float(self, query, expected):
# TODO we may also want to test get_indexer for the case when
# the intervals are duplicated, decreasing, non-monotonic, etc..
+ def test_get_indexer_non_monotonic(self):
+ # GH 16410
+ idx1 = IntervalIndex.from_tuples([(2, 3), (4, 5), (0, 1)])
+ idx2 = IntervalIndex.from_tuples([(0, 1), (2, 3), (6, 7), (8, 9)])
+ result = idx1.get_indexer(idx2)
+ expected = np.array([2, 0, -1, -1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = idx1.get_indexer(idx1[1:])
+ expected = np.array([1, 2], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
class TestSliceLocs:
def test_slice_locs_with_interval(self):
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index ac1e0893683d1..e64511efd7ffb 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -218,7 +218,7 @@ def test_take_fill_value():
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
- msg = "index -5 is out of bounds for size 4"
+ msg = "index -5 is out of bounds for( axis 0 with)? size 4"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
@@ -326,7 +326,7 @@ def test_map_dictlike(idx, mapper):
)
def test_numpy_ufuncs(idx, func):
# test ufuncs of numpy. see:
- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html
+ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html
if _np_version_under1p17:
expected_exception = AttributeError
@@ -334,8 +334,8 @@ def test_numpy_ufuncs(idx, func):
else:
expected_exception = TypeError
msg = (
- "loop of ufunc does not support argument 0 of type tuple which"
- f" has no callable {func.__name__} method"
+ "loop of ufunc does not support argument 0 of type tuple which "
+ f"has no callable {func.__name__} method"
)
with pytest.raises(expected_exception, match=msg):
func(idx)
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index d92cff1e10496..545a7ddef29bb 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -112,10 +112,6 @@ def test_ndarray_compat_properties(idx, compat_props):
idx.values.nbytes
-def test_compat(indices):
- assert indices.tolist() == list(indices)
-
-
def test_pickle_compat_construction(holder):
# this is testing for pickle compat
# need an object to create with
diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py
index 8956e6ed4996f..bfc432a18458a 100644
--- a/pandas/tests/indexes/multi/test_conversion.py
+++ b/pandas/tests/indexes/multi/test_conversion.py
@@ -142,17 +142,6 @@ def test_roundtrip_pickle_with_tz():
assert index.equal_levels(unpickled)
-def test_pickle(indices):
- return # FIXME: this can't be right?
-
- unpickled = tm.round_trip_pickle(indices)
- assert indices.equals(unpickled)
- original_name, indices.name = indices.name, "foo"
- unpickled = tm.round_trip_pickle(indices)
- assert indices.equals(unpickled)
- indices.name = original_name
-
-
def test_to_series(idx):
# assert that we are creating a copy of the index
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index ad6f06d065150..b08280a712642 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -396,7 +396,8 @@ def test_get_loc_missing_nan():
idx.get_loc(3)
with pytest.raises(KeyError, match=r"^nan$"):
idx.get_loc(np.nan)
- with pytest.raises(KeyError, match=r"^\[nan\]$"):
+ with pytest.raises(TypeError, match="unhashable type: 'list'"):
+ # listlike/non-hashable raises TypeError
idx.get_loc([np.nan])
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index f2ec15e0af88c..fd150bb4d57a2 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -250,25 +250,6 @@ def test_rangeindex_fallback_coercion_bug():
tm.assert_index_equal(result, expected)
-def test_hash_error(indices):
- index = indices
- with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"):
- hash(indices)
-
-
-def test_mutability(indices):
- if not len(indices):
- return
- msg = "Index does not support mutable operations"
- with pytest.raises(TypeError, match=msg):
- indices[0] = indices[0]
-
-
-def test_wrong_number_names(indices):
- with pytest.raises(ValueError, match="^Length"):
- indices.names = ["apple", "banana", "carrot"]
-
-
def test_memory_usage(idx):
result = idx.memory_usage()
if len(idx):
diff --git a/pandas/tests/indexes/multi/test_monotonic.py b/pandas/tests/indexes/multi/test_monotonic.py
index b5c73d5e97745..ca1cb0932f63d 100644
--- a/pandas/tests/indexes/multi/test_monotonic.py
+++ b/pandas/tests/indexes/multi/test_monotonic.py
@@ -1,9 +1,7 @@
import numpy as np
-import pytest
import pandas as pd
-from pandas import Index, IntervalIndex, MultiIndex
-from pandas.api.types import is_scalar
+from pandas import Index, MultiIndex
def test_is_monotonic_increasing():
@@ -176,55 +174,3 @@ def test_is_strictly_monotonic_decreasing():
)
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
-
-
-def test_searchsorted_monotonic(indices):
- # GH17271
- # not implemented for tuple searches in MultiIndex
- # or Intervals searches in IntervalIndex
- if isinstance(indices, (MultiIndex, IntervalIndex)):
- return
-
- # nothing to test if the index is empty
- if indices.empty:
- return
- value = indices[0]
-
- # determine the expected results (handle dupes for 'right')
- expected_left, expected_right = 0, (indices == value).argmin()
- if expected_right == 0:
- # all values are the same, expected_right should be length
- expected_right = len(indices)
-
- # test _searchsorted_monotonic in all cases
- # test searchsorted only for increasing
- if indices.is_monotonic_increasing:
- ssm_left = indices._searchsorted_monotonic(value, side="left")
- assert is_scalar(ssm_left)
- assert expected_left == ssm_left
-
- ssm_right = indices._searchsorted_monotonic(value, side="right")
- assert is_scalar(ssm_right)
- assert expected_right == ssm_right
-
- ss_left = indices.searchsorted(value, side="left")
- assert is_scalar(ss_left)
- assert expected_left == ss_left
-
- ss_right = indices.searchsorted(value, side="right")
- assert is_scalar(ss_right)
- assert expected_right == ss_right
-
- elif indices.is_monotonic_decreasing:
- ssm_left = indices._searchsorted_monotonic(value, side="left")
- assert is_scalar(ssm_left)
- assert expected_left == ssm_left
-
- ssm_right = indices._searchsorted_monotonic(value, side="right")
- assert is_scalar(ssm_right)
- assert expected_right == ssm_right
-
- else:
- # non-monotonic should raise.
- with pytest.raises(ValueError):
- indices._searchsorted_monotonic(value, side="left")
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 841e3b3f17b38..f949db537de67 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -110,6 +110,17 @@ def test_symmetric_difference(idx, sort):
first.symmetric_difference([1, 2, 3], sort=sort)
+def test_multiindex_symmetric_difference():
+ # GH 13490
+ idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"])
+ result = idx ^ idx
+ assert result.names == idx.names
+
+ idx2 = idx.copy().rename(["A", "B"])
+ result = idx ^ idx2
+ assert result.names == [None, None]
+
+
def test_empty(idx):
# GH 15270
assert not idx.empty
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index 277bd79cfe953..50242c1cac549 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -66,11 +66,6 @@ def test_sortlevel_deterministic():
assert sorted_idx.equals(expected[::-1])
-def test_sort(indices):
- with pytest.raises(TypeError):
- indices.sort()
-
-
def test_numpy_argsort(idx):
result = np.argsort(idx)
expected = idx.argsort()
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index 27ee915e48e5c..dcd3c8e946e9a 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -322,22 +322,33 @@ def test_constructor_mixed(self):
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
- result = idx._simple_new(idx, name="p", freq=idx.freq)
+
+ with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
+ idx._simple_new(idx, name="p", freq=idx.freq)
+
+ result = idx._simple_new(idx._data, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
- result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
+ with pytest.raises(AssertionError):
+ # Need ndarray, not Int64Index
+ type(idx._data)._simple_new(idx.astype("i8"), freq=idx.freq)
+
+ arr = type(idx._data)._simple_new(idx.asi8, freq=idx.freq)
+ result = idx._simple_new(arr, name="p")
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
- result = idx._simple_new(idx, name="p", freq="M")
+ with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
+ idx._simple_new(idx, name="p", freq="M")
+
+ result = idx._simple_new(idx._data, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
- msg = r"PeriodIndex\._simple_new does not accept floats"
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(AssertionError, match="<class "):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 7dbefbdaff98e..38514594efe09 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -1,4 +1,5 @@
from datetime import datetime, timedelta
+import re
import numpy as np
import pytest
@@ -8,6 +9,7 @@
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
+from pandas.core.indexes.base import InvalidIndexError
class TestGetItem:
@@ -357,7 +359,7 @@ def test_take_fill_value(self):
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
- msg = "index -5 is out of bounds for size 3"
+ msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
@@ -408,11 +410,7 @@ def test_get_loc(self):
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
- msg = (
- r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
- r" dtype='period\[D\]', freq='D'\)' is an invalid key"
- )
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(InvalidIndexError, match=re.escape(str(idx0))):
idx0.get_loc(idx0)
# get the location of p1/p2 from
@@ -433,11 +431,7 @@ def test_get_loc(self):
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
- msg = (
- r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
- r" dtype='period\[D\]', freq='D'\)' is an invalid key"
- )
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(InvalidIndexError, match=re.escape(str(idx1))):
idx1.get_loc(idx1)
# get the location of p1/p2 from
@@ -451,6 +445,58 @@ def test_get_loc(self):
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
+ def test_get_loc_integer(self):
+ dti = pd.date_range("2016-01-01", periods=3)
+ pi = dti.to_period("D")
+ with pytest.raises(KeyError, match="16801"):
+ pi.get_loc(16801)
+
+ pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
+ with pytest.raises(KeyError, match="46"):
+ pi2.get_loc(46)
+
+ @pytest.mark.parametrize("freq", ["H", "D"])
+ def test_get_value_datetime_hourly(self, freq):
+ # get_loc and get_value should treat datetime objects symmetrically
+ dti = pd.date_range("2016-01-01", periods=3, freq="MS")
+ pi = dti.to_period(freq)
+ ser = pd.Series(range(7, 10), index=pi)
+
+ ts = dti[0]
+
+ assert pi.get_loc(ts) == 0
+ assert pi.get_value(ser, ts) == 7
+ assert ser[ts] == 7
+ assert ser.loc[ts] == 7
+
+ ts2 = ts + pd.Timedelta(hours=3)
+ if freq == "H":
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ pi.get_loc(ts2)
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ pi.get_value(ser, ts2)
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ ser[ts2]
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ ser.loc[ts2]
+ else:
+ assert pi.get_loc(ts2) == 0
+ assert pi.get_value(ser, ts2) == 7
+ assert ser[ts2] == 7
+ assert ser.loc[ts2] == 7
+
+ def test_get_value_integer(self):
+ dti = pd.date_range("2016-01-01", periods=3)
+ pi = dti.to_period("D")
+ ser = pd.Series(range(3), index=pi)
+ with pytest.raises(IndexError, match="index out of bounds"):
+ pi.get_value(ser, 16801)
+
+ pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
+ ser2 = pd.Series(range(3), index=pi2)
+ with pytest.raises(IndexError, match="index out of bounds"):
+ pi2.get_value(ser2, 46)
+
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
@@ -496,12 +542,20 @@ def test_contains(self):
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
+ ser = pd.Series(range(6, 9), index=idx0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
- assert "2017-09-01 00:00:01" in idx0
+ # GH#31172
+ # Higher-resolution period-like are _not_ considered as contained
+ key = "2017-09-01 00:00:01"
+ assert key not in idx0
+ with pytest.raises(KeyError, match=key):
+ idx0.get_loc(key)
+ with pytest.raises(KeyError, match=key):
+ idx0.get_value(ser, key)
assert "2017-09" in idx0
@@ -514,25 +568,25 @@ def test_get_value(self):
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
- input0 = np.array([1, 2, 3])
+ input0 = pd.Series(np.array([1, 2, 3]), index=idx0)
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
- input1 = np.array([1, 2, 3])
- expected1 = np.array([1, 2])
+ input1 = pd.Series(np.array([1, 2, 3]), index=idx1)
+ expected1 = input1.iloc[[0, 1]]
result1 = idx1.get_value(input1, p1)
- tm.assert_numpy_array_equal(result1, expected1)
+ tm.assert_series_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
- input2 = np.array([1, 2, 3])
- expected2 = np.array([1, 3])
+ input2 = pd.Series(np.array([1, 2, 3]), index=idx2)
+ expected2 = input2.iloc[[0, 2]]
result2 = idx2.get_value(input2, p1)
- tm.assert_numpy_array_equal(result2, expected2)
+ tm.assert_series_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 9ca2dd169416f..833901ea7ba22 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -7,9 +7,6 @@
class TestPeriodIndex:
- def setup_method(self, method):
- pass
-
def test_slice_with_negative_step(self):
ts = Series(np.arange(20), period_range("2014-01", periods=20, freq="M"))
SLC = pd.IndexSlice
@@ -133,3 +130,53 @@ def test_range_slice_outofbounds(self):
tm.assert_frame_equal(df["2013/10/15":"2013/10/17"], empty)
tm.assert_frame_equal(df["2013-06":"2013-09"], empty)
tm.assert_frame_equal(df["2013-11":"2013-12"], empty)
+
+ def test_partial_slice_doesnt_require_monotonicity(self):
+ # See also: DatetimeIndex test of the same name
+ dti = pd.date_range("2014-01-01", periods=30, freq="30D")
+ pi = dti.to_period("D")
+
+ ser_montonic = pd.Series(np.arange(30), index=pi)
+
+ shuffler = list(range(0, 30, 2)) + list(range(1, 31, 2))
+ ser = ser_montonic[shuffler]
+ nidx = ser.index
+
+ # Manually identified locations of year==2014
+ indexer_2014 = np.array(
+ [0, 1, 2, 3, 4, 5, 6, 15, 16, 17, 18, 19, 20], dtype=np.intp
+ )
+ assert (nidx[indexer_2014].year == 2014).all()
+ assert not (nidx[~indexer_2014].year == 2014).any()
+
+ result = nidx.get_loc("2014")
+ tm.assert_numpy_array_equal(result, indexer_2014)
+
+ expected = ser[indexer_2014]
+
+ result = nidx.get_value(ser, "2014")
+ tm.assert_series_equal(result, expected)
+
+ result = ser.loc["2014"]
+ tm.assert_series_equal(result, expected)
+
+ result = ser["2014"]
+ tm.assert_series_equal(result, expected)
+
+ # Manually identified locations where ser.index is within May 2015
+ indexer_may2015 = np.array([23], dtype=np.intp)
+ assert nidx[23].year == 2015 and nidx[23].month == 5
+
+ result = nidx.get_loc("May 2015")
+ tm.assert_numpy_array_equal(result, indexer_may2015)
+
+ expected = ser[indexer_may2015]
+
+ result = nidx.get_value(ser, "May 2015")
+ tm.assert_series_equal(result, expected)
+
+ result = ser.loc["May 2015"]
+ tm.assert_series_equal(result, expected)
+
+ result = ser["May 2015"]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 28ab14af71362..23350fdff4b78 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -249,7 +249,12 @@ def test_searchsorted_invalid(self):
other = np.array([0, 1], dtype=np.int64)
- msg = "requires either a Period or PeriodArray"
+ msg = "|".join(
+ [
+ "searchsorted requires compatible dtype or scalar",
+ "Unexpected type for 'value'",
+ ]
+ )
with pytest.raises(TypeError, match=msg):
pidx.searchsorted(other)
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 8d98ab18963b6..0e5d1d45ad6db 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -735,8 +735,9 @@ def test_engineless_lookup(self):
assert "_engine" not in idx._cache
- # The engine is still required for lookup of a different dtype scalar:
+ # Different types of scalars can be excluded immediately, no need to
+ # use the _engine
with pytest.raises(KeyError, match="'a'"):
- assert idx.get_loc("a") == -1
+ idx.get_loc("a")
- assert "_engine" in idx._cache
+ assert "_engine" not in idx._cache
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
new file mode 100644
index 0000000000000..0db63f615c4f8
--- /dev/null
+++ b/pandas/tests/indexes/test_any_index.py
@@ -0,0 +1,34 @@
+"""
+Tests that can be parametrized over _any_ Index object.
+
+TODO: consider using hypothesis for these.
+"""
+import pytest
+
+
+def test_sort(indices):
+ with pytest.raises(TypeError):
+ indices.sort()
+
+
+def test_hash_error(indices):
+ index = indices
+ with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"):
+ hash(indices)
+
+
+def test_mutability(indices):
+ if not len(indices):
+ return
+ msg = "Index does not support mutable operations"
+ with pytest.raises(TypeError, match=msg):
+ indices[0] = indices[0]
+
+
+def test_wrong_number_names(indices):
+ with pytest.raises(ValueError, match="^Length"):
+ indices.names = ["apple", "banana", "carrot"]
+
+
+def test_tolist_matches_list(indices):
+ assert indices.tolist() == list(indices)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 1047c457d6b82..e72963de09ab4 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1915,7 +1915,12 @@ def test_get_value(self, index):
values = np.random.randn(100)
value = index[67]
- tm.assert_almost_equal(index.get_value(values, value), values[67])
+ with pytest.raises(AttributeError, match="has no attribute '_values'"):
+ # Index.get_value requires a Series, not an ndarray
+ index.get_value(values, value)
+
+ result = index.get_value(Series(values, index=values), value)
+ tm.assert_almost_equal(result, values[67])
@pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
@pytest.mark.parametrize(
@@ -2408,7 +2413,17 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; idx = pd.Index([1, 2])"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # GH 31324 newer jedi version raises Deprecation warning
+ import jedi
+
+ if jedi.__version__ < "0.16.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("idx.", 4))
diff --git a/pandas/tests/indexing/test_indexing_engines.py b/pandas/tests/indexes/test_engines.py
similarity index 69%
rename from pandas/tests/indexing/test_indexing_engines.py
rename to pandas/tests/indexes/test_engines.py
index edb5d7d7f3a57..9ea70a457e516 100644
--- a/pandas/tests/indexing/test_indexing_engines.py
+++ b/pandas/tests/indexes/test_engines.py
@@ -1,10 +1,85 @@
+import re
+
import numpy as np
+import pytest
from pandas._libs import algos as libalgos, index as libindex
+import pandas as pd
import pandas._testing as tm
+@pytest.fixture(
+ params=[
+ (libindex.Int64Engine, np.int64),
+ (libindex.Int32Engine, np.int32),
+ (libindex.Int16Engine, np.int16),
+ (libindex.Int8Engine, np.int8),
+ (libindex.UInt64Engine, np.uint64),
+ (libindex.UInt32Engine, np.uint32),
+ (libindex.UInt16Engine, np.uint16),
+ (libindex.UInt8Engine, np.uint8),
+ (libindex.Float64Engine, np.float64),
+ (libindex.Float32Engine, np.float32),
+ ],
+ ids=lambda x: x[0].__name__,
+)
+def numeric_indexing_engine_type_and_dtype(request):
+ return request.param
+
+
+class TestDatetimeEngine:
+ @pytest.mark.parametrize(
+ "scalar",
+ [
+ pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),
+ pd.Timestamp("2016-01-01").value,
+ pd.Timestamp("2016-01-01").to_pydatetime(),
+ pd.Timestamp("2016-01-01").to_datetime64(),
+ ],
+ )
+ def test_not_contains_requires_timestamp(self, scalar):
+ dti1 = pd.date_range("2016-01-01", periods=3)
+ dti2 = dti1.insert(1, pd.NaT) # non-monotonic
+ dti3 = dti1.insert(3, dti1[0]) # non-unique
+ dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000)
+ dti5 = dti4.insert(0, dti4[0]) # over size threshold, not unique
+
+ msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
+ for dti in [dti1, dti2, dti3, dti4, dti5]:
+ with pytest.raises(TypeError, match=msg):
+ scalar in dti._engine
+
+ with pytest.raises(KeyError, match=msg):
+ dti._engine.get_loc(scalar)
+
+
+class TestTimedeltaEngine:
+ @pytest.mark.parametrize(
+ "scalar",
+ [
+ pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
+ pd.Timedelta(days=42).value,
+ pd.Timedelta(days=42).to_pytimedelta(),
+ pd.Timedelta(days=42).to_timedelta64(),
+ ],
+ )
+ def test_not_contains_requires_timestamp(self, scalar):
+ tdi1 = pd.timedelta_range("42 days", freq="9h", periods=1234)
+ tdi2 = tdi1.insert(1, pd.NaT) # non-monotonic
+ tdi3 = tdi1.insert(3, tdi1[0]) # non-unique
+ tdi4 = pd.timedelta_range("42 days", freq="ns", periods=2_000_000)
+ tdi5 = tdi4.insert(0, tdi4[0]) # over size threshold, not unique
+
+ msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
+ for tdi in [tdi1, tdi2, tdi3, tdi4, tdi5]:
+ with pytest.raises(TypeError, match=msg):
+ scalar in tdi._engine
+
+ with pytest.raises(KeyError, match=msg):
+ tdi._engine.get_loc(scalar)
+
+
class TestNumericEngine:
def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index f025168643ab9..992a91ad8a528 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -188,8 +188,8 @@ def test_constructor_invalid(self):
# invalid
msg = (
- r"Float64Index\(\.\.\.\) must be called with a collection of"
- r" some kind, 0\.0 was passed"
+ r"Float64Index\(\.\.\.\) must be called with a collection of "
+ r"some kind, 0\.0 was passed"
)
with pytest.raises(TypeError, match=msg):
Float64Index(0.0)
@@ -389,9 +389,52 @@ def test_get_loc_missing_nan(self):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
- with pytest.raises(KeyError, match=r"^\[nan\]$"):
+ with pytest.raises(TypeError, match=r"'\[nan\]' is an invalid key"):
+ # listlike/non-hashable raises TypeError
idx.get_loc([np.nan])
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ pd.date_range("2016-01-01", periods=3),
+ pd.timedelta_range("1 Day", periods=3),
+ ],
+ )
+ def test_lookups_datetimelike_values(self, vals):
+ # If we have datetime64 or timedelta64 values, make sure they are
+ # wrappped correctly GH#31163
+ ser = pd.Series(vals, index=range(3, 6))
+ ser.index = ser.index.astype("float64")
+
+ expected = vals[1]
+
+ result = ser.index.get_value(ser, 4.0)
+ assert isinstance(result, type(expected)) and result == expected
+ result = ser.index.get_value(ser, 4)
+ assert isinstance(result, type(expected)) and result == expected
+
+ result = ser[4.0]
+ assert isinstance(result, type(expected)) and result == expected
+ result = ser[4]
+ assert isinstance(result, type(expected)) and result == expected
+
+ result = ser.loc[4.0]
+ assert isinstance(result, type(expected)) and result == expected
+ result = ser.loc[4]
+ assert isinstance(result, type(expected)) and result == expected
+
+ result = ser.at[4.0]
+ assert isinstance(result, type(expected)) and result == expected
+ # GH#31329 .at[4] should cast to 4.0, matching .loc behavior
+ result = ser.at[4]
+ assert isinstance(result, type(expected)) and result == expected
+
+ result = ser.iloc[1]
+ assert isinstance(result, type(expected)) and result == expected
+
+ result = ser.iat[1]
+ assert isinstance(result, type(expected)) and result == expected
+
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
assert np.nan in i
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index 583556656ac87..8b62de7030096 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -46,7 +46,7 @@
)
def test_numpy_ufuncs_basic(indices, func):
# test ufuncs of numpy, see:
- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html
+ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html
idx = indices
if isinstance(idx, DatetimeIndexOpsMixin):
@@ -77,10 +77,13 @@ def test_numpy_ufuncs_basic(indices, func):
)
def test_numpy_ufuncs_other(indices, func):
# test ufuncs of numpy, see:
- # http://docs.scipy.org/doc/numpy/reference/ufuncs.html
+ # https://docs.scipy.org/doc/numpy/reference/ufuncs.html
idx = indices
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
+ if isinstance(idx, DatetimeIndex) and idx.tz is not None:
+ if func in [np.isfinite, np.isnan, np.isinf]:
+ pytest.xfail(reason="__array_ufunc__ is not defined")
if not _np_version_under1p18 and func in [np.isfinite, np.isinf, np.isnan]:
# numpy 1.18(dev) changed isinf and isnan to not raise on dt64/tfd64
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index 39abbf59d1e56..32e6821e87f05 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -47,6 +47,12 @@ def test_infer_from_tdi_mismatch(self):
# GH#23789
TimedeltaArray(tdi, freq="D")
+ with pytest.raises(ValueError, match=msg):
+ TimedeltaIndex(tdi._data, freq="D")
+
+ with pytest.raises(ValueError, match=msg):
+ TimedeltaArray(tdi._data, freq="D")
+
def test_dt64_data_invalid(self):
# GH#23539
# passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64]
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index e8665ee1a3555..14fff6f9c85b5 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -1,4 +1,5 @@
from datetime import datetime, timedelta
+import re
import numpy as np
import pytest
@@ -48,12 +49,19 @@ def test_getitem(self):
@pytest.mark.parametrize(
"key",
- [pd.Timestamp("1970-01-01"), pd.Timestamp("1970-01-02"), datetime(1970, 1, 1)],
+ [
+ pd.Timestamp("1970-01-01"),
+ pd.Timestamp("1970-01-02"),
+ datetime(1970, 1, 1),
+ pd.Timestamp("1970-01-03").to_datetime64(),
+ # non-matching NA values
+ np.datetime64("NaT"),
+ ],
)
def test_timestamp_invalid_key(self, key):
# GH#20464
tdi = pd.timedelta_range(0, periods=10)
- with pytest.raises(TypeError):
+ with pytest.raises(KeyError, match=re.escape(repr(key))):
tdi.get_loc(key)
diff --git a/pandas/tests/indexing/conftest.py b/pandas/tests/indexing/conftest.py
deleted file mode 100644
index 142bedaa943a6..0000000000000
--- a/pandas/tests/indexing/conftest.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas._libs import index as libindex
-
-
-@pytest.fixture(
- params=[
- (libindex.Int64Engine, np.int64),
- (libindex.Int32Engine, np.int32),
- (libindex.Int16Engine, np.int16),
- (libindex.Int8Engine, np.int8),
- (libindex.UInt64Engine, np.uint64),
- (libindex.UInt32Engine, np.uint32),
- (libindex.UInt16Engine, np.uint16),
- (libindex.UInt8Engine, np.uint8),
- (libindex.Float64Engine, np.float64),
- (libindex.Float32Engine, np.float32),
- ],
- ids=lambda x: x[0].__name__,
-)
-def numeric_indexing_engine_type_and_dtype(request):
- return request.param
diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py
index 8ea825da8f94f..c15fa34283f21 100644
--- a/pandas/tests/indexing/multiindex/test_getitem.py
+++ b/pandas/tests/indexing/multiindex/test_getitem.py
@@ -250,3 +250,13 @@ def test_frame_mi_access_returns_frame(dataframe_with_duplicate_index):
).T
result = df["A"]["B2"]
tm.assert_frame_equal(result, expected)
+
+
+def test_frame_mi_empty_slice():
+ # GH 15454
+ df = DataFrame(0, index=range(2), columns=MultiIndex.from_product([[1], [2]]))
+ result = df[[]]
+ expected = DataFrame(
+ index=[0, 1], columns=MultiIndex(levels=[[1], [2]], codes=[[], []])
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_set_ops.py b/pandas/tests/indexing/multiindex/test_insert.py
similarity index 72%
rename from pandas/tests/indexing/multiindex/test_set_ops.py
rename to pandas/tests/indexing/multiindex/test_insert.py
index f2cbfadb3cfa5..835e61da2fb3e 100644
--- a/pandas/tests/indexing/multiindex/test_set_ops.py
+++ b/pandas/tests/indexing/multiindex/test_insert.py
@@ -4,17 +4,7 @@
import pandas._testing as tm
-class TestMultiIndexSetOps:
- def test_multiindex_symmetric_difference(self):
- # GH 13490
- idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"])
- result = idx ^ idx
- assert result.names == idx.names
-
- idx2 = idx.copy().rename(["A", "B"])
- result = idx ^ idx2
- assert result.names == [None, None]
-
+class TestMultiIndexInsertion:
def test_mixed_depth_insert(self):
arrays = [
["a", "top", "top", "routine1", "routine1", "routine2"],
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 3b8aa963ac698..b7802d9b8fe0c 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -468,3 +468,22 @@ def test_loc_period_string_indexing():
),
)
tm.assert_series_equal(result, expected)
+
+
+def test_loc_datetime_mask_slicing():
+ # GH 16699
+ dt_idx = pd.to_datetime(["2017-05-04", "2017-05-05"])
+ m_idx = pd.MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"])
+ df = pd.DataFrame(
+ data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"]
+ )
+ result = df.loc[(dt_idx[0], (df.index.get_level_values(1) > "2017-05-04")), "C1"]
+ expected = pd.Series(
+ [3],
+ name="C1",
+ index=MultiIndex.from_tuples(
+ [(pd.Timestamp("2017-05-04"), pd.Timestamp("2017-05-05"))],
+ names=["Idx1", "Idx2"],
+ ),
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 8163de8588232..0064187a94265 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -92,3 +92,22 @@ def test_contains(self):
assert tx[0] in idx
assert "element_not_exit" not in idx
assert "0 day 09:30:00" in idx
+
+ def test_nested_tuples_duplicates(self):
+ # GH#30892
+
+ dti = pd.to_datetime(["20190101", "20190101", "20190102"])
+ idx = pd.Index(["a", "a", "c"])
+ mi = pd.MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
+
+ df = pd.DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
+
+ expected = pd.DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
+
+ df2 = df.copy(deep=True)
+ df2.loc[(dti[0], "a"), "c2"] = 1.0
+ tm.assert_frame_equal(df2, expected)
+
+ df3 = df.copy(deep=True)
+ df3.loc[[(dti[0], "a")], "c2"] = 1.0
+ tm.assert_frame_equal(df3, expected)
diff --git a/pandas/tests/indexing/test_check_indexer.py b/pandas/tests/indexing/test_check_indexer.py
new file mode 100644
index 0000000000000..82f8c12229824
--- /dev/null
+++ b/pandas/tests/indexing/test_check_indexer.py
@@ -0,0 +1,97 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.api.indexers import check_array_indexer
+
+
+@pytest.mark.parametrize(
+ "indexer, expected",
+ [
+ # integer
+ ([1, 2], np.array([1, 2], dtype=np.intp)),
+ (np.array([1, 2], dtype="int64"), np.array([1, 2], dtype=np.intp)),
+ (pd.array([1, 2], dtype="Int32"), np.array([1, 2], dtype=np.intp)),
+ (pd.Index([1, 2]), np.array([1, 2], dtype=np.intp)),
+ # boolean
+ ([True, False, True], np.array([True, False, True], dtype=np.bool_)),
+ (np.array([True, False, True]), np.array([True, False, True], dtype=np.bool_)),
+ (
+ pd.array([True, False, True], dtype="boolean"),
+ np.array([True, False, True], dtype=np.bool_),
+ ),
+ # other
+ ([], np.array([], dtype=np.intp)),
+ ],
+)
+def test_valid_input(indexer, expected):
+ array = np.array([1, 2, 3])
+ result = check_array_indexer(array, indexer)
+ tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")],
+)
+def test_bool_raise_missing_values(indexer):
+ array = np.array([1, 2, 3])
+
+ msg = "Cannot mask with a boolean indexer containing NA values"
+ with pytest.raises(ValueError, match=msg):
+ check_array_indexer(array, indexer)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [
+ [True, False],
+ pd.array([True, False], dtype="boolean"),
+ np.array([True, False], dtype=np.bool_),
+ ],
+)
+def test_bool_raise_length(indexer):
+ array = np.array([1, 2, 3])
+
+ msg = "Boolean index has wrong length"
+ with pytest.raises(IndexError, match=msg):
+ check_array_indexer(array, indexer)
+
+
+@pytest.mark.parametrize(
+ "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")],
+)
+def test_int_raise_missing_values(indexer):
+ array = np.array([1, 2, 3])
+
+ msg = "Cannot index with an integer indexer containing NA values"
+ with pytest.raises(ValueError, match=msg):
+ check_array_indexer(array, indexer)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [
+ [0.0, 1.0],
+ np.array([1.0, 2.0], dtype="float64"),
+ np.array([True, False], dtype=object),
+ pd.Index([True, False], dtype=object),
+ pd.array(["a", "b"], dtype="string"),
+ ],
+)
+def test_raise_invalid_array_dtypes(indexer):
+ array = np.array([1, 2, 3])
+
+ msg = "arrays used as indices must be of integer or boolean type"
+ with pytest.raises(IndexError, match=msg):
+ check_array_indexer(array, indexer)
+
+
+@pytest.mark.parametrize(
+ "indexer", [None, Ellipsis, slice(0, 3), (None,)],
+)
+def test_pass_through_non_array_likes(indexer):
+ array = np.array([1, 2, 3])
+
+ result = check_array_indexer(array, indexer)
+ assert result == indexer
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 2cc8232566aa9..5530896a90941 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -123,9 +123,9 @@ def test_scalar_non_numeric(self):
# setting with a float fails with iloc
msg = (
- r"cannot do (label|index|positional) indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=type(i), kind=str(float))
+ r"cannot do (label|index|positional) indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -160,9 +160,9 @@ def test_scalar_non_numeric(self):
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
- r"cannot do (label|index) indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=type(i), kind=str(float))
+ r"cannot do (label|index) indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[3.0]
@@ -177,9 +177,9 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
- r"cannot do label indexing"
- r" on {klass} with these indexers \[1\.0\] of"
- r" {kind}|"
+ r"cannot do label indexing "
+ r"on {klass} with these indexers \[1\.0\] of "
+ r"{kind}|"
"Cannot index by location index with a non-integer key".format(
klass=str(Index), kind=str(float)
)
@@ -199,9 +199,9 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x]:
msg = (
- r"cannot do label indexing"
- r" on {klass} with these indexers \[1\.0\] of"
- r" {kind}".format(klass=str(Index), kind=str(float))
+ r"cannot do label indexing "
+ r"on {klass} with these indexers \[1\.0\] of "
+ r"{kind}".format(klass=str(Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
@@ -313,9 +313,9 @@ def test_scalar_float(self):
s.iloc[3.0]
msg = (
- r"cannot do positional indexing"
- r" on {klass} with these indexers \[3\.0\] of"
- r" {kind}".format(klass=str(Float64Index), kind=str(float))
+ r"cannot do positional indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"{kind}".format(klass=str(Float64Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
@@ -379,10 +379,10 @@ def test_slice_non_numeric(self):
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing"
- r" on {klass} with these indexers"
- r" \[(3|4)(\.0)?\]"
- r" of ({kind_float}|{kind_int})".format(
+ "cannot do slice indexing "
+ r"on {klass} with these indexers "
+ r"\[(3|4)(\.0)?\] "
+ r"of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 26dedf02e7333..d67259e8b7d40 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -249,10 +249,10 @@ def test_iloc_getitem_bool(self):
def test_iloc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
- with pytest.raises(
- IndexError,
- match=("Item wrong length {} instead of {}.".format(len(index), len(s))),
- ):
+ msg = "Boolean index has wrong length: {} instead of {}".format(
+ len(index), len(s)
+ )
+ with pytest.raises(IndexError, match=msg):
_ = s.iloc[index]
def test_iloc_getitem_slice(self):
@@ -437,9 +437,9 @@ def test_iloc_getitem_labelled_frame(self):
# trying to use a label
msg = (
- r"Location based indexing can only have \[integer, integer"
- r" slice \(START point is INCLUDED, END point is EXCLUDED\),"
- r" listlike of integers, boolean array\] types"
+ r"Location based indexing can only have \[integer, integer "
+ r"slice \(START point is INCLUDED, END point is EXCLUDED\), "
+ r"listlike of integers, boolean array\] types"
)
with pytest.raises(ValueError, match=msg):
df.iloc["j", "D"]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 448a06070c45c..1913caae93932 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -81,8 +81,8 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
nd3 = np.random.randint(5, size=(2, 2, 2))
msg = (
- r"Buffer has wrong number of dimensions \(expected 1,"
- r" got 3\)|"
+ r"Buffer has wrong number of dimensions \(expected 1, "
+ r"got 3\)|"
"Cannot index with multidimensional key|"
r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]|"
"Index data must be 1-dimensional"
@@ -134,8 +134,8 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
nd3 = np.random.randint(5, size=(2, 2, 2))
msg = (
- r"Buffer has wrong number of dimensions \(expected 1,"
- r" got 3\)|"
+ r"Buffer has wrong number of dimensions \(expected 1, "
+ r"got 3\)|"
"'pandas._libs.interval.IntervalTree' object has no attribute "
"'set_value'|" # AttributeError
"unhashable type: 'numpy.ndarray'|" # TypeError
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index a36078b11c663..b9dc96adfa738 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -200,10 +200,10 @@ def test_loc_getitem_bool(self):
def test_loc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
- with pytest.raises(
- IndexError,
- match=("Item wrong length {} instead of {}.".format(len(index), len(s))),
- ):
+ msg = "Boolean index has wrong length: {} instead of {}".format(
+ len(index), len(s)
+ )
+ with pytest.raises(IndexError, match=msg):
_ = s.loc[index]
def test_loc_getitem_int_slice(self):
@@ -219,8 +219,8 @@ def test_loc_to_fail(self):
# raise a KeyError?
msg = (
- r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[1, 2], [1, 2]]
@@ -236,8 +236,8 @@ def test_loc_to_fail(self):
s.loc[-1]
msg = (
- r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-1, -2]]
@@ -252,8 +252,8 @@ def test_loc_to_fail(self):
s["a"] = 2
msg = (
- r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
s.loc[[-2]]
@@ -268,8 +268,8 @@ def test_loc_to_fail(self):
df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"])
msg = (
- r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
df.loc[[3], :]
@@ -1002,3 +1002,13 @@ def test_loc_axis_1_slice():
),
)
tm.assert_frame_equal(result, expected)
+
+
+def test_loc_set_dataframe_multiindex():
+ # GH 14592
+ expected = pd.DataFrame(
+ "a", index=range(2), columns=pd.MultiIndex.from_product([range(2), range(2)])
+ )
+ result = expected.copy()
+ result.loc[0, [(0, 1)]] = result.loc[0, [(0, 1)]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 5fda759020f1a..2ce07ec41758f 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -205,8 +205,8 @@ def test_series_partial_set(self):
# raises as nothing in in the index
msg = (
- r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are"
- r" in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are "
+ r"in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
ser.loc[[3, 3, 3]]
@@ -286,8 +286,8 @@ def test_series_partial_set_with_name(self):
# raises as nothing in in the index
msg = (
- r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64',"
- r" name='idx'\)\] are in the \[index\]\""
+ r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64', "
+ r"name='idx'\)\] are in the \[index\]\""
)
with pytest.raises(KeyError, match=msg):
ser.loc[[3, 3, 3]]
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 15b1434f8629f..aa966caa63238 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -297,7 +297,8 @@ def test_delete(self):
assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
- with pytest.raises(Exception):
+
+ with pytest.raises(IndexError, match=None):
newb.delete(3)
@@ -321,7 +322,12 @@ def test_can_hold_element(self):
val = date(2010, 10, 10)
assert not block._can_hold_element(val)
- with pytest.raises(TypeError):
+
+ msg = (
+ "'value' should be a 'Timestamp', 'NaT', "
+ "or array of those. Got 'date' instead."
+ )
+ with pytest.raises(TypeError, match=msg):
arr[0] = val
@@ -350,7 +356,10 @@ def test_duplicate_ref_loc_failure(self):
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
- with pytest.raises(AssertionError):
+
+ msg = "Gaps in blk ref_locs"
+
+ with pytest.raises(AssertionError, match=msg):
BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
@@ -808,7 +817,11 @@ def test_validate_bool_args(self):
bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
for value in invalid_values:
- with pytest.raises(ValueError):
+ msg = (
+ 'For argument "inplace" expected type bool, '
+ f"received type {type(value).__name__}."
+ )
+ with pytest.raises(ValueError, match=msg):
bm1.replace_list([1], [2], inplace=value)
@@ -1027,9 +1040,11 @@ def test_slice_len(self):
assert len(BlockPlacement(slice(1, 0, -1))) == 1
def test_zero_step_raises(self):
- with pytest.raises(ValueError):
+ msg = "slice step cannot be zero"
+
+ with pytest.raises(ValueError, match=msg):
BlockPlacement(slice(1, 1, 0))
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
BlockPlacement(slice(1, 2, 0))
def test_unbounded_slice_raises(self):
@@ -1132,9 +1147,11 @@ def assert_add_equals(val, inc, result):
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
- with pytest.raises(ValueError):
+ msg = "iadd causes length change"
+
+ with pytest.raises(ValueError, match=msg):
BlockPlacement(slice(1, 4)).add(-10)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
BlockPlacement([1, 2, 4]).add(-10)
@@ -1200,7 +1217,7 @@ def test_binop_other(self, op, value, dtype):
(operator.pow, "bool"),
}
if (op, dtype) in skip:
- pytest.skip("Invalid combination {},{}".format(op, dtype))
+ pytest.skip(f"Invalid combination {op},{dtype}")
e = DummyElement(value, dtype)
s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
@@ -1216,7 +1233,17 @@ def test_binop_other(self, op, value, dtype):
}
if (op, dtype) in invalid:
- with pytest.raises(TypeError):
+ msg = (
+ None
+ if (dtype == "<M8[ns]" and op == operator.add)
+ or (dtype == "<m8[ns]" and op == operator.mul)
+ else (
+ f"cannot perform __{op.__name__}__ with this "
+ "index type: (DatetimeArray|TimedeltaArray)"
+ )
+ )
+
+ with pytest.raises(TypeError, match=msg):
op(s, e.value)
else:
# FIXME: Since dispatching to Series, this test no longer
diff --git a/pandas/tests/io/data/excel/blank.xlsb b/pandas/tests/io/data/excel/blank.xlsb
new file mode 100644
index 0000000000000..d72fd68ab3dbf
Binary files /dev/null and b/pandas/tests/io/data/excel/blank.xlsb differ
diff --git a/pandas/tests/io/data/excel/blank_with_header.xlsb b/pandas/tests/io/data/excel/blank_with_header.xlsb
new file mode 100644
index 0000000000000..3c241513d221a
Binary files /dev/null and b/pandas/tests/io/data/excel/blank_with_header.xlsb differ
diff --git a/pandas/tests/io/data/excel/test1.xlsb b/pandas/tests/io/data/excel/test1.xlsb
new file mode 100644
index 0000000000000..d0b8a1f2735bd
Binary files /dev/null and b/pandas/tests/io/data/excel/test1.xlsb differ
diff --git a/pandas/tests/io/data/excel/test2.xlsb b/pandas/tests/io/data/excel/test2.xlsb
new file mode 100644
index 0000000000000..e19a0f1e067c8
Binary files /dev/null and b/pandas/tests/io/data/excel/test2.xlsb differ
diff --git a/pandas/tests/io/data/excel/test3.xlsb b/pandas/tests/io/data/excel/test3.xlsb
new file mode 100644
index 0000000000000..617d27630e8a0
Binary files /dev/null and b/pandas/tests/io/data/excel/test3.xlsb differ
diff --git a/pandas/tests/io/data/excel/test4.xlsb b/pandas/tests/io/data/excel/test4.xlsb
new file mode 100644
index 0000000000000..2e5bb229939be
Binary files /dev/null and b/pandas/tests/io/data/excel/test4.xlsb differ
diff --git a/pandas/tests/io/data/excel/test5.xlsb b/pandas/tests/io/data/excel/test5.xlsb
new file mode 100644
index 0000000000000..022ebef25aee2
Binary files /dev/null and b/pandas/tests/io/data/excel/test5.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_converters.xlsb b/pandas/tests/io/data/excel/test_converters.xlsb
new file mode 100644
index 0000000000000..c39c33d8dd94f
Binary files /dev/null and b/pandas/tests/io/data/excel/test_converters.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_index_name_pre17.xlsb b/pandas/tests/io/data/excel/test_index_name_pre17.xlsb
new file mode 100644
index 0000000000000..5251b8f3b3194
Binary files /dev/null and b/pandas/tests/io/data/excel/test_index_name_pre17.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_multisheet.xlsb b/pandas/tests/io/data/excel/test_multisheet.xlsb
new file mode 100644
index 0000000000000..39b15568a7121
Binary files /dev/null and b/pandas/tests/io/data/excel/test_multisheet.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_squeeze.xlsb b/pandas/tests/io/data/excel/test_squeeze.xlsb
new file mode 100644
index 0000000000000..6aadd727e957b
Binary files /dev/null and b/pandas/tests/io/data/excel/test_squeeze.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_types.xlsb b/pandas/tests/io/data/excel/test_types.xlsb
new file mode 100644
index 0000000000000..e7403aa288263
Binary files /dev/null and b/pandas/tests/io/data/excel/test_types.xlsb differ
diff --git a/pandas/tests/io/data/excel/testdateoverflow.xlsb b/pandas/tests/io/data/excel/testdateoverflow.xlsb
new file mode 100644
index 0000000000000..3d279396924b9
Binary files /dev/null and b/pandas/tests/io/data/excel/testdateoverflow.xlsb differ
diff --git a/pandas/tests/io/data/excel/testdtype.xlsb b/pandas/tests/io/data/excel/testdtype.xlsb
new file mode 100644
index 0000000000000..1c1d45f0d783b
Binary files /dev/null and b/pandas/tests/io/data/excel/testdtype.xlsb differ
diff --git a/pandas/tests/io/data/excel/testmultiindex.xlsb b/pandas/tests/io/data/excel/testmultiindex.xlsb
new file mode 100644
index 0000000000000..b66d6dab17ee0
Binary files /dev/null and b/pandas/tests/io/data/excel/testmultiindex.xlsb differ
diff --git a/pandas/tests/io/data/excel/testskiprows.xlsb b/pandas/tests/io/data/excel/testskiprows.xlsb
new file mode 100644
index 0000000000000..a5ff4ed22e70c
Binary files /dev/null and b/pandas/tests/io/data/excel/testskiprows.xlsb differ
diff --git a/pandas/tests/io/data/excel/times_1900.xlsb b/pandas/tests/io/data/excel/times_1900.xlsb
new file mode 100644
index 0000000000000..ceb7bccb0c66e
Binary files /dev/null and b/pandas/tests/io/data/excel/times_1900.xlsb differ
diff --git a/pandas/tests/io/data/excel/times_1904.xlsb b/pandas/tests/io/data/excel/times_1904.xlsb
new file mode 100644
index 0000000000000..e426dc959da49
Binary files /dev/null and b/pandas/tests/io/data/excel/times_1904.xlsb differ
diff --git a/pandas/tests/io/data/html/computer_sales_page.html b/pandas/tests/io/data/html/computer_sales_page.html
deleted file mode 100644
index ff2b031b58d64..0000000000000
--- a/pandas/tests/io/data/html/computer_sales_page.html
+++ /dev/null
@@ -1,619 +0,0 @@
-<table width="100%" border="0" cellspacing="0" cellpadding="0">
-<tbody><tr><!-- TABLE COLUMN WIDTHS SET -->
-<td width="" style="font-family:times;"></td>
-<td width="12pt" style="font-family:times;"></td>
-<td width="7pt" align="RIGHT" style="font-family:times;"></td>
-<td width="45pt" style="font-family:times;"></td>
-<td width="12pt" style="font-family:times;"></td>
-<td width="7pt" align="RIGHT" style="font-family:times;"></td>
-<td width="45pt" style="font-family:times;"></td>
-<td width="12pt" style="font-family:times;"></td>
-<td width="7pt" align="RIGHT" style="font-family:times;"></td>
-<td width="45pt" style="font-family:times;"></td>
-<td width="12pt" style="font-family:times;"></td>
-<td width="7pt" align="RIGHT" style="font-family:times;"></td>
-<td width="45pt" style="font-family:times;"></td>
-<td width="12pt" style="font-family:times;"></td>
-<!-- TABLE COLUMN WIDTHS END --></tr>
-
-<tr valign="BOTTOM">
-<th align="LEFT" style="font-family:times;"><font size="2"> </font><br></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="5" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>Three months ended<br>
-April 30 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="5" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>Six months ended<br>
-April 30 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-</tr>
-<tr valign="BOTTOM">
-<th align="LEFT" style="font-family:times;"><font size="1"> </font><br></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="2" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>2013 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="2" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>2012 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="2" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>2013 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="2" align="CENTER" style="font-family:times;border-bottom:solid #000000 1.0pt;"><font size="1"><b>2012 </b></font></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-</tr>
-<tr valign="BOTTOM">
-<th align="LEFT" style="font-family:times;"><font size="1"> </font><br></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-<th colspan="11" align="CENTER" style="font-family:times;"><font size="1"><b>In millions</b></font><br></th>
-<th style="font-family:times;"><font size="1"> </font></th>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Net revenue:</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Notebooks</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,718</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,900</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,846</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">9,842</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Desktops</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,103</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,827</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,424</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,033</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Workstations</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">521</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">537</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,056</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,072</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Other</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">242</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">206</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">462</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">415</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Personal Systems</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,584</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">9,470</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">15,788</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">18,362</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Supplies</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,122</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,060</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">8,015</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">8,139</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Commercial Hardware</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,398</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,479</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,752</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,968</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Consumer Hardware</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">561</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">593</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,240</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,283</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Printing</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,081</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,132</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">12,007</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">12,390</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Printing and Personal Systems Group</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">13,665</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">15,602</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">27,795</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">30,752</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Industry Standard Servers</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,806</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,186</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">5,800</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,258</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Technology Services</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,272</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,335</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,515</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,599</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Storage</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">857</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">990</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,690</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,945</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Networking</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">618</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">614</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,226</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,200</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Business Critical Systems</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">266</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">421</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">572</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">826</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Enterprise Group</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,819</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,546</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">13,803</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">14,828</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Infrastructure Technology Outsourcing</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,721</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">3,954</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,457</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7,934</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:20pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Application and Business Services</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,278</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">2,535</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,461</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">4,926</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Enterprise Services</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">5,999</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">6,489</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">11,918</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">12,860</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Software</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">941</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">970</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,867</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,916</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">HP Financial Services</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">881</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">968</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,838</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">1,918</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Corporate Investments</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">10</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">7</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">14</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">37</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Total segments</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">28,315</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">31,582</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">57,235</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">62,311</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="#CCEEFF" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:10pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Eliminations of intersegment net revenue and other</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">(733</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2">)</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">(889</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2">)</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">(1,294</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2">)</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">(1,582</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2">)</font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:solid #000000 1.0pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-<tr bgcolor="White" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"><p style="font-family:times;margin-left:30pt;text-indent:-10pt;"><font size="2"> </font><font size="2">Total HP consolidated net revenue</font></p></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">27,582</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">30,693</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">55,941</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">$</font></td>
-<td align="RIGHT" valign="BOTTOM" style="font-family:times;"><font size="2">60,729</font></td>
-<td valign="BOTTOM" style="font-family:times;"><font size="2"> </font></td>
-</tr>
-<tr style="font-size:1.5pt;" valign="TOP">
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:double #000000 2.25pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:double #000000 2.25pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:double #000000 2.25pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-<td colspan="2" align="RIGHT" valign="BOTTOM" style="font-family:times;border-bottom:double #000000 2.25pt;"> </td>
-<td valign="BOTTOM" style="font-family:times;"> </td>
-</tr>
-</tbody></table>
diff --git a/pandas/tests/io/data/html/macau.html b/pandas/tests/io/data/html/macau.html
deleted file mode 100644
index edc4ea96f0f20..0000000000000
--- a/pandas/tests/io/data/html/macau.html
+++ /dev/null
@@ -1,3691 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<!-- saved from url=(0037)http://www.camacau.com/statistic_list -->
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
-
-
-<link rel="stylesheet" type="text/css" href="./macau_files/style.css" media="screen">
-<script type="text/javascript" src="./macau_files/jquery.js"></script>
-
-
-
-
-
-<script type="text/javascript">
-
-function slideSwitch() {
-
- var $active = $('#banner1 a.active');
-
- var totalTmp=document.getElementById("bannerTotal").innerHTML;
-
- var randomTmp=Math.floor(Math.random()*totalTmp+1);
-
- var $next = $('#image'+randomTmp).length?$('#image'+randomTmp):$('#banner1 a:first');
-
- if($next.attr("id")==$active.attr("id")){
-
- $next = $active.next().length ? $active.next():$('#banner1 a:first');
- }
-
- $active.removeClass("active");
-
- $next.addClass("active").show();
-
- $active.hide();
-
-}
-
-jQuery(function() {
-
- var totalTmp=document.getElementById("bannerTotal").innerHTML;
- if(totalTmp>1){
- setInterval( "slideSwitch()", 5000 );
- }
-
-});
-
-</script>
-<script type="text/javascript">
-function close_notice(){
-jQuery("#tbNotice").hide();
-}
-</script>
-
-<title>Traffic Statistics - Passengers</title>
-
-<!-- GOOGLE STATISTICS
-<script type="text/javascript">
-
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-24989877-2']);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-
-</script>
--->
-<style type="text/css"></style><style type="text/css"></style><script id="fireplug-jssdk" src="./macau_files/all.js"></script><style type="text/css">.fireplug-credit-widget-overlay{z-index:9999999999999999999;background-color:rgba(91,91,91,0.6)}.fireplug-credit-widget-overlay div,.fireplug-credit-widget-overlay span,.fireplug-credit-widget-overlay applet,.fireplug-credit-widget-overlay object,.fireplug-credit-widget-overlay iframe,.fireplug-credit-widget-overlay h1,.fireplug-credit-widget-overlay h2,.fireplug-credit-widget-overlay h3,.fireplug-credit-widget-overlay h4,.fireplug-credit-widget-overlay h5,.fireplug-credit-widget-overlay h6,.fireplug-credit-widget-overlay p,.fireplug-credit-widget-overlay blockquote,.fireplug-credit-widget-overlay pre,.fireplug-credit-widget-overlay a,.fireplug-credit-widget-overlay abbr,.fireplug-credit-widget-overlay acronym,.fireplug-credit-widget-overlay address,.fireplug-credit-widget-overlay big,.fireplug-credit-widget-overlay cite,.fireplug-credit-widget-overlay code,.fireplug-credit-widget-overlay del,.fireplug-credit-widget-overlay dfn,.fireplug-credit-widget-overlay em,.fireplug-credit-widget-overlay img,.fireplug-credit-widget-overlay ins,.fireplug-credit-widget-overlay kbd,.fireplug-credit-widget-overlay q,.fireplug-credit-widget-overlay s,.fireplug-credit-widget-overlay samp,.fireplug-credit-widget-overlay small,.fireplug-credit-widget-overlay strike,.fireplug-credit-widget-overlay strong,.fireplug-credit-widget-overlay sub,.fireplug-credit-widget-overlay sup,.fireplug-credit-widget-overlay tt,.fireplug-credit-widget-overlay var,.fireplug-credit-widget-overlay b,.fireplug-credit-widget-overlay u,.fireplug-credit-widget-overlay i,.fireplug-credit-widget-overlay center,.fireplug-credit-widget-overlay dl,.fireplug-credit-widget-overlay dt,.fireplug-credit-widget-overlay dd,.fireplug-credit-widget-overlay ol,.fireplug-credit-widget-overlay ul,.fireplug-credit-widget-overlay li,.fireplug-credit-widget-overlay 
fieldset,.fireplug-credit-widget-overlay form,.fireplug-credit-widget-overlay label,.fireplug-credit-widget-overlay legend,.fireplug-credit-widget-overlay table,.fireplug-credit-widget-overlay caption,.fireplug-credit-widget-overlay tbody,.fireplug-credit-widget-overlay tfoot,.fireplug-credit-widget-overlay thead,.fireplug-credit-widget-overlay tr,.fireplug-credit-widget-overlay th,.fireplug-credit-widget-overlay td,.fireplug-credit-widget-overlay article,.fireplug-credit-widget-overlay aside,.fireplug-credit-widget-overlay canvas,.fireplug-credit-widget-overlay details,.fireplug-credit-widget-overlay embed,.fireplug-credit-widget-overlay figure,.fireplug-credit-widget-overlay figcaption,.fireplug-credit-widget-overlay footer,.fireplug-credit-widget-overlay header,.fireplug-credit-widget-overlay hgroup,.fireplug-credit-widget-overlay menu,.fireplug-credit-widget-overlay nav,.fireplug-credit-widget-overlay output,.fireplug-credit-widget-overlay ruby,.fireplug-credit-widget-overlay section,.fireplug-credit-widget-overlay summary,.fireplug-credit-widget-overlay time,.fireplug-credit-widget-overlay mark,.fireplug-credit-widget-overlay audio,.fireplug-credit-widget-overlay video{margin:0;padding:0;border:0;font:inherit;font-size:100%;vertical-align:baseline}.fireplug-credit-widget-overlay table{border-collapse:collapse;border-spacing:0}.fireplug-credit-widget-overlay caption,.fireplug-credit-widget-overlay th,.fireplug-credit-widget-overlay td{text-align:left;font-weight:normal;vertical-align:middle}.fireplug-credit-widget-overlay q,.fireplug-credit-widget-overlay blockquote{quotes:none}.fireplug-credit-widget-overlay q:before,.fireplug-credit-widget-overlay q:after,.fireplug-credit-widget-overlay blockquote:before,.fireplug-credit-widget-overlay blockquote:after{content:"";content:none}.fireplug-credit-widget-overlay a img{border:none}.fireplug-credit-widget-overlay .fireplug-credit-widget-overlay-item{z-index:9999999999999999999;-webkit-box-shadow:#333 0px 0px 
10px;-moz-box-shadow:#333 0px 0px 10px;box-shadow:#333 0px 0px 10px}.fireplug-credit-widget-overlay-body{height:100% !important;overflow:hidden !important}.fp-getcredit iframe{border:none;overflow:hidden;height:20px;width:145px}
-</style></head>
-<body>
-<div id="full">
-<div id="container">
-
-
-<div id="top">
- <div id="lang">
-
- <a href="http://www.camacau.com/changeLang?lang=zh_TW&url=/statistic_list">繁體中文</a> |
- <a href="http://www.camacau.com/changeLang?lang=zh_CN&url=/statistic_list">簡體中文</a>
- <!--<a href="changeLang?lang=pt_PT&url=/statistic_list" >Portuguese</a>
- -->
- </div>
-</div>
-
-<div id="header">
- <div id="sitelogo"><a href="http://www.camacau.com/index" style="color : #FFF;"><img src="./macau_files/cam h04.jpg"></a></div>
- <div id="navcontainer">
- <div id="menu">
- <div id="search">
- <form id="searchForm" name="searchForm" action="http://www.camacau.com/search" method="POST">
- <input id="keyword" name="keyword" type="text">
- <a href="javascript:document.searchForm.submit();">Search</a> |
- <a href="mailto:mkd@macau-airport.com">Contact Us</a> |
- <a href="http://www.camacau.com/sitemap">SiteMap</a> |
-
- <a href="http://www.camacau.com/rssBuilder.action"><img src="./macau_files/rssIcon.png" alt="RSS">RSS</a>
- </form></div>
- </div>
-</div>
-</div>
-<div id="menu2">
- <div>
-
-
- <object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" id="FlashID4" title="Main Page">
- <param name="movie" value="flash/button_index_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_index_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" id="FlashID4" title="Our Business">
- <param name="movie" value="flash/button_our business_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_our%20business_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <object id="FlashID" classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" title="About Us">
- <param name="movie" value="flash/button_about us_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_about%20us_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <object id="FlashID3" classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" title="Media Centre">
- <param name="movie" value="flash/button_media centre_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_media%20centre_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="wmode" value="opaque">
- <param name="scale" value="exactfit">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" id="FlashID5" title="Related Links">
- <param name="movie" value="flash/button_related links_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_related%20links_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <object id="FlashID2" classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" title="Interactive">
- <param name="movie" value="flash/button_interactive_EN.swf">
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <!-- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。 -->
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。 -->
- <!--[if !IE]>-->
- <object type="application/x-shockwave-flash" data="http://www.camacau.com/flash/button_interactive_EN.swf" width="92" height="20">
- <!--<![endif]-->
- <param name="quality" value="high">
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque">
- <param name="swfversion" value="6.0.65.0">
- <param name="expressinstall" value="flash/expressInstall.swf">
- <!-- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。 -->
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="./macau_files/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33"></a></p>
- </div>
- <!--[if !IE]>-->
- </object>
- <!--<![endif]-->
- </object>
-
- <!--<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" width="95" height="20" id="FlashID4" title="Group of Public">
- <param name="movie" value="flash/button_pressRelease_EN.swf" />
- <param name="quality" value="high" />
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque" />
- <param name="swfversion" value="6.0.65.0" />
- 此 param 標籤會提示使用 Flash Player 6.0 r65 和更新版本的使用者下載最新版本的 Flash Player。如果您不想讓使用者看到這項提示,請將其刪除。
- <param name="expressinstall" value="flash/expressInstall.swf" />
- 下一個物件標籤僅供非 IE 瀏覽器使用。因此,請使用 IECC 將其自 IE 隱藏。
- [if !IE]>
- <object type="application/x-shockwave-flash" data="flash/button_pressRelease_EN.swf" width="92" height="20">
- <![endif]
- <param name="quality" value="high" />
- <param name="scale" value="exactfit">
- <param name="wmode" value="opaque" />
- <param name="swfversion" value="6.0.65.0" />
- <param name="expressinstall" value="flash/expressInstall.swf" />
- 瀏覽器會為使用 Flash Player 6.0 和更早版本的使用者顯示下列替代內容。
- <div>
- <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4>
- <p><a href="http://www.adobe.com/go/getflashplayer"><img src="http://www.adobe.com/images/shared/download_buttons/get_flash_player.gif" alt="取得 Adobe Flash Player" width="112" height="33" /></a></p>
- </div>
- [if !IE]>
- </object>
- <![endif]
- </object>
-
- --></div>
- </div>
-
-
-
-
-
-
-
-<style>
-#slider ul li
-{
-height: 90px;
-list-style:none;
-width:95%;
-font-size:11pt;
-text-indent:2em;
-text-align:justify;
-text-justify:inter-ideograph;
-color:#663300;
-}
-
-
-#slider
-{
-margin: auto;
-overflow: hidden;
-/* Non Core */
-background: #f6f7f8;
-box-shadow: 4px 4px 15px #aaa;
--o-box-shadow: 4px 4px 15px #aaa;
--icab-box-shadow: 4px 4px 15px #aaa;
--khtml-box-shadow: 4px 4px 15px #aaa;
--moz-box-shadow: 4px 4px 15px #aaa;
--webkit-box-shadow: 4px 4px 15px #aaa;
-border: 4px solid #bcc5cb;
-
-border-width: 1px 2px 2px 1px;
-
--o-border-radius: 10px;
--icab-border-radius: 10px;
--khtml-border-radius: 10px;
--moz-border-radius: 10px;
--webkit-border-radius: 10px;
-border-radius: 10px;
-
-}
-
-#close_tbNotice img
-{
-width:20px;
-height:20px;
-align:right;
-cursor:pointer;
-}
-</style>
-
-<div id="banner">
- <!--<div id="leftGradient"></div>-->
-
- <table id="tbNotice" style="display:none;width:800px;z-index:999;position:absolute;left:20%;" align="center">
- <tbody><tr height="40px"><td></td></tr>
- <tr><td>
-
- <div id="slider">
- <div id="close_tbNotice"><img src="./macau_files/delete.png" onclick="close_notice()"></div>
- <ul>
- <li>
-
-
-
-
- </li>
- </ul>
-
- </div>
- <div id="show_notice" style="display:none;">
-
- </div>
-
- </td>
-
- </tr>
- <tr><td align="right"></td></tr>
- </tbody></table>
-
-
- <div class="gradient">
-
- </div>
- <div class="banner1" id="banner1">
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: none;" id="image1" class="">
- <img src="./macau_files/41.jpeg" alt="Slideshow Image 1">
- </a>
-
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: none;" id="image2" class="">
- <img src="./macau_files/45.jpeg" alt="Slideshow Image 2">
- </a>
-
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: none;" id="image3" class="">
- <img src="./macau_files/46.jpeg" alt="Slideshow Image 3">
- </a>
-
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: inline;" id="image4" class="active">
- <img src="./macau_files/47.jpeg" alt="Slideshow Image 4">
- </a>
-
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: none;" id="image5" class="">
- <img src="./macau_files/48.jpeg" alt="Slideshow Image 5">
- </a>
-
-
-
-
-
- <a href="http://www.macau-airport.com/" target="_blank" style="display: none;" id="image6" class="">
- <img src="./macau_files/49.jpeg" alt="Slideshow Image 6">
- </a>
-
-
-
-
-
- <a href="http://www.4cpscac.com/" target="_blank" style="display: none;" id="image7" class="">
- <img src="./macau_files/50.jpg" alt="Slideshow Image 7">
- </a>
-
-
-
-
- </div>
- <div id="bannerTotal" style="display:none;">7</div>
-</div>
-
-<div id="content">
- <div id="leftnav">
-
- <div id="navmenu">
-
-
-
-
-
-<link href="./macau_files/ddaccordion.css" rel="stylesheet" type="text/css">
-<script type="text/javascript" src="./macau_files/ddaccordion.js"></script>
-
-
-
-<script type="text/javascript">
- ddaccordion.init({
- headerclass: "leftmenu_silverheader", //Shared CSS class name of headers group
- contentclass: "leftmenu_submenu", //Shared CSS class name of contents group
- revealtype: "clickgo", //Reveal content when user clicks or onmouseover the header? Valid value: "click", "clickgo", or "mouseover"
- mouseoverdelay: 100, //if revealtype="mouseover", set delay in milliseconds before header expands onMouseover
- collapseprev: true, //Collapse previous content (so only one open at any time)? true/false
- defaultexpanded: [0], //index of content(s) open by default [index1, index2, etc] [] denotes no content
- onemustopen: false, //Specify whether at least one header should be open always (so never all headers closed)
- animatedefault: true, //Should contents open by default be animated into view?
- persiststate: true, //persist state of opened contents within browser session?
- toggleclass: ["", "selected"], //Two CSS classes to be applied to the header when it's collapsed and expanded, respectively ["class1", "class2"]
- togglehtml: ["", "", ""], //Additional HTML added to the header when it's collapsed and expanded, respectively ["position", "html1", "html2"] (see docs)
- animatespeed: "normal", //speed of animation: integer in milliseconds (ie: 200), or keywords "fast", "normal", or "slow"
- oninit:function(headers, expandedindices){ //custom code to run when headers have initialized
- //do nothing
- },
- onopenclose:function(header, index, state, isuseractivated){ //custom code to run whenever a header is opened or closed
- //do nothing
-
- }
-});
-</script><style type="text/css">
-.leftmenu_submenu{display: none}
-a.hiddenajaxlink{display: none}
-</style>
-
-
- <table>
- <tbody><tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/geographic_information">MIA Geographical Information</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/airport_services">Scope of Service</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/services_agreement">Air Services Agreement</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/airport_charges" class="leftmenu_silverheader selected" headerindex="0h"><span>Airport Charges</span></a></td></tr>
- <tr><td colspan="2" style="padding-top:0px;padding-bottom:0px;padding-right:0px;">
- <table class="leftmenu_submenu" contentindex="0c" style="display: block;">
- <tbody><tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/airport_charges1">Passenger Service Fees</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/airport_charges2">Aircraft Parking fees</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/airport_charges3">Airport Security Fee</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/airport_charges4">Utilization fees</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/airport_charges5">Refuelling Charge</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/calculation">Calculation of Landing fee Rate</a></td></tr></tbody></table></td></tr>
- </tbody></table>
- </td>
- </tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/application_facilities">Application of Credit Facilities</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="javascript:void(0)" class="leftmenu_silverheader " headerindex="1h"><span>Passenger Flight Incentive Program</span></a></td></tr>
- <tr><td colspan="2" style="padding-top:0px;padding-bottom:0px;padding-right:0px;">
- <table class="leftmenu_submenu" contentindex="1c" style="display: none;">
- <tbody><tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/incentive_program1">Incentive policy for new routes and additional flights</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/incentive_program1_1">Passenger flights</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/incentive_program1_2">Charter flights</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/docs/MIA_Route_Development_IncentiveApp_Form.pdf" target="_blank">Route Development Incentive Application Form</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/online_application">Online Application</a></td></tr></tbody></table></td></tr>
- </tbody></table>
- </td>
- </tr>
-
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/slot_application">Slot Application</a></td></tr>
-
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/freighter_forwards">Macau Freight Forwarders</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/ctplatform">Cargo Tracking Platform</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/for_rent">For Rent</a></td></tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/capacity">Airport Capacity</a></td></tr>
-
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td style="color: #606060;text-decoration: none;"><a href="javascript:void(0)" class="leftmenu_silverheader " headerindex="2h">Airport Characteristics & Traffic Statistics</a></td></tr>
- <tr><td colspan="2" style="padding-top:0px;padding-bottom:0px;padding-right:0px;">
- <table class="leftmenu_submenu" contentindex="2c" style="display: none;">
- <!--<tr><td> </td><td><table class="submenu"><tr><td><img width="20" height="15" src="images/sub_icon.gif"/></td><td><a href="airport_characteristics">Airport Characteristics</a></td></tr></table></td></tr>
- -->
- <tbody><tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="./macau_files/macau.html">Traffic Statistics - Passengers</a></td></tr></tbody></table></td></tr>
- <tr><td> </td><td><table class="submenu"><tbody><tr><td><img width="20" height="15" src="./macau_files/sub_icon.gif"></td><td><a href="http://www.camacau.com/statistics_cargo">Traffic Statistics - Cargo</a></td></tr></tbody></table></td></tr>
- </tbody></table>
- </td>
- </tr>
- <tr><td><img width="20" height="15" src="./macau_files/double.gif"></td><td><a href="http://www.camacau.com/operational_routes">Operational Routes</a></td></tr>
-
- <!--<tr><td><img width="20" height="15" src="images/double.gif"/></td><td><a href="route_development">Member Registration</a></td></tr>
-
- --><!--<tr><td><img width="20" height="15" src="images/double.gif"/></td><td><a href="cargo_arrival">Cargo Flight Information</a></td></tr>-->
-
- <!--<tr><td><img width="20" height="15" src="images/double.gif"/></td><td><a href="/mvnforum/mvnforum/index">Forum</a></td></tr>-->
-
- </tbody></table>
-
-
- </div>
- </div>
-
-<div id="under">
- <div id="contextTitle">
- <h2 class="con">Traffic Statistics - Passengers</h2>
-
- </div>
- <div class="contextTitleAfter"></div>
- <div>
-
-
- <div id="context">
- <!--/*begin context*/-->
- <div class="Container">
- <div id="Scroller-1">
- <div class="Scroller-Container">
- <div id="statisticspassengers" style="width:550px;">
-
-
- <span id="title">Traffic Statistics</span>
-
-
-
-
-
- <br><br><br>
- <span id="title">Passengers Figure(2008-2013) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2013</th>
-
- <th align="center">2012</th>
-
- <th align="center">2011</th>
-
- <th align="center">2010</th>
-
- <th align="center">2009</th>
-
- <th align="center">2008</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 374,917
- </td>
-
- <td align="center">
-
- 362,379
- </td>
-
- <td align="center">
-
- 301,503
- </td>
-
- <td align="center">
-
- 358,902
- </td>
-
- <td align="center">
-
- 342,323
- </td>
-
- <td align="center">
-
- 420,574
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 393,152
- </td>
-
- <td align="center">
-
- 312,405
- </td>
-
- <td align="center">
-
- 301,259
- </td>
-
- <td align="center">
-
- 351,654
- </td>
-
- <td align="center">
-
- 297,755
- </td>
-
- <td align="center">
-
- 442,809
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 408,755
- </td>
-
- <td align="center">
-
- 334,000
- </td>
-
- <td align="center">
-
- 318,908
- </td>
-
- <td align="center">
-
- 360,365
- </td>
-
- <td align="center">
-
- 387,879
- </td>
-
- <td align="center">
-
- 468,540
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 408,860
- </td>
-
- <td align="center">
-
- 358,198
- </td>
-
- <td align="center">
-
- 339,060
- </td>
-
- <td align="center">
-
- 352,976
- </td>
-
- <td align="center">
-
- 400,553
- </td>
-
- <td align="center">
-
- 492,930
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 374,397
- </td>
-
- <td align="center">
-
- 329,218
- </td>
-
- <td align="center">
-
- 321,060
- </td>
-
- <td align="center">
-
- 330,407
- </td>
-
- <td align="center">
-
- 335,967
- </td>
-
- <td align="center">
-
- 465,045
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 401,995
- </td>
-
- <td align="center">
-
- 356,679
- </td>
-
- <td align="center">
-
- 343,006
- </td>
-
- <td align="center">
-
- 326,724
- </td>
-
- <td align="center">
-
- 296,748
- </td>
-
- <td align="center">
-
- 426,764
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 423,081
- </td>
-
- <td align="center">
-
- 378,993
- </td>
-
- <td align="center">
-
- 356,580
- </td>
-
- <td align="center">
-
- 351,110
- </td>
-
- <td align="center">
-
- 439,425
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 453,391
- </td>
-
- <td align="center">
-
- 395,883
- </td>
-
- <td align="center">
-
- 364,011
- </td>
-
- <td align="center">
-
- 404,076
- </td>
-
- <td align="center">
-
- 425,814
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 384,887
- </td>
-
- <td align="center">
-
- 325,124
- </td>
-
- <td align="center">
-
- 308,940
- </td>
-
- <td align="center">
-
- 317,226
- </td>
-
- <td align="center">
-
- 379,898
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 383,889
- </td>
-
- <td align="center">
-
- 333,102
- </td>
-
- <td align="center">
-
- 317,040
- </td>
-
- <td align="center">
-
- 355,935
- </td>
-
- <td align="center">
-
- 415,339
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 379,065
- </td>
-
- <td align="center">
-
- 327,803
- </td>
-
- <td align="center">
-
- 303,186
- </td>
-
- <td align="center">
-
- 372,104
- </td>
-
- <td align="center">
-
- 366,411
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 413,873
- </td>
-
- <td align="center">
-
- 359,313
- </td>
-
- <td align="center">
-
- 348,051
- </td>
-
- <td align="center">
-
- 388,573
- </td>
-
- <td align="center">
-
- 354,253
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 2,362,076
- </td>
-
- <td align="center">
-
- 4,491,065
- </td>
-
- <td align="center">
-
- 4,045,014
- </td>
-
- <td align="center">
-
- 4,078,836
- </td>
-
- <td align="center">
-
- 4,250,249
- </td>
-
- <td align="center">
-
- 5,097,802
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Passengers Figure(2002-2007) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2007</th>
-
- <th align="center">2006</th>
-
- <th align="center">2005</th>
-
- <th align="center">2004</th>
-
- <th align="center">2003</th>
-
- <th align="center">2002</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 381,887
- </td>
-
- <td align="center">
-
- 323,282
- </td>
-
- <td align="center">
-
- 289,701
- </td>
-
- <td align="center">
-
- 288,507
- </td>
-
- <td align="center">
-
- 290,140
- </td>
-
- <td align="center">
-
- 268,783
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 426,014
- </td>
-
- <td align="center">
-
- 360,820
- </td>
-
- <td align="center">
-
- 348,723
- </td>
-
- <td align="center">
-
- 207,710
- </td>
-
- <td align="center">
-
- 323,264
- </td>
-
- <td align="center">
-
- 323,654
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 443,805
- </td>
-
- <td align="center">
-
- 389,125
- </td>
-
- <td align="center">
-
- 321,953
- </td>
-
- <td align="center">
-
- 273,910
- </td>
-
- <td align="center">
-
- 295,052
- </td>
-
- <td align="center">
-
- 360,668
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 500,917
- </td>
-
- <td align="center">
-
- 431,550
- </td>
-
- <td align="center">
-
- 367,976
- </td>
-
- <td align="center">
-
- 324,931
- </td>
-
- <td align="center">
-
- 144,082
- </td>
-
- <td align="center">
-
- 380,648
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 468,637
- </td>
-
- <td align="center">
-
- 399,743
- </td>
-
- <td align="center">
-
- 359,298
- </td>
-
- <td align="center">
-
- 250,601
- </td>
-
- <td align="center">
-
- 47,333
- </td>
-
- <td align="center">
-
- 359,547
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 463,676
- </td>
-
- <td align="center">
-
- 393,713
- </td>
-
- <td align="center">
-
- 360,147
- </td>
-
- <td align="center">
-
- 296,000
- </td>
-
- <td align="center">
-
- 94,294
- </td>
-
- <td align="center">
-
- 326,508
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
- 490,404
- </td>
-
- <td align="center">
-
- 465,497
- </td>
-
- <td align="center">
-
- 413,131
- </td>
-
- <td align="center">
-
- 365,454
- </td>
-
- <td align="center">
-
- 272,784
- </td>
-
- <td align="center">
-
- 388,061
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
- 490,830
- </td>
-
- <td align="center">
-
- 478,474
- </td>
-
- <td align="center">
-
- 409,281
- </td>
-
- <td align="center">
-
- 372,802
- </td>
-
- <td align="center">
-
- 333,840
- </td>
-
- <td align="center">
-
- 384,719
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
- 446,594
- </td>
-
- <td align="center">
-
- 412,444
- </td>
-
- <td align="center">
-
- 354,751
- </td>
-
- <td align="center">
-
- 321,456
- </td>
-
- <td align="center">
-
- 295,447
- </td>
-
- <td align="center">
-
- 334,029
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
- 465,757
- </td>
-
- <td align="center">
-
- 461,215
- </td>
-
- <td align="center">
-
- 390,435
- </td>
-
- <td align="center">
-
- 358,362
- </td>
-
- <td align="center">
-
- 291,193
- </td>
-
- <td align="center">
-
- 372,706
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 455,132
- </td>
-
- <td align="center">
-
- 425,116
- </td>
-
- <td align="center">
-
- 323,347
- </td>
-
- <td align="center">
-
- 327,593
- </td>
-
- <td align="center">
-
- 268,282
- </td>
-
- <td align="center">
-
- 350,324
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 465,225
- </td>
-
- <td align="center">
-
- 435,114
- </td>
-
- <td align="center">
-
- 308,999
- </td>
-
- <td align="center">
-
- 326,933
- </td>
-
- <td align="center">
-
- 249,855
- </td>
-
- <td align="center">
-
- 322,056
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 5,498,878
- </td>
-
- <td align="center">
-
- 4,976,093
- </td>
-
- <td align="center">
-
- 4,247,742
- </td>
-
- <td align="center">
-
- 3,714,259
- </td>
-
- <td align="center">
-
- 2,905,566
- </td>
-
- <td align="center">
-
- 4,171,703
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Passengers Figure(1996-2001) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2001</th>
-
- <th align="center">2000</th>
-
- <th align="center">1999</th>
-
- <th align="center">1998</th>
-
- <th align="center">1997</th>
-
- <th align="center">1996</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 265,603
- </td>
-
- <td align="center">
-
- 184,381
- </td>
-
- <td align="center">
-
- 161,264
- </td>
-
- <td align="center">
-
- 161,432
- </td>
-
- <td align="center">
-
- 117,984
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 249,259
- </td>
-
- <td align="center">
-
- 264,066
- </td>
-
- <td align="center">
-
- 209,569
- </td>
-
- <td align="center">
-
- 168,777
- </td>
-
- <td align="center">
-
- 150,772
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 312,319
- </td>
-
- <td align="center">
-
- 226,483
- </td>
-
- <td align="center">
-
- 186,965
- </td>
-
- <td align="center">
-
- 172,060
- </td>
-
- <td align="center">
-
- 149,795
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 351,793
- </td>
-
- <td align="center">
-
- 296,541
- </td>
-
- <td align="center">
-
- 237,449
- </td>
-
- <td align="center">
-
- 180,241
- </td>
-
- <td align="center">
-
- 179,049
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 338,692
- </td>
-
- <td align="center">
-
- 288,949
- </td>
-
- <td align="center">
-
- 230,691
- </td>
-
- <td align="center">
-
- 172,391
- </td>
-
- <td align="center">
-
- 189,925
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 332,630
- </td>
-
- <td align="center">
-
- 271,181
- </td>
-
- <td align="center">
-
- 231,328
- </td>
-
- <td align="center">
-
- 157,519
- </td>
-
- <td align="center">
-
- 175,402
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
- 344,658
- </td>
-
- <td align="center">
-
- 304,276
- </td>
-
- <td align="center">
-
- 243,534
- </td>
-
- <td align="center">
-
- 205,595
- </td>
-
- <td align="center">
-
- 173,103
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
- 360,899
- </td>
-
- <td align="center">
-
- 300,418
- </td>
-
- <td align="center">
-
- 257,616
- </td>
-
- <td align="center">
-
- 241,140
- </td>
-
- <td align="center">
-
- 178,118
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
- 291,817
- </td>
-
- <td align="center">
-
- 280,803
- </td>
-
- <td align="center">
-
- 210,885
- </td>
-
- <td align="center">
-
- 183,954
- </td>
-
- <td align="center">
-
- 163,385
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
- 327,232
- </td>
-
- <td align="center">
-
- 298,873
- </td>
-
- <td align="center">
-
- 231,251
- </td>
-
- <td align="center">
-
- 205,726
- </td>
-
- <td align="center">
-
- 176,879
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 315,538
- </td>
-
- <td align="center">
-
- 265,528
- </td>
-
- <td align="center">
-
- 228,637
- </td>
-
- <td align="center">
-
- 181,677
- </td>
-
- <td align="center">
-
- 146,804
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 314,866
- </td>
-
- <td align="center">
-
- 257,929
- </td>
-
- <td align="center">
-
- 210,922
- </td>
-
- <td align="center">
-
- 183,975
- </td>
-
- <td align="center">
-
- 151,362
- </td>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 3,805,306
- </td>
-
- <td align="center">
-
- 3,239,428
- </td>
-
- <td align="center">
-
- 2,640,111
- </td>
-
- <td align="center">
-
- 2,214,487
- </td>
-
- <td align="center">
-
- 1,952,578
- </td>
-
- <td align="center">
-
- 0
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Passengers Figure(1995-1995) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">1995</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 6,601
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 37,041
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 43,642
- </td>
-
- </tr>
- </tbody>
- </table>
-
-
- <br><br><br>
- <div align="right"><img src="./macau_files/pass_stat.jpg" alt="passenger statistic picture" width="565" height="318"></div>
- <br><br><br>
-
-
- <!--statistics-movement -->
-
- <br><br><br>
- <span id="title">Movement Statistics(2008-2013) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2013</th>
-
- <th align="center">2012</th>
-
- <th align="center">2011</th>
-
- <th align="center">2010</th>
-
- <th align="center">2009</th>
-
- <th align="center">2008</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 3,925
- </td>
-
- <td align="center">
-
- 3,463
- </td>
-
- <td align="center">
-
- 3,289
- </td>
-
- <td align="center">
-
- 3,184
- </td>
-
- <td align="center">
-
- 3,488
- </td>
-
- <td align="center">
-
- 4,568
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 3,632
- </td>
-
- <td align="center">
-
- 2,983
- </td>
-
- <td align="center">
-
- 2,902
- </td>
-
- <td align="center">
-
- 3,053
- </td>
-
- <td align="center">
-
- 3,347
- </td>
-
- <td align="center">
-
- 4,527
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 3,909
- </td>
-
- <td align="center">
-
- 3,166
- </td>
-
- <td align="center">
-
- 3,217
- </td>
-
- <td align="center">
-
- 3,175
- </td>
-
- <td align="center">
-
- 3,636
- </td>
-
- <td align="center">
-
- 4,594
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 3,903
- </td>
-
- <td align="center">
-
- 3,258
- </td>
-
- <td align="center">
-
- 3,146
- </td>
-
- <td align="center">
-
- 3,023
- </td>
-
- <td align="center">
-
- 3,709
- </td>
-
- <td align="center">
-
- 4,574
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 4,075
- </td>
-
- <td align="center">
-
- 3,234
- </td>
-
- <td align="center">
-
- 3,266
- </td>
-
- <td align="center">
-
- 3,033
- </td>
-
- <td align="center">
-
- 3,603
- </td>
-
- <td align="center">
-
- 4,511
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 4,038
- </td>
-
- <td align="center">
-
- 3,272
- </td>
-
- <td align="center">
-
- 3,316
- </td>
-
- <td align="center">
-
- 2,909
- </td>
-
- <td align="center">
-
- 3,057
- </td>
-
- <td align="center">
-
- 4,081
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,661
- </td>
-
- <td align="center">
-
- 3,359
- </td>
-
- <td align="center">
-
- 3,062
- </td>
-
- <td align="center">
-
- 3,354
- </td>
-
- <td align="center">
-
- 4,215
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,942
- </td>
-
- <td align="center">
-
- 3,417
- </td>
-
- <td align="center">
-
- 3,077
- </td>
-
- <td align="center">
-
- 3,395
- </td>
-
- <td align="center">
-
- 4,139
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,703
- </td>
-
- <td align="center">
-
- 3,169
- </td>
-
- <td align="center">
-
- 3,095
- </td>
-
- <td align="center">
-
- 3,100
- </td>
-
- <td align="center">
-
- 3,752
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,727
- </td>
-
- <td align="center">
-
- 3,469
- </td>
-
- <td align="center">
-
- 3,179
- </td>
-
- <td align="center">
-
- 3,375
- </td>
-
- <td align="center">
-
- 3,874
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,722
- </td>
-
- <td align="center">
-
- 3,145
- </td>
-
- <td align="center">
-
- 3,159
- </td>
-
- <td align="center">
-
- 3,213
- </td>
-
- <td align="center">
-
- 3,567
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
-
- </td>
-
- <td align="center">
-
- 3,866
- </td>
-
- <td align="center">
-
- 3,251
- </td>
-
- <td align="center">
-
- 3,199
- </td>
-
- <td align="center">
-
- 3,324
- </td>
-
- <td align="center">
-
- 3,362
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 23,482
- </td>
-
- <td align="center">
-
- 41,997
- </td>
-
- <td align="center">
-
- 38,946
- </td>
-
- <td align="center">
-
- 37,148
- </td>
-
- <td align="center">
-
- 40,601
- </td>
-
- <td align="center">
-
- 49,764
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Movement Statistics(2002-2007) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2007</th>
-
- <th align="center">2006</th>
-
- <th align="center">2005</th>
-
- <th align="center">2004</th>
-
- <th align="center">2003</th>
-
- <th align="center">2002</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 4,384
- </td>
-
- <td align="center">
-
- 3,933
- </td>
-
- <td align="center">
-
- 3,528
- </td>
-
- <td align="center">
-
- 3,051
- </td>
-
- <td align="center">
-
- 3,257
- </td>
-
- <td align="center">
-
- 2,711
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 4,131
- </td>
-
- <td align="center">
-
- 3,667
- </td>
-
- <td align="center">
-
- 3,331
- </td>
-
- <td align="center">
-
- 2,372
- </td>
-
- <td align="center">
-
- 3,003
- </td>
-
- <td align="center">
-
- 2,747
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 4,349
- </td>
-
- <td align="center">
-
- 4,345
- </td>
-
- <td align="center">
-
- 3,549
- </td>
-
- <td align="center">
-
- 3,049
- </td>
-
- <td align="center">
-
- 3,109
- </td>
-
- <td align="center">
-
- 2,985
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 4,460
- </td>
-
- <td align="center">
-
- 4,490
- </td>
-
- <td align="center">
-
- 3,832
- </td>
-
- <td align="center">
-
- 3,359
- </td>
-
- <td align="center">
-
- 2,033
- </td>
-
- <td align="center">
-
- 2,928
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 4,629
- </td>
-
- <td align="center">
-
- 4,245
- </td>
-
- <td align="center">
-
- 3,663
- </td>
-
- <td align="center">
-
- 3,251
- </td>
-
- <td align="center">
-
- 1,229
- </td>
-
- <td align="center">
-
- 3,109
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 4,365
- </td>
-
- <td align="center">
-
- 4,124
- </td>
-
- <td align="center">
-
- 3,752
- </td>
-
- <td align="center">
-
- 3,414
- </td>
-
- <td align="center">
-
- 1,217
- </td>
-
- <td align="center">
-
- 3,049
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
- 4,612
- </td>
-
- <td align="center">
-
- 4,386
- </td>
-
- <td align="center">
-
- 3,876
- </td>
-
- <td align="center">
-
- 3,664
- </td>
-
- <td align="center">
-
- 2,423
- </td>
-
- <td align="center">
-
- 3,078
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
- 4,446
- </td>
-
- <td align="center">
-
- 4,373
- </td>
-
- <td align="center">
-
- 3,987
- </td>
-
- <td align="center">
-
- 3,631
- </td>
-
- <td align="center">
-
- 3,040
- </td>
-
- <td align="center">
-
- 3,166
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
- 4,414
- </td>
-
- <td align="center">
-
- 4,311
- </td>
-
- <td align="center">
-
- 3,782
- </td>
-
- <td align="center">
-
- 3,514
- </td>
-
- <td align="center">
-
- 2,809
- </td>
-
- <td align="center">
-
- 3,239
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
- 4,445
- </td>
-
- <td align="center">
-
- 4,455
- </td>
-
- <td align="center">
-
- 3,898
- </td>
-
- <td align="center">
-
- 3,744
- </td>
-
- <td align="center">
-
- 3,052
- </td>
-
- <td align="center">
-
- 3,562
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 4,563
- </td>
-
- <td align="center">
-
- 4,285
- </td>
-
- <td align="center">
-
- 3,951
- </td>
-
- <td align="center">
-
- 3,694
- </td>
-
- <td align="center">
-
- 3,125
- </td>
-
- <td align="center">
-
- 3,546
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 4,588
- </td>
-
- <td align="center">
-
- 4,435
- </td>
-
- <td align="center">
-
- 3,855
- </td>
-
- <td align="center">
-
- 3,763
- </td>
-
- <td align="center">
-
- 2,996
- </td>
-
- <td align="center">
-
- 3,444
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 53,386
- </td>
-
- <td align="center">
-
- 51,049
- </td>
-
- <td align="center">
-
- 45,004
- </td>
-
- <td align="center">
-
- 40,506
- </td>
-
- <td align="center">
-
- 31,293
- </td>
-
- <td align="center">
-
- 37,564
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Movement Statistics(1996-2001) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">2001</th>
-
- <th align="center">2000</th>
-
- <th align="center">1999</th>
-
- <th align="center">1998</th>
-
- <th align="center">1997</th>
-
- <th align="center">1996</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
- 2,694
- </td>
-
- <td align="center">
-
- 2,201
- </td>
-
- <td align="center">
-
- 1,835
- </td>
-
- <td align="center">
-
- 2,177
- </td>
-
- <td align="center">
-
- 1,353
- </td>
-
- <td align="center">
-
- 744
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
- 2,364
- </td>
-
- <td align="center">
-
- 2,357
- </td>
-
- <td align="center">
-
- 1,826
- </td>
-
- <td align="center">
-
- 1,740
- </td>
-
- <td align="center">
-
- 1,339
- </td>
-
- <td align="center">
-
- 692
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
- 2,543
- </td>
-
- <td align="center">
-
- 2,206
- </td>
-
- <td align="center">
-
- 1,895
- </td>
-
- <td align="center">
-
- 1,911
- </td>
-
- <td align="center">
-
- 1,533
- </td>
-
- <td align="center">
-
- 872
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
- 2,531
- </td>
-
- <td align="center">
-
- 2,311
- </td>
-
- <td align="center">
-
- 2,076
- </td>
-
- <td align="center">
-
- 1,886
- </td>
-
- <td align="center">
-
- 1,587
- </td>
-
- <td align="center">
-
- 1,026
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
- 2,579
- </td>
-
- <td align="center">
-
- 2,383
- </td>
-
- <td align="center">
-
- 1,914
- </td>
-
- <td align="center">
-
- 2,102
- </td>
-
- <td align="center">
-
- 1,720
- </td>
-
- <td align="center">
-
- 1,115
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
- 2,681
- </td>
-
- <td align="center">
-
- 2,370
- </td>
-
- <td align="center">
-
- 1,890
- </td>
-
- <td align="center">
-
- 2,038
- </td>
-
- <td align="center">
-
- 1,716
- </td>
-
- <td align="center">
-
- 1,037
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
- 2,903
- </td>
-
- <td align="center">
-
- 2,609
- </td>
-
- <td align="center">
-
- 1,916
- </td>
-
- <td align="center">
-
- 2,078
- </td>
-
- <td align="center">
-
- 1,693
- </td>
-
- <td align="center">
-
- 1,209
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
- 3,037
- </td>
-
- <td align="center">
-
- 2,487
- </td>
-
- <td align="center">
-
- 1,968
- </td>
-
- <td align="center">
-
- 2,061
- </td>
-
- <td align="center">
-
- 1,676
- </td>
-
- <td align="center">
-
- 1,241
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
- 2,767
- </td>
-
- <td align="center">
-
- 2,329
- </td>
-
- <td align="center">
-
- 1,955
- </td>
-
- <td align="center">
-
- 1,970
- </td>
-
- <td align="center">
-
- 1,681
- </td>
-
- <td align="center">
-
- 1,263
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
- 2,922
- </td>
-
- <td align="center">
-
- 2,417
- </td>
-
- <td align="center">
-
- 2,267
- </td>
-
- <td align="center">
-
- 1,969
- </td>
-
- <td align="center">
-
- 1,809
- </td>
-
- <td align="center">
-
- 1,368
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 2,670
- </td>
-
- <td align="center">
-
- 2,273
- </td>
-
- <td align="center">
-
- 2,132
- </td>
-
- <td align="center">
-
- 2,102
- </td>
-
- <td align="center">
-
- 1,786
- </td>
-
- <td align="center">
-
- 1,433
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 2,815
- </td>
-
- <td align="center">
-
- 2,749
- </td>
-
- <td align="center">
-
- 2,187
- </td>
-
- <td align="center">
-
- 1,981
- </td>
-
- <td align="center">
-
- 1,944
- </td>
-
- <td align="center">
-
- 1,386
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 32,506
- </td>
-
- <td align="center">
-
- 28,692
- </td>
-
- <td align="center">
-
- 23,861
- </td>
-
- <td align="center">
-
- 24,015
- </td>
-
- <td align="center">
-
- 19,837
- </td>
-
- <td align="center">
-
- 13,386
- </td>
-
- </tr>
- </tbody>
- </table>
-
- <br><br><br>
- <span id="title">Movement Statistics(1995-1995) </span><br><br>
- <table class="style1">
- <tbody>
- <tr height="17">
- <th align="right"> </th>
-
- <th align="center">1995</th>
-
- </tr>
- <tr height="17">
- <th align="right">January</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">February</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">March</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">April</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">May</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">June</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">July</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">August</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">September</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">October</th>
-
- <td align="center">
-
-
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">November</th>
-
- <td align="center">
-
- 126
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">December</th>
-
- <td align="center">
-
- 536
- </td>
-
- </tr>
- <tr height="17">
- <th align="right">Total</th>
-
- <td align="center">
-
- 662
- </td>
-
- </tr>
- </tbody>
- </table>
-
-
- <br><br><br>
- <div align="right"><img src="./macau_files/mov_stat.jpg" alt="passenger statistic picture" width="565" height="318"></div>
-
-
- </div>
-
- </div>
- </div>
- </div>
-
-
- <!--/*end context*/-->
- </div>
- </div>
-
- <div id="buttombar"><img height="100" src="./macau_files/buttombar.gif"></div>
- <div id="logo">
-
-
-
- <div>
-
- <a href="http://www.macau-airport.com/envirop/zh/default.php" style="display: inline;"><img height="80" src="./macau_files/38.jpg"></a>
-
- </div>
-
-
- <div>
-
- <a href="http://www.macau-airport.com/envirop/en/default.php" style="display: inline;"><img height="80" src="./macau_files/36.jpg"></a>
-
- </div>
-
-</div>
-</div>
-
-
-
-</div>
-
-
-<div id="footer">
-<hr>
- <div id="footer-left">
- <a href="http://www.camacau.com/index">Main Page</a> |
- <a href="http://www.camacau.com/geographic_information">Our Business</a> |
- <a href="http://www.camacau.com/about_us">About Us</a> |
- <a href="http://www.camacau.com/pressReleases_list">Media Centre</a> |
- <a href="http://www.camacau.com/rlinks2">Related Links</a> |
- <a href="http://www.camacau.com/download_list">Interactive</a>
- </div>
- <div id="footer-right">Macau International Airport Co. Ltd. | Copyright 2013 | All rights reserved</div>
-</div>
-</div>
-</div>
-
-<div id="___fireplug_chrome_extension___" style="display: none;"></div><iframe id="rdbIndicator" width="100%" height="270" border="0" src="./macau_files/indicator.html" style="display: none; border: 0; position: fixed; left: 0; top: 0; z-index: 2147483647"></iframe><link rel="stylesheet" type="text/css" media="screen" href="chrome-extension://fcdjadjbdihbaodagojiomdljhjhjfho/css/atd.css"></body></html>
\ No newline at end of file
diff --git a/pandas/tests/io/data/html/nyse_wsj.html b/pandas/tests/io/data/html/nyse_wsj.html
deleted file mode 100644
index 2360bd49e9950..0000000000000
--- a/pandas/tests/io/data/html/nyse_wsj.html
+++ /dev/null
@@ -1,1207 +0,0 @@
-<table border="0" cellpadding="0" cellspacing="0" class="autocompleteContainer">
- <tbody>
- <tr>
- <td>
- <div class="symbolCompleteContainer">
- <div><input autocomplete="off" maxlength="80" name="KEYWORDS" type="text" value=""/></div>
- </div>
- <div class="hat_button">
- <span class="hat_button_text">SEARCH</span>
- </div>
- <div style="clear: both;"><div class="subSymbolCompleteResults"></div></div>
- </td>
- </tr>
- </tbody>
-</table>
-<table bgcolor="" border="0" cellpadding="0" cellspacing="0" width="100%"><tbody><tr>
- <td height="0"><img alt="" border="0" height="0" src="null/img/b.gif" width="1"/></td>
-</tr></tbody></table>
-<table border="0" cellpadding="0" cellspacing="0" class="mdcTable" width="100%">
- <tbody><tr>
- <td class="colhead" style="text-align:left"> </td>
- <td class="colhead" style="text-align:left">Issue<span class="textb10gray" style="margin-left: 8px;">(Roll over for charts and headlines)</span>
- </td>
- <td class="colhead">Volume</td>
- <td class="colhead">Price</td>
- <td class="colhead" style="width:35px;">Chg</td>
- <td class="colhead">% Chg</td>
- </tr>
- <tr>
- <td class="num">1</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=JCP" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'JCP')">J.C. Penney (JCP)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">250,697,455</td>
- <td class="nnum">$9.05</td>
- <td class="nnum">-1.37</td>
- <td class="nnum" style="border-right:0px">-13.15</td>
- </tr>
- <tr>
- <td class="num">2</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=BAC" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'BAC')">Bank of America (BAC)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">77,162,103</td>
- <td class="nnum">13.90</td>
- <td class="nnum">-0.18</td>
- <td class="nnum" style="border-right:0px">-1.28</td>
- </tr>
- <tr>
- <td class="num">3</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=RAD" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'RAD')">Rite Aid (RAD)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">52,140,382</td>
- <td class="nnum">4.70</td>
- <td class="nnum">-0.08</td>
- <td class="nnum" style="border-right:0px">-1.67</td>
- </tr>
- <tr>
- <td class="num">4</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=F" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'F')">Ford Motor (F)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">33,745,287</td>
- <td class="nnum">17.05</td>
- <td class="nnum">-0.22</td>
- <td class="nnum" style="border-right:0px">-1.27</td>
- </tr>
- <tr>
- <td class="num">5</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=PFE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'PFE')">Pfizer (PFE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">27,801,853</td>
- <td class="pnum">28.88</td>
- <td class="pnum">0.36</td>
- <td class="pnum" style="border-right:0px">1.26</td>
- </tr>
- <tr>
- <td class="num">6</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=HTZ" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'HTZ')">Hertz Global Hldgs (HTZ)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">25,821,264</td>
- <td class="pnum">22.32</td>
- <td class="pnum">0.69</td>
- <td class="pnum" style="border-right:0px">3.19</td>
- </tr>
- <tr>
- <td class="num">7</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=GE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'GE')">General Electric (GE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">25,142,064</td>
- <td class="nnum">24.05</td>
- <td class="nnum">-0.20</td>
- <td class="nnum" style="border-right:0px">-0.82</td>
- </tr>
- <tr>
- <td class="num">8</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ELN" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ELN')">Elan ADS (ELN)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">24,725,209</td>
- <td class="pnum">15.59</td>
- <td class="pnum">0.08</td>
- <td class="pnum" style="border-right:0px">0.52</td>
- </tr>
- <tr>
- <td class="num">9</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=JPM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'JPM')">JPMorgan Chase (JPM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">22,402,756</td>
- <td class="pnum">52.24</td>
- <td class="pnum">0.35</td>
- <td class="pnum" style="border-right:0px">0.67</td>
- </tr>
- <tr>
- <td class="num">10</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=RF" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'RF')">Regions Financial (RF)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">20,790,532</td>
- <td class="pnum">9.30</td>
- <td class="pnum">0.12</td>
- <td class="pnum" style="border-right:0px">1.31</td>
- </tr>
- <tr>
- <td class="num">11</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=VMEM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'VMEM')">Violin Memory (VMEM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">20,669,846</td>
- <td class="nnum">7.02</td>
- <td class="nnum">-1.98</td>
- <td class="nnum" style="border-right:0px">-22.00</td>
- </tr>
- <tr>
- <td class="num">12</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=C" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'C')">Citigroup (C)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">19,979,932</td>
- <td class="nnum">48.89</td>
- <td class="nnum">-0.04</td>
- <td class="nnum" style="border-right:0px">-0.08</td>
- </tr>
- <tr>
- <td class="num">13</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=NOK" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'NOK')">Nokia ADS (NOK)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">19,585,075</td>
- <td class="pnum">6.66</td>
- <td class="pnum">0.02</td>
- <td class="pnum" style="border-right:0px">0.30</td>
- </tr>
- <tr>
- <td class="num">14</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=WFC" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'WFC')">Wells Fargo (WFC)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">19,478,590</td>
- <td class="nnum">41.59</td>
- <td class="nnum">-0.02</td>
- <td class="nnum" style="border-right:0px">-0.05</td>
- </tr>
- <tr>
- <td class="num">15</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=VALE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'VALE')">Vale ADS (VALE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">18,781,987</td>
- <td class="nnum">15.60</td>
- <td class="nnum">-0.52</td>
- <td class="nnum" style="border-right:0px">-3.23</td>
- </tr>
- <tr>
- <td class="num">16</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=DAL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'DAL')">Delta Air Lines (DAL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">16,013,956</td>
- <td class="nnum">23.57</td>
- <td class="nnum">-0.44</td>
- <td class="nnum" style="border-right:0px">-1.83</td>
- </tr>
- <tr>
- <td class="num">17</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=EMC" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'EMC')">EMC (EMC)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">15,771,252</td>
- <td class="nnum">26.07</td>
- <td class="nnum">-0.11</td>
- <td class="nnum" style="border-right:0px">-0.42</td>
- </tr>
- <tr>
- <td class="num">18</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=NKE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'NKE')">Nike Cl B (NKE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">15,514,717</td>
- <td class="pnum">73.64</td>
- <td class="pnum">3.30</td>
- <td class="pnum" style="border-right:0px">4.69</td>
- </tr>
- <tr>
- <td class="num">19</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=AA" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'AA')">Alcoa (AA)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">14,061,073</td>
- <td class="nnum">8.20</td>
- <td class="nnum">-0.07</td>
- <td class="nnum" style="border-right:0px">-0.85</td>
- </tr>
- <tr>
- <td class="num">20</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=GM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'GM')">General Motors (GM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">13,984,004</td>
- <td class="nnum">36.37</td>
- <td class="nnum">-0.58</td>
- <td class="nnum" style="border-right:0px">-1.57</td>
- </tr>
- <tr>
- <td class="num">21</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ORCL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ORCL')">Oracle (ORCL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">13,856,671</td>
- <td class="nnum">33.78</td>
- <td class="nnum">-0.03</td>
- <td class="nnum" style="border-right:0px">-0.09</td>
- </tr>
- <tr>
- <td class="num">22</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=T" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'T')">AT&T (T)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">13,736,948</td>
- <td class="nnum">33.98</td>
- <td class="nnum">-0.25</td>
- <td class="nnum" style="border-right:0px">-0.73</td>
- </tr>
- <tr>
- <td class="num">23</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=TSL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'TSL')">Trina Solar ADS (TSL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">13,284,202</td>
- <td class="pnum">14.83</td>
- <td class="pnum">1.99</td>
- <td class="pnum" style="border-right:0px">15.50</td>
- </tr>
- <tr>
- <td class="num">24</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=YGE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'YGE')">Yingli Green Energy Holding ADS (YGE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">12,978,378</td>
- <td class="pnum">6.73</td>
- <td class="pnum">0.63</td>
- <td class="pnum" style="border-right:0px">10.33</td>
- </tr>
- <tr>
- <td class="num">25</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=PBR" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'PBR')">Petroleo Brasileiro ADS (PBR)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">12,833,660</td>
- <td class="nnum">15.40</td>
- <td class="nnum">-0.21</td>
- <td class="nnum" style="border-right:0px">-1.35</td>
- </tr>
- <tr>
- <td class="num">26</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=UAL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'UAL')">United Continental Holdings (UAL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">12,603,225</td>
- <td class="nnum">30.91</td>
- <td class="nnum">-3.16</td>
- <td class="nnum" style="border-right:0px">-9.28</td>
- </tr>
- <tr>
- <td class="num">27</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=KO" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'KO')">Coca-Cola (KO)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">12,343,452</td>
- <td class="nnum">38.40</td>
- <td class="nnum">-0.34</td>
- <td class="nnum" style="border-right:0px">-0.88</td>
- </tr>
- <tr>
- <td class="num">28</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ACI" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ACI')">Arch Coal (ACI)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">12,261,138</td>
- <td class="nnum">4.25</td>
- <td class="nnum">-0.28</td>
- <td class="nnum" style="border-right:0px">-6.18</td>
- </tr>
- <tr>
- <td class="num">29</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MS" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MS')">Morgan Stanley (MS)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,956,345</td>
- <td class="nnum">27.08</td>
- <td class="nnum">-0.07</td>
- <td class="nnum" style="border-right:0px">-0.26</td>
- </tr>
- <tr>
- <td class="num">30</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=P" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'P')">Pandora Media (P)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,829,963</td>
- <td class="pnum">25.52</td>
- <td class="pnum">0.13</td>
- <td class="pnum" style="border-right:0px">0.51</td>
- </tr>
- <tr>
- <td class="num">31</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ABX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ABX')">Barrick Gold (ABX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,775,585</td>
- <td class="num">18.53</td>
- <td class="num">0.00</td>
- <td class="num" style="border-right:0px">0.00</td>
- </tr>
- <tr>
- <td class="num">32</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ABT" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ABT')">Abbott Laboratories (ABT)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,755,718</td>
- <td class="nnum">33.14</td>
- <td class="nnum">-0.52</td>
- <td class="nnum" style="border-right:0px">-1.54</td>
- </tr>
- <tr>
- <td class="num">33</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=BSBR" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'BSBR')">Banco Santander Brasil ADS (BSBR)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,587,310</td>
- <td class="pnum">7.01</td>
- <td class="pnum">0.46</td>
- <td class="pnum" style="border-right:0px">7.02</td>
- </tr>
- <tr>
- <td class="num">34</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=AMD" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'AMD')">Advanced Micro Devices (AMD)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,337,609</td>
- <td class="nnum">3.86</td>
- <td class="nnum">-0.03</td>
- <td class="nnum" style="border-right:0px">-0.77</td>
- </tr>
- <tr>
- <td class="num">35</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=NLY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'NLY')">Annaly Capital Management (NLY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">11,004,440</td>
- <td class="nnum">11.63</td>
- <td class="nnum">-0.07</td>
- <td class="nnum" style="border-right:0px">-0.60</td>
- </tr>
- <tr>
- <td class="num">36</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ANR" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ANR')">Alpha Natural Resources (ANR)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,941,074</td>
- <td class="nnum">6.08</td>
- <td class="nnum">-0.19</td>
- <td class="nnum" style="border-right:0px">-3.03</td>
- </tr>
- <tr>
- <td class="num">37</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=XOM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'XOM')">Exxon Mobil (XOM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,668,115</td>
- <td class="nnum">86.90</td>
- <td class="nnum">-0.17</td>
- <td class="nnum" style="border-right:0px">-0.20</td>
- </tr>
- <tr>
- <td class="num">38</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ITUB" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ITUB')">Itau Unibanco Holding ADS (ITUB)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,638,803</td>
- <td class="pnum">14.30</td>
- <td class="pnum">0.23</td>
- <td class="pnum" style="border-right:0px">1.63</td>
- </tr>
- <tr>
- <td class="num">39</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MRK" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MRK')">Merck&Co (MRK)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,388,152</td>
- <td class="pnum">47.79</td>
- <td class="pnum">0.11</td>
- <td class="pnum" style="border-right:0px">0.23</td>
- </tr>
- <tr>
- <td class="num">40</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ALU" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ALU')">Alcatel-Lucent ADS (ALU)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,181,833</td>
- <td class="pnum">3.65</td>
- <td class="pnum">0.01</td>
- <td class="pnum" style="border-right:0px">0.27</td>
- </tr>
- <tr>
- <td class="num">41</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=VZ" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'VZ')">Verizon Communications (VZ)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,139,321</td>
- <td class="nnum">47.00</td>
- <td class="nnum">-0.67</td>
- <td class="nnum" style="border-right:0px">-1.41</td>
- </tr>
- <tr>
- <td class="num">42</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MHR" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MHR')">Magnum Hunter Resources (MHR)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">10,004,303</td>
- <td class="pnum">6.33</td>
- <td class="pnum">0.46</td>
- <td class="pnum" style="border-right:0px">7.84</td>
- </tr>
- <tr>
- <td class="num">43</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=HPQ" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'HPQ')">Hewlett-Packard (HPQ)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,948,935</td>
- <td class="nnum">21.17</td>
- <td class="nnum">-0.13</td>
- <td class="nnum" style="border-right:0px">-0.61</td>
- </tr>
- <tr>
- <td class="num">44</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=PHM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'PHM')">PulteGroup (PHM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,899,141</td>
- <td class="nnum">16.57</td>
- <td class="nnum">-0.41</td>
- <td class="nnum" style="border-right:0px">-2.41</td>
- </tr>
- <tr>
- <td class="num">45</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SOL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SOL')">ReneSola ADS (SOL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,667,438</td>
- <td class="pnum">4.84</td>
- <td class="pnum">0.39</td>
- <td class="pnum" style="border-right:0px">8.76</td>
- </tr>
- <tr>
- <td class="num">46</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=GLW" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'GLW')">Corning (GLW)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,547,265</td>
- <td class="nnum">14.73</td>
- <td class="nnum">-0.21</td>
- <td class="nnum" style="border-right:0px">-1.41</td>
- </tr>
- <tr>
- <td class="num">47</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=COLE" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'COLE')">Cole Real Estate Investments (COLE)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,544,021</td>
- <td class="pnum">12.21</td>
- <td class="pnum">0.01</td>
- <td class="pnum" style="border-right:0px">0.08</td>
- </tr>
- <tr>
- <td class="num">48</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=DOW" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'DOW')">Dow Chemical (DOW)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,150,479</td>
- <td class="nnum">39.02</td>
- <td class="nnum">-0.97</td>
- <td class="nnum" style="border-right:0px">-2.43</td>
- </tr>
- <tr>
- <td class="num">49</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=IGT" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'IGT')">International Game Technology (IGT)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">9,129,123</td>
- <td class="nnum">19.23</td>
- <td class="nnum">-1.44</td>
- <td class="nnum" style="border-right:0px">-6.97</td>
- </tr>
- <tr>
- <td class="num">50</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ACN" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ACN')">Accenture Cl A (ACN)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,773,260</td>
- <td class="nnum">74.09</td>
- <td class="nnum">-1.78</td>
- <td class="nnum" style="border-right:0px">-2.35</td>
- </tr>
- <tr>
- <td class="num">51</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=KEY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'KEY')">KeyCorp (KEY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,599,333</td>
- <td class="pnum">11.36</td>
- <td class="pnum">0.02</td>
- <td class="pnum" style="border-right:0px">0.18</td>
- </tr>
- <tr>
- <td class="num">52</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=BMY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'BMY')">Bristol-Myers Squibb (BMY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,440,709</td>
- <td class="nnum">46.20</td>
- <td class="nnum">-0.73</td>
- <td class="nnum" style="border-right:0px">-1.56</td>
- </tr>
- <tr>
- <td class="num">53</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SID" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SID')">Companhia Siderurgica Nacional ADS (SID)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,437,636</td>
- <td class="nnum">4.36</td>
- <td class="nnum">-0.05</td>
- <td class="nnum" style="border-right:0px">-1.13</td>
- </tr>
- <tr>
- <td class="num">54</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=HRB" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'HRB')">H&R Block (HRB)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,240,984</td>
- <td class="pnum">26.36</td>
- <td class="pnum">0.31</td>
- <td class="pnum" style="border-right:0px">1.19</td>
- </tr>
- <tr>
- <td class="num">55</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MTG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MTG')">MGIC Investment (MTG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,135,037</td>
- <td class="nnum">7.26</td>
- <td class="nnum">-0.10</td>
- <td class="nnum" style="border-right:0px">-1.36</td>
- </tr>
- <tr>
- <td class="num">56</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=RNG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'RNG')">RingCentral Cl A (RNG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,117,469</td>
- <td class="pnum">18.20</td>
- <td class="pnum">5.20</td>
- <td class="pnum" style="border-right:0px">40.00</td>
- </tr>
- <tr>
- <td class="num">57</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=X" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'X')">United States Steel (X)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,107,899</td>
- <td class="nnum">20.44</td>
- <td class="nnum">-0.66</td>
- <td class="nnum" style="border-right:0px">-3.13</td>
- </tr>
- <tr>
- <td class="num">58</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CLF" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CLF')">Cliffs Natural Resources (CLF)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,041,572</td>
- <td class="nnum">21.00</td>
- <td class="nnum">-0.83</td>
- <td class="nnum" style="border-right:0px">-3.80</td>
- </tr>
- <tr>
- <td class="num">59</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=NEM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'NEM')">Newmont Mining (NEM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">8,014,250</td>
- <td class="nnum">27.98</td>
- <td class="nnum">-0.19</td>
- <td class="nnum" style="border-right:0px">-0.67</td>
- </tr>
- <tr>
- <td class="num">60</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MO" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MO')">Altria Group (MO)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,786,048</td>
- <td class="nnum">34.71</td>
- <td class="nnum">-0.29</td>
- <td class="nnum" style="border-right:0px">-0.83</td>
- </tr>
- <tr>
- <td class="num">61</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SD" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SD')">SandRidge Energy (SD)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,782,745</td>
- <td class="nnum">5.93</td>
- <td class="nnum">-0.06</td>
- <td class="nnum" style="border-right:0px">-1.00</td>
- </tr>
- <tr>
- <td class="num">62</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MCP" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MCP')">Molycorp (MCP)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,735,831</td>
- <td class="nnum">6.73</td>
- <td class="nnum">-0.45</td>
- <td class="nnum" style="border-right:0px">-6.27</td>
- </tr>
- <tr>
- <td class="num">63</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=HAL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'HAL')">Halliburton (HAL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,728,735</td>
- <td class="nnum">48.39</td>
- <td class="nnum">-0.32</td>
- <td class="nnum" style="border-right:0px">-0.66</td>
- </tr>
- <tr>
- <td class="num">64</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=TSM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'TSM')">Taiwan Semiconductor Manufacturing ADS (TSM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,661,397</td>
- <td class="nnum">17.07</td>
- <td class="nnum">-0.25</td>
- <td class="nnum" style="border-right:0px">-1.44</td>
- </tr>
- <tr>
- <td class="num">65</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=FCX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'FCX')">Freeport-McMoRan Copper&Gold (FCX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,622,803</td>
- <td class="nnum">33.42</td>
- <td class="nnum">-0.45</td>
- <td class="nnum" style="border-right:0px">-1.33</td>
- </tr>
- <tr>
- <td class="num">66</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=KOG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'KOG')">Kodiak Oil&Gas (KOG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,543,806</td>
- <td class="pnum">11.94</td>
- <td class="pnum">0.16</td>
- <td class="pnum" style="border-right:0px">1.36</td>
- </tr>
- <tr>
- <td class="num">67</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=XRX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'XRX')">Xerox (XRX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,440,689</td>
- <td class="nnum">10.37</td>
- <td class="nnum">-0.01</td>
- <td class="nnum" style="border-right:0px">-0.10</td>
- </tr>
- <tr>
- <td class="num">68</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=S" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'S')">Sprint (S)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,291,351</td>
- <td class="nnum">6.16</td>
- <td class="nnum">-0.14</td>
- <td class="nnum" style="border-right:0px">-2.22</td>
- </tr>
- <tr>
- <td class="num">69</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=TWO" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'TWO')">Two Harbors Investment (TWO)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,153,803</td>
- <td class="pnum">9.79</td>
- <td class="pnum">0.05</td>
- <td class="pnum" style="border-right:0px">0.51</td>
- </tr>
- <tr>
- <td class="num">70</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=WLT" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'WLT')">Walter Energy (WLT)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,152,192</td>
- <td class="nnum">14.19</td>
- <td class="nnum">-0.36</td>
- <td class="nnum" style="border-right:0px">-2.47</td>
- </tr>
- <tr>
- <td class="num">71</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=IP" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'IP')">International Paper (IP)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,123,722</td>
- <td class="nnum">45.44</td>
- <td class="nnum">-1.85</td>
- <td class="nnum" style="border-right:0px">-3.91</td>
- </tr>
- <tr>
- <td class="num">72</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=PPL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'PPL')">PPL (PPL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">7,026,292</td>
- <td class="nnum">30.34</td>
- <td class="nnum">-0.13</td>
- <td class="nnum" style="border-right:0px">-0.43</td>
- </tr>
- <tr>
- <td class="num">73</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=GG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'GG')">Goldcorp (GG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,857,447</td>
- <td class="pnum">25.76</td>
- <td class="pnum">0.08</td>
- <td class="pnum" style="border-right:0px">0.31</td>
- </tr>
- <tr>
- <td class="num">74</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=TWX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'TWX')">Time Warner (TWX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,807,237</td>
- <td class="pnum">66.20</td>
- <td class="pnum">1.33</td>
- <td class="pnum" style="border-right:0px">2.05</td>
- </tr>
- <tr>
- <td class="num">75</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SNV" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SNV')">Synovus Financial (SNV)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,764,805</td>
- <td class="pnum">3.29</td>
- <td class="pnum">0.02</td>
- <td class="pnum" style="border-right:0px">0.61</td>
- </tr>
- <tr>
- <td class="num">76</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=AKS" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'AKS')">AK Steel Holding (AKS)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,662,599</td>
- <td class="nnum">3.83</td>
- <td class="nnum">-0.11</td>
- <td class="nnum" style="border-right:0px">-2.79</td>
- </tr>
- <tr>
- <td class="num">77</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=BSX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'BSX')">Boston Scientific (BSX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,629,084</td>
- <td class="nnum">11.52</td>
- <td class="nnum">-0.15</td>
- <td class="nnum" style="border-right:0px">-1.29</td>
- </tr>
- <tr>
- <td class="num">78</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=EGO" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'EGO')">Eldorado Gold (EGO)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,596,902</td>
- <td class="nnum">6.65</td>
- <td class="nnum">-0.03</td>
- <td class="nnum" style="border-right:0px">-0.45</td>
- </tr>
- <tr>
- <td class="num">79</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=NR" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'NR')">Newpark Resources (NR)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,552,453</td>
- <td class="pnum">12.56</td>
- <td class="pnum">0.09</td>
- <td class="pnum" style="border-right:0px">0.72</td>
- </tr>
- <tr>
- <td class="num">80</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=ABBV" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'ABBV')">AbbVie (ABBV)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,525,524</td>
- <td class="nnum">44.33</td>
- <td class="nnum">-0.67</td>
- <td class="nnum" style="border-right:0px">-1.49</td>
- </tr>
- <tr>
- <td class="num">81</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MBI" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MBI')">MBIA (MBI)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,416,587</td>
- <td class="nnum">10.38</td>
- <td class="nnum">-0.43</td>
- <td class="nnum" style="border-right:0px">-3.98</td>
- </tr>
- <tr>
- <td class="num">82</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SAI" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SAI')">SAIC (SAI)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,404,587</td>
- <td class="pnum">16.03</td>
- <td class="pnum">0.13</td>
- <td class="pnum" style="border-right:0px">0.82</td>
- </tr>
- <tr>
- <td class="num">83</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=PG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'PG')">Procter&Gamble (PG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,389,143</td>
- <td class="nnum">77.21</td>
- <td class="nnum">-0.84</td>
- <td class="nnum" style="border-right:0px">-1.08</td>
- </tr>
- <tr>
- <td class="num">84</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=IAG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'IAG')">IAMGOLD (IAG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,293,001</td>
- <td class="nnum">4.77</td>
- <td class="nnum">-0.06</td>
- <td class="nnum" style="border-right:0px">-1.24</td>
- </tr>
- <tr>
- <td class="num">85</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=SWY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'SWY')">Safeway (SWY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,268,184</td>
- <td class="nnum">32.25</td>
- <td class="nnum">-0.29</td>
- <td class="nnum" style="border-right:0px">-0.89</td>
- </tr>
- <tr>
- <td class="num">86</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=KGC" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'KGC')">Kinross Gold (KGC)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">6,112,658</td>
- <td class="nnum">4.99</td>
- <td class="nnum">-0.03</td>
- <td class="nnum" style="border-right:0px">-0.60</td>
- </tr>
- <tr>
- <td class="num">87</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MGM" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MGM')">MGM Resorts International (MGM)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,986,143</td>
- <td class="nnum">20.22</td>
- <td class="nnum">-0.05</td>
- <td class="nnum" style="border-right:0px">-0.25</td>
- </tr>
- <tr>
- <td class="num">88</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CX')">Cemex ADS (CX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,907,040</td>
- <td class="nnum">11.27</td>
- <td class="nnum">-0.06</td>
- <td class="nnum" style="border-right:0px">-0.53</td>
- </tr>
- <tr>
- <td class="num">89</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=AIG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'AIG')">American International Group (AIG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,900,133</td>
- <td class="nnum">49.15</td>
- <td class="nnum">-0.30</td>
- <td class="nnum" style="border-right:0px">-0.61</td>
- </tr>
- <tr>
- <td class="num">90</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CHK" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CHK')">Chesapeake Energy (CHK)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,848,016</td>
- <td class="nnum">26.21</td>
- <td class="nnum">-0.20</td>
- <td class="nnum" style="border-right:0px">-0.76</td>
- </tr>
- <tr>
- <td class="num">91</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=RSH" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'RSH')">RadioShack (RSH)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,837,833</td>
- <td class="nnum">3.44</td>
- <td class="nnum">-0.43</td>
- <td class="nnum" style="border-right:0px">-11.11</td>
- </tr>
- <tr>
- <td class="num">92</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=USB" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'USB')">U.S. Bancorp (USB)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,814,373</td>
- <td class="nnum">36.50</td>
- <td class="nnum">-0.04</td>
- <td class="nnum" style="border-right:0px">-0.11</td>
- </tr>
- <tr>
- <td class="num">93</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=LLY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'LLY')">Eli Lilly (LLY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,776,991</td>
- <td class="nnum">50.50</td>
- <td class="nnum">-0.54</td>
- <td class="nnum" style="border-right:0px">-1.06</td>
- </tr>
- <tr>
- <td class="num">94</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MET" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MET')">MetLife (MET)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,774,996</td>
- <td class="nnum">47.21</td>
- <td class="nnum">-0.37</td>
- <td class="nnum" style="border-right:0px">-0.78</td>
- </tr>
- <tr>
- <td class="num">95</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=AUY" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'AUY')">Yamana Gold (AUY)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,742,426</td>
- <td class="pnum">10.37</td>
- <td class="pnum">0.03</td>
- <td class="pnum" style="border-right:0px">0.29</td>
- </tr>
- <tr>
- <td class="num">96</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CBS" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CBS')">CBS Cl B (CBS)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,718,858</td>
- <td class="nnum">55.50</td>
- <td class="nnum">-0.06</td>
- <td class="nnum" style="border-right:0px">-0.11</td>
- </tr>
- <tr>
- <td class="num">97</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CSX" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CSX')">CSX (CSX)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,710,066</td>
- <td class="nnum">25.85</td>
- <td class="nnum">-0.13</td>
- <td class="nnum" style="border-right:0px">-0.50</td>
- </tr>
- <tr>
- <td class="num">98</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=CCL" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'CCL')">Carnival (CCL)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,661,325</td>
- <td class="nnum">32.88</td>
- <td class="nnum">-0.05</td>
- <td class="nnum" style="border-right:0px">-0.15</td>
- </tr>
- <tr>
- <td class="num">99</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=MOS" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'MOS')">Mosaic (MOS)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,595,592</td>
- <td class="nnum">43.43</td>
- <td class="nnum">-0.76</td>
- <td class="nnum" style="border-right:0px">-1.72</td>
- </tr>
- <tr>
- <td class="num">100</td>
- <td class="text" style="max-width:307px">
- <a class="linkb" href="/public/quotes/main.html?symbol=WAG" onmouseout="com.dowjones.rolloverQuotes.hidelater();" onmouseover="com.dowjones.rolloverQuotes.show(this,'WAG')">Walgreen (WAG)
- </a>
- </td>
- <td align="right" class="num" style="font-weight:bold;">5,568,310</td>
- <td class="nnum">54.51</td>
- <td class="nnum">-0.22</td>
- <td class="nnum" style="border-right:0px">-0.40</td>
- </tr>
-</tbody></table>
-<table bgcolor="" border="0" cellpadding="0" cellspacing="0" width="100%">
- <tbody><tr><td height="20px"><img alt="" border="0" height="20px" src="/img/b.gif" width="1"/></td></tr>
-</tbody></table>
-<table align="center" bgcolor="#ffffff" border="0" cellpadding="0" cellspacing="0" style="border:1px solid #cfc7b7;margin-bottom:5px;" width="575px">
- <tbody><tr>
- <td bgcolor="#e9e7e0" class="b12" colspan="3" style="padding:3px 0px 3px 0px;"><span class="p10" style="color:#000; float:right">An Advertising Feature </span> PARTNER CENTER</td>
- </tr>
-
- <tr>
- <td align="center" class="p10" style="padding:10px 0px 5px 0px;border-right:1px solid #cfc7b7;" valign="top">
-
-
-
- <script type="text/javascript">
-<!--
- var tempHTML = '';
- var adURL = 'http://ad.doubleclick.net/adi/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=1;sz=170x67;ord=26093260932609326093;';
- if ( isSafari ) {
- tempHTML += '<iframe id="mdc_tradingcenter1" src="'+adURL+'" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170">';
- } else {
- tempHTML += '<iframe id="mdc_tradingcenter1" src="/static_html_files/blank.htm" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170px;">';
- ListOfIframes.mdc_tradingcenter1= adURL;
- }
- tempHTML += '<a href="http://ad.doubleclick.net/jump/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=1;sz=170x67;ord=26093260932609326093;" target="_new">';
- tempHTML += '<img src="http://ad.doubleclick.net/ad/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=1;sz=170x67;ord=26093260932609326093;" border="0" width="170" height="67" vspace="0" alt="Advertisement" /></a><br /></iframe>';
- document.write(tempHTML);
- // -->
- </script>
- </td>
-
- <td align="center" class="p10" style="padding:10px 0px 5px 0px;border-right:1px solid #cfc7b7;" valign="top">
-
-
-
- <script type="text/javascript">
-<!--
- var tempHTML = '';
- var adURL = 'http://ad.doubleclick.net/adi/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=2;sz=170x67;ord=26093260932609326093;';
- if ( isSafari ) {
- tempHTML += '<iframe id="mdc_tradingcenter2" src="'+adURL+'" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170">';
- } else {
- tempHTML += '<iframe id="mdc_tradingcenter2" src="/static_html_files/blank.htm" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170px;">';
- ListOfIframes.mdc_tradingcenter2= adURL;
- }
- tempHTML += '<a href="http://ad.doubleclick.net/jump/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=2;sz=170x67;ord=26093260932609326093;" target="_new">';
- tempHTML += '<img src="http://ad.doubleclick.net/ad/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=2;sz=170x67;ord=26093260932609326093;" border="0" width="170" height="67" vspace="0" alt="Advertisement" /></a><br /></iframe>';
- document.write(tempHTML);
- // -->
- </script>
- </td>
-
- <td align="center" class="p10" style="padding:10px 0px 5px 0px;" valign="top">
-
-
-
- <script type="text/javascript">
-<!--
- var tempHTML = '';
- var adURL = 'http://ad.doubleclick.net/adi/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=3;sz=170x67;ord=26093260932609326093;';
- if ( isSafari ) {
- tempHTML += '<iframe id="mdc_tradingcenter3" src="'+adURL+'" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170">';
- } else {
- tempHTML += '<iframe id="mdc_tradingcenter3" src="/static_html_files/blank.htm" width="170" height="67" marginwidth="0" marginheight="0" hspace="0" vspace="0" frameborder="0" scrolling="no" bordercolor="#000000" style="width:170px;">';
- ListOfIframes.mdc_tradingcenter3= adURL;
- }
- tempHTML += '<a href="http://ad.doubleclick.net/jump/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=3;sz=170x67;ord=26093260932609326093;" target="_new">';
- tempHTML += '<img src="http://ad.doubleclick.net/ad/'+((GetCookie('etsFlag'))?'ets.wsj.com':'brokerbuttons.wsj.com')+'/markets_front;!category=;msrc=' + msrc + ';' + segQS + ';' + mc + ';tile=3;sz=170x67;ord=26093260932609326093;" border="0" width="170" height="67" vspace="0" alt="Advertisement" /></a><br /></iframe>';
- document.write(tempHTML);
- // -->
- </script>
- </td>
-
- </tr>
-
-</tbody></table>
-<table bgcolor="" border="0" cellpadding="0" cellspacing="0" width="100%">
- <tbody><tr><td height="20px"><img alt="" border="0" height="20px" src="/img/b.gif" width="1"/></td></tr>
-</tbody></table>
diff --git a/pandas/tests/io/excel/conftest.py b/pandas/tests/io/excel/conftest.py
index a257735dc1ec5..0455e0d61ad97 100644
--- a/pandas/tests/io/excel/conftest.py
+++ b/pandas/tests/io/excel/conftest.py
@@ -35,7 +35,7 @@ def df_ref(datapath):
return df_ref
-@pytest.fixture(params=[".xls", ".xlsx", ".xlsm", ".ods"])
+@pytest.fixture(params=[".xls", ".xlsx", ".xlsm", ".ods", ".xlsb"])
def read_ext(request):
"""
Valid extensions for reading Excel files.
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 629d3d02028bd..8d00ef1b7fe3e 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -31,7 +31,7 @@ def ignore_xlrd_time_clock_warning():
yield
-read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
+read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
@@ -57,6 +57,7 @@ def ignore_xlrd_time_clock_warning():
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
+ pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
@@ -73,6 +74,10 @@ def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
return False
if read_ext == ".ods" and engine != "odf":
return False
+ if engine == "pyxlsb" and read_ext != ".xlsb":
+ return False
+ if read_ext == ".xlsb" and engine != "pyxlsb":
+ return False
return True
@@ -120,7 +125,6 @@ def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
-
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
@@ -142,6 +146,8 @@ def test_usecols_int(self, read_ext, df_ref):
)
def test_usecols_list(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
@@ -156,6 +162,8 @@ def test_usecols_list(self, read_ext, df_ref):
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
@@ -188,6 +196,9 @@ def test_usecols_str(self, read_ext, df_ref):
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
@@ -203,11 +214,17 @@ def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_r
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
@@ -274,12 +291,16 @@ def test_excel_stop_iterator(self, read_ext):
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
@@ -291,6 +312,8 @@ def test_excel_table(self, read_ext, df_ref):
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
expected = DataFrame.from_dict(
OrderedDict(
@@ -488,6 +511,9 @@ def test_read_excel_blank_with_header(self, read_ext):
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "Marc Johnson"],
@@ -504,9 +530,14 @@ def test_date_conversion_overflow(self, read_ext):
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
filename = "test1"
sheet_name = "Sheet1"
+ if pd.read_excel.keywords["engine"] == "openpyxl":
+ pytest.xfail("Maybe not supported by openpyxl")
+
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
@@ -531,7 +562,6 @@ def test_bad_engine_raises(self, read_ext):
@tm.network
def test_read_from_http_url(self, read_ext):
-
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
@@ -599,6 +629,8 @@ def test_read_from_py_localpath(self, read_ext):
tm.assert_frame_equal(expected, actual)
def test_reader_seconds(self, read_ext):
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
@@ -627,6 +659,9 @@ def test_reader_seconds(self, read_ext):
def test_read_excel_multiindex(self, read_ext):
# see gh-4679
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
@@ -786,6 +821,9 @@ def test_read_excel_chunksize(self, read_ext):
def test_read_excel_skiprows_list(self, read_ext):
# GH 4903
+ if pd.read_excel.keywords["engine"] == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=[0, 2]
)
@@ -851,13 +889,11 @@ def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for ExcelFile objects.
"""
-
func = partial(pd.ExcelFile, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "ExcelFile", func)
def test_excel_passes_na(self, read_ext):
-
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, "Sheet1", keep_default_na=False, na_values=["apple"]
@@ -928,6 +964,10 @@ def test_unexpected_kwargs_raises(self, read_ext, arg):
pd.read_excel(excel, **kwarg)
def test_excel_table_sheet_by_index(self, read_ext, df_ref):
+ # For some reason pd.read_excel has no attribute 'keywords' here.
+ # Skipping based on read_ext instead.
+ if read_ext == ".xlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = pd.read_excel(excel, 0, index_col=0)
@@ -951,6 +991,11 @@ def test_excel_table_sheet_by_index(self, read_ext, df_ref):
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_sheet_name(self, read_ext, df_ref):
+ # For some reason pd.read_excel has no attribute 'keywords' here.
+ # Skipping based on read_ext instead.
+ if read_ext == ".xlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
filename = "test1"
sheet_name = "Sheet1"
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 55b987a599670..f7b49ccb1a72d 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -258,6 +258,36 @@ def test_read_excel_parse_dates(self, ext):
)
tm.assert_frame_equal(df, res)
+ def test_multiindex_interval_datetimes(self, ext):
+ # GH 30986
+ midx = pd.MultiIndex.from_arrays(
+ [
+ range(4),
+ pd.interval_range(
+ start=pd.Timestamp("2020-01-01"), periods=4, freq="6M"
+ ),
+ ]
+ )
+ df = pd.DataFrame(range(4), index=midx)
+ with tm.ensure_clean(ext) as pth:
+ df.to_excel(pth)
+ result = pd.read_excel(pth, index_col=[0, 1])
+ expected = pd.DataFrame(
+ range(4),
+ pd.MultiIndex.from_arrays(
+ [
+ range(4),
+ [
+ "(2020-01-31, 2020-07-31]",
+ "(2020-07-31, 2021-01-31]",
+ "(2021-01-31, 2021-07-31]",
+ "(2021-07-31, 2022-01-31]",
+ ],
+ ]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
@td.skip_if_no("xlrd")
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index d1f900a2dc58b..cc7e2311f362a 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -10,9 +10,11 @@
@pytest.fixture(autouse=True)
-def skip_ods_files(read_ext):
+def skip_ods_and_xlsb_files(read_ext):
if read_ext == ".ods":
pytest.skip("Not valid for xlrd")
+ if read_ext == ".xlsb":
+ pytest.skip("Not valid for xlrd")
def test_read_xlrd_book(read_ext, frame):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 97956489e7da6..7650561d3072d 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -296,7 +296,7 @@ def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
- # for the case of Index, where the repr is traditional rather then
+ # for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index e5dac18acedf6..a2659079be7c0 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -472,8 +472,19 @@ def test_empty(self):
result = s._translate()["cellstyle"]
expected = [
- {"props": [["color", " red"]], "selector": "row0_col0"},
- {"props": [["", ""]], "selector": "row1_col0"},
+ {"props": [("color", " red")], "selectors": ["row0_col0"]},
+ {"props": [("", "")], "selectors": ["row1_col0"]},
+ ]
+ assert result == expected
+
+ def test_duplicate(self):
+ df = pd.DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): ["color: red"], (1, 0): ["color: red"]}
+
+ result = s._translate()["cellstyle"]
+ expected = [
+ {"props": [("color", " red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 6ef0e0457e2e2..67b767a337a89 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
self-contained to write legacy storage pickle files
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index efb95a0cb2a42..91b204ed41ebc 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, json_normalize
+from pandas import DataFrame, Index, Series, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@@ -728,3 +728,24 @@ def test_deprecated_import(self):
recs = [{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}]
json_normalize(recs)
+
+ def test_series_non_zero_index(self):
+ # GH 19020
+ data = {
+ 0: {"id": 1, "name": "Foo", "elements": {"a": 1}},
+ 1: {"id": 2, "name": "Bar", "elements": {"b": 2}},
+ 2: {"id": 3, "name": "Baz", "elements": {"c": 3}},
+ }
+ s = Series(data)
+ s.index = [1, 2, 3]
+ result = json_normalize(s)
+ expected = DataFrame(
+ {
+ "id": [1, 2, 3],
+ "name": ["Foo", "Bar", "Baz"],
+ "elements.a": [1.0, np.nan, np.nan],
+ "elements.b": [np.nan, 2.0, np.nan],
+ "elements.c": [np.nan, np.nan, 3.0],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index e909a4952948c..94d51589023c4 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1,4 +1,5 @@
from collections import OrderedDict
+import datetime
from datetime import timedelta
from io import StringIO
import json
@@ -810,6 +811,31 @@ def test_convert_dates(self):
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
+ @pytest.mark.parametrize("date_format", ["epoch", "iso"])
+ @pytest.mark.parametrize("as_object", [True, False])
+ @pytest.mark.parametrize(
+ "date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
+ )
+ def test_date_index_and_values(self, date_format, as_object, date_typ):
+ data = [date_typ(year=2020, month=1, day=1), pd.NaT]
+ if as_object:
+ data.append("a")
+
+ ser = pd.Series(data, index=data)
+ result = ser.to_json(date_format=date_format)
+
+ if date_format == "epoch":
+ expected = '{"1577836800000":1577836800000,"null":null}'
+ else:
+ expected = (
+ '{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
+ )
+
+ if as_object:
+ expected = expected.replace("}", ',"a":"a"}')
+
+ assert result == expected
+
@pytest.mark.parametrize(
"infer_word",
[
@@ -1614,3 +1640,25 @@ def test_deprecate_numpy_argument_read_json(self):
with tm.assert_produces_warning(FutureWarning):
result = read_json(expected.to_json(), numpy=True)
tm.assert_frame_equal(result, expected)
+
+ def test_frame_int_overflow(self):
+ # GH 30320
+ encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
+ expected = DataFrame({"col": ["31900441201190696999", "Text"]})
+ result = read_json(encoded_json)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "dataframe,expected",
+ [
+ (
+ pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
+ '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
+ '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
+ )
+ ],
+ )
+ def test_json_multiindex(self, dataframe, expected):
+ series = dataframe.stack()
+ result = series.to_json(orient="index")
+ assert result == expected
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 4c02a37b66455..6c17f40b790ac 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -8,7 +8,6 @@
from io import StringIO
import os
import platform
-from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
@@ -1847,16 +1846,15 @@ def test_temporary_file(all_parsers):
parser = all_parsers
data = "0 0"
- new_file = TemporaryFile("w+")
- new_file.write(data)
- new_file.flush()
- new_file.seek(0)
+ with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
+ new_file.write(data)
+ new_file.flush()
+ new_file.seek(0)
- result = parser.read_csv(new_file, sep=r"\s+", header=None)
- new_file.close()
+ result = parser.read_csv(new_file, sep=r"\s+", header=None)
- expected = DataFrame([[0, 0]])
- tm.assert_frame_equal(result, expected)
+ expected = DataFrame([[0, 0]])
+ tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py
index 2133f8116a95e..11dcf7f04f76b 100644
--- a/pandas/tests/io/parser/test_dtypes.py
+++ b/pandas/tests/io/parser/test_dtypes.py
@@ -79,7 +79,7 @@ def test_invalid_dtype_per_column(all_parsers):
3,4.5
4,5.5"""
- with pytest.raises(TypeError, match='data type "foo" not understood'):
+ with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@@ -550,3 +550,35 @@ def test_numeric_dtype(all_parsers, dtype):
result = parser.read_csv(StringIO(data), header=None, dtype=dtype)
tm.assert_frame_equal(expected, result)
+
+
+def test_boolean_dtype(all_parsers):
+ parser = all_parsers
+ data = "\n".join(
+ [
+ "a",
+ "True",
+ "TRUE",
+ "true",
+ "False",
+ "FALSE",
+ "false",
+ "NaN",
+ "nan",
+ "NA",
+ "null",
+ "NULL",
+ ]
+ )
+
+ result = parser.read_csv(StringIO(data), dtype="boolean")
+ expected = pd.DataFrame(
+ {
+ "a": pd.array(
+ [True, True, True, False, False, False, None, None, None, None, None],
+ dtype="boolean",
+ )
+ }
+ )
+
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 33abf4bb7d9ee..406e7bedfd298 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -5,7 +5,6 @@
from io import BytesIO
import os
-import tempfile
import numpy as np
import pytest
@@ -164,7 +163,7 @@ def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding)
expected = DataFrame({"foo": ["bar"]})
- with tempfile.TemporaryFile(mode="w+", encoding=encoding) as f:
+ with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:
f.write("foo\nbar")
f.seek(0)
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index b8d66874bc660..b7164477c31f2 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -205,7 +205,7 @@ def test_read_csv_chunked_download(self, s3_resource, caplog):
with caplog.at_level(logging.DEBUG, logger="s3fs"):
read_csv("s3://pandas-test/large-file.csv", nrows=5)
# log of fetch_range (start, stop)
- assert (0, 5505024) in {x.args[-2:] for x in caplog.records}
+ assert (0, 5505024) in (x.args[-2:] for x in caplog.records)
def test_read_s3_with_hash_in_key(self, tips_df):
# GH 25945
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 64c4ad800f49d..f56d042093886 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -64,6 +64,16 @@
@pytest.mark.single
class TestHDFStore:
+ def test_format_type(self, setup_path):
+ df = pd.DataFrame({"A": [1, 2]})
+ with ensure_clean_path(setup_path) as path:
+ with HDFStore(path) as store:
+ store.put("a", df, format="fixed")
+ store.put("b", df, format="table")
+
+ assert store.get_storer("a").format_type == "fixed"
+ assert store.get_storer("b").format_type == "table"
+
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index a69e5556f3e85..652cacaf14ffb 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -8,13 +8,7 @@
from pandas import DataFrame, get_option, read_clipboard
import pandas._testing as tm
-from pandas.io.clipboard import PyperclipException, clipboard_get, clipboard_set
-
-try:
- DataFrame({"A": [1, 2]}).to_clipboard()
- _DEPS_INSTALLED = 1
-except (PyperclipException, RuntimeError):
- _DEPS_INSTALLED = 0
+from pandas.io.clipboard import clipboard_get, clipboard_set
def build_kwargs(sep, excel):
@@ -148,7 +142,6 @@ def test_mock_clipboard(mock_clipboard):
@pytest.mark.single
@pytest.mark.clipboard
-@pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed")
@pytest.mark.usefixtures("mock_clipboard")
class TestClipboard:
def check_round_trip_frame(self, data, excel=None, sep=None, encoding=None):
@@ -256,9 +249,7 @@ def test_round_trip_valid_encodings(self, enc, df):
@pytest.mark.single
@pytest.mark.clipboard
-@pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed")
@pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑´...", "abcd..."])
-@pytest.mark.xfail(reason="flaky in CI", strict=False)
def test_raw_roundtrip(data):
# PR #25040 wide unicode wasn't copied correctly on PY3 on windows
clipboard_set(data)
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index a126f83164ce5..d7a21b27308e8 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -129,7 +129,7 @@ def test_iterator(self):
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
- (pd.read_feather, "feather", Exception, "feather"),
+ (pd.read_feather, "pyarrow", IOError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
@@ -146,15 +146,18 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
- fr" '.+does_not_exist\.{fn_ext}'"
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
+ fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
+ msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
+
with pytest.raises(
- error_class, match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7})"
+ error_class,
+ match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@@ -165,7 +168,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
(pd.read_table, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
- (pd.read_feather, "feather", Exception, "feather"),
+ (pd.read_feather, "pyarrow", IOError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
@@ -186,16 +189,18 @@ def test_read_expands_user_home_dir(
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
- fr" '.+does_not_exist\.{fn_ext}'"
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
+ fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
+ msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
with pytest.raises(
- error_class, match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7})"
+ error_class,
+ match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@@ -212,7 +217,7 @@ def test_read_expands_user_home_dir(
(pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")),
(
pd.read_feather,
- "feather",
+ "pyarrow",
("io", "data", "feather", "feather-0_3_1.feather"),
),
(
@@ -249,7 +254,7 @@ def test_read_fspath_all(self, reader, module, path, datapath):
[
("to_csv", {}, "os"),
("to_excel", {"engine": "xlwt"}, "xlwt"),
- ("to_feather", {}, "feather"),
+ ("to_feather", {}, "pyarrow"),
("to_html", {}, "os"),
("to_json", {}, "os"),
("to_latex", {}, "os"),
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 626df839363cb..b649e394c780b 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -14,7 +14,7 @@
from pandas.errors import ParserError
import pandas.util._test_decorators as td
-from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
+from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.io.common import file_path_to_url
@@ -373,32 +373,6 @@ def test_python_docs_table(self):
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
- @pytest.mark.slow
- def test_thousands_macau_stats(self, datapath):
- all_non_nan_table_index = -2
- macau_data = datapath("io", "data", "html", "macau.html")
- dfs = self.read_html(macau_data, index_col=0, attrs={"class": "style1"})
- df = dfs[all_non_nan_table_index]
-
- assert not any(s.isna().any() for _, s in df.items())
-
- @pytest.mark.slow
- def test_thousands_macau_index_col(self, datapath, request):
- # https://github.com/pandas-dev/pandas/issues/29622
- # This tests fails for bs4 >= 4.8.0 - so handle xfail accordingly
- if self.read_html.keywords.get("flavor") == "bs4" and td.safe_import(
- "bs4", "4.8.0"
- ):
- reason = "fails for bs4 version >= 4.8.0"
- request.node.add_marker(pytest.mark.xfail(reason=reason))
-
- all_non_nan_table_index = -2
- macau_data = datapath("io", "data", "html", "macau.html")
- dfs = self.read_html(macau_data, index_col=0, header=0)
- df = dfs[all_non_nan_table_index]
-
- assert not any(s.isna().any() for _, s in df.items())
-
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
@@ -571,23 +545,6 @@ def test_parse_header_of_non_string_column(self):
tm.assert_frame_equal(result, expected)
- def test_nyse_wsj_commas_table(self, datapath):
- data = datapath("io", "data", "html", "nyse_wsj.html")
- df = self.read_html(data, index_col=0, header=0, attrs={"class": "mdcTable"})[0]
-
- expected = Index(
- [
- "Issue(Roll over for charts and headlines)",
- "Volume",
- "Price",
- "Chg",
- "% Chg",
- ]
- )
- nrows = 100
- assert df.shape[0] == nrows
- tm.assert_index_equal(df.columns, expected)
-
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
@@ -894,24 +851,23 @@ def test_parse_dates_combine(self):
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
- def test_computer_sales_page(self, datapath):
- data = datapath("io", "data", "html", "computer_sales_page.html")
- msg = (
- r"Passed header=\[0,1\] are too many "
- r"rows for this multi_index of columns"
- )
- with pytest.raises(ParserError, match=msg):
- self.read_html(data, header=[0, 1])
-
- data = datapath("io", "data", "html", "computer_sales_page.html")
- assert self.read_html(data, header=[1, 2])
-
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, "Arizona", header=1)[0]
+ assert result.shape == (60, 12)
+ assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
+ assert np.allclose(result.loc[0, "sq mi"], 665384.04)
+
+ def test_wikipedia_states_multiindex(self, datapath):
+ data = datapath("io", "data", "html", "wikipedia_states.html")
+ result = self.read_html(data, "Arizona", index_col=0)[0]
+ assert result.shape == (60, 11)
+ assert "Unnamed" in result.columns[-1][1]
+ assert result.columns.nlevels == 2
+ assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
msg = (
@@ -1158,9 +1114,9 @@ def test_displayed_only(self, displayed_only, exp0, exp1):
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
- _, encoding = os.path.splitext(os.path.basename(html_encoding_file))[0].split(
- "_"
- )
+ base_path = os.path.basename(html_encoding_file)
+ root = os.path.splitext(base_path)[0]
+ _, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
@@ -1183,7 +1139,7 @@ def test_encode(self, html_encoding_file):
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
- raise
+ raise
def test_parse_failure_unseekable(self):
# Issue #17975
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 3d427dde573af..04fd4835469a9 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -11,6 +11,7 @@
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
"""
import bz2
+import datetime
import glob
import gzip
import os
@@ -197,6 +198,7 @@ def test_pickle_path_localpath():
tm.assert_frame_equal(df, result)
+@pytest.mark.xfail(reason="GitHub issue #31310", strict=False)
def test_legacy_sparse_warning(datapath):
"""
@@ -487,3 +489,17 @@ def open(self, *args):
df.to_pickle(mockurl)
result = pd.read_pickle(mockurl)
tm.assert_frame_equal(df, result)
+
+
+class MyTz(datetime.tzinfo):
+ def __init__(self):
+ pass
+
+
+def test_read_pickle_with_subclass():
+ # GH 12163
+ expected = pd.Series(dtype=object), MyTz()
+ result = tm.round_trip_pickle(expected)
+
+ tm.assert_series_equal(result[0], expected[0])
+ assert isinstance(result[1], MyTz)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 1d3cddbf01738..cb2112b481952 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -21,11 +21,22 @@
PossiblePrecisionLoss,
StataMissingValue,
StataReader,
- StataWriter118,
+ StataWriterUTF8,
read_stata,
)
+@pytest.fixture()
+def mixed_frame():
+ return pd.DataFrame(
+ {
+ "a": [1, 2, 3, 4],
+ "b": [1.0, 3.0, 27.0, 81.0],
+ "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
+ }
+ )
+
+
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data", "stata")
@@ -112,7 +123,7 @@ def read_dta(self, file):
def read_csv(self, file):
return read_csv(file, parse_dates=True)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_empty_dta(self, version):
empty_ds = DataFrame(columns=["unit"])
# GH 7369, make sure can read a 0-obs dta file
@@ -332,7 +343,7 @@ def test_write_dta6(self):
check_index_type=False,
)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta10(self, version):
original = DataFrame(
data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
@@ -368,7 +379,7 @@ def test_write_preserves_original(self):
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_encoding(self, version):
# GH 4626, proper encoding handling
@@ -409,7 +420,7 @@ def test_read_write_dta11(self):
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta12(self, version):
original = DataFrame(
[(1, 2, 3, 4, 5, 6)],
@@ -461,7 +472,7 @@ def test_read_write_dta13(self):
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize(
"file", ["dta14_113", "dta14_114", "dta14_115", "dta14_117"]
)
@@ -504,7 +515,7 @@ def test_read_write_reread_dta15(self, file):
tm.assert_frame_equal(expected, parsed)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_timestamp_and_label(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = datetime(2000, 2, 29, 14, 21)
@@ -518,7 +529,7 @@ def test_timestamp_and_label(self, version):
assert reader.time_stamp == "29 Feb 2000 14:21"
assert reader.data_label == data_label
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_timestamp(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = "01 Jan 2000, 00:00:00"
@@ -542,7 +553,7 @@ def test_numeric_column_names(self):
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nan_to_missing_value(self, version):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
@@ -662,7 +673,7 @@ def test_write_missing_strings(self):
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("byteorder", [">", "<"])
def test_bool_uint(self, byteorder, version):
s0 = Series([0, 1, True], dtype=np.bool)
@@ -737,10 +748,10 @@ def test_excessively_long_string(self):
)
original = DataFrame(s)
msg = (
- r"Fixed width strings in Stata \.dta files are limited to 244"
- r" \(or fewer\)\ncharacters\. Column 's500' does not satisfy"
- r" this restriction\. Use the\n'version=117' parameter to write"
- r" the newer \(Stata 13 and later\) format\."
+ r"Fixed width strings in Stata \.dta files are limited to 244 "
+ r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
+ r"this restriction\. Use the\n'version=117' parameter to write "
+ r"the newer \(Stata 13 and later\) format\."
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
@@ -908,7 +919,7 @@ def test_drop_column(self):
columns = ["byte_", "int_", "long_", "not_found"]
read_stata(self.dta15_117, convert_dates=True, columns=columns)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.filterwarnings(
"ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
)
@@ -968,8 +979,8 @@ def test_categorical_warnings_and_errors(self):
)
with tm.ensure_clean() as path:
msg = (
- "Stata value labels for a single variable must have"
- r" a combined length less than 32,000 characters\."
+ "Stata value labels for a single variable must have "
+ r"a combined length less than 32,000 characters\."
)
with pytest.raises(ValueError, match=msg):
original.to_stata(path)
@@ -985,7 +996,7 @@ def test_categorical_warnings_and_errors(self):
original.to_stata(path)
# should get a warning for mixed content
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_categorical_with_stata_missing_values(self, version):
values = [["a" + str(i)] for i in range(120)]
values.append([np.nan])
@@ -1221,20 +1232,13 @@ def test_read_chunks_columns(self):
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
- @pytest.mark.parametrize("version", [114, 117])
- def test_write_variable_labels(self, version):
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
+ def test_write_variable_labels(self, version, mixed_frame):
# GH 13631, add support for writing variable labels
- original = pd.DataFrame(
- {
- "a": [1, 2, 3, 4],
- "b": [1.0, 3.0, 27.0, 81.0],
- "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
- }
- )
- original.index.name = "index"
+ mixed_frame.index.name = "index"
variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
- original.to_stata(path, variable_labels=variable_labels, version=version)
+ mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {
@@ -1247,46 +1251,36 @@ def test_write_variable_labels(self, version):
variable_labels["index"] = "The Index"
with tm.ensure_clean() as path:
- original.to_stata(path, variable_labels=variable_labels, version=version)
+ mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
assert read_labels == variable_labels
- @pytest.mark.parametrize("version", [114, 117])
- def test_invalid_variable_labels(self, version):
- original = pd.DataFrame(
- {
- "a": [1, 2, 3, 4],
- "b": [1.0, 3.0, 27.0, 81.0],
- "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
- }
- )
- original.index.name = "index"
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
+ def test_invalid_variable_labels(self, version, mixed_frame):
+ mixed_frame.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
- original.to_stata(
+ mixed_frame.to_stata(
path, variable_labels=variable_labels, version=version
)
+ @pytest.mark.parametrize("version", [114, 117])
+ def test_invalid_variable_label_encoding(self, version, mixed_frame):
+ mixed_frame.index.name = "index"
+ variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
variable_labels["a"] = "invalid character Œ"
with tm.ensure_clean() as path:
with pytest.raises(
ValueError, match="Variable labels must contain only characters"
):
- original.to_stata(
+ mixed_frame.to_stata(
path, variable_labels=variable_labels, version=version
)
- def test_write_variable_label_errors(self):
- original = pd.DataFrame(
- {
- "a": [1, 2, 3, 4],
- "b": [1.0, 3.0, 27.0, 81.0],
- "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
- }
- )
+ def test_write_variable_label_errors(self, mixed_frame):
values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]
variable_labels_utf8 = {
@@ -1301,7 +1295,7 @@ def test_write_variable_label_errors(self):
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
- original.to_stata(path, variable_labels=variable_labels_utf8)
+ mixed_frame.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {
"a": "City Rank",
@@ -1314,7 +1308,7 @@ def test_write_variable_label_errors(self):
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
- original.to_stata(path, variable_labels=variable_labels_long)
+ mixed_frame.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self):
# GH 12259
@@ -1380,7 +1374,7 @@ def test_unsupported_datetype(self):
"dates": dates,
}
)
- with pytest.raises(NotImplementedError):
+ with pytest.raises(NotImplementedError, match="Data type datetime64"):
with tm.ensure_clean() as path:
original.to_stata(path)
@@ -1636,7 +1630,7 @@ def test_invalid_date_conversion(self):
with pytest.raises(ValueError, match=msg):
original.to_stata(path, convert_dates={"wrong_name": "tc"})
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nonfile_writing(self, version):
# GH 21041
bio = io.BytesIO()
@@ -1699,7 +1693,7 @@ def test_mixed_string_strl(self):
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_all_none_exception(self, version):
output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
output = pd.DataFrame(output)
@@ -1708,18 +1702,18 @@ def test_all_none_exception(self, version):
with pytest.raises(ValueError, match="Column `none` cannot be exported"):
output.to_stata(path, version=version)
- @pytest.mark.parametrize("version", [114, 117])
+ @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_file_not_written(self, version):
content = "Here is one __�__ Another one __·__ Another one __½__"
df = DataFrame([content], columns=["invalid"])
with tm.ensure_clean() as path:
msg1 = (
- r"'latin-1' codec can't encode character '\\ufffd'"
- r" in position 14: ordinal not in range\(256\)"
+ r"'latin-1' codec can't encode character '\\ufffd' "
+ r"in position 14: ordinal not in range\(256\)"
)
msg2 = (
- "'ascii' codec can't decode byte 0xef in position 14:"
- r" ordinal not in range\(128\)"
+ "'ascii' codec can't decode byte 0xef in position 14: "
+ r"ordinal not in range\(128\)"
)
with pytest.raises(UnicodeEncodeError, match=r"{}|{}".format(msg1, msg2)):
with tm.assert_produces_warning(ResourceWarning):
@@ -1770,7 +1764,8 @@ def test_stata_119(self):
assert df.iloc[0, -1] == 1
assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21))
- def test_118_writer(self):
+ @pytest.mark.parametrize("version", [118, 119, None])
+ def test_utf8_writer(self, version):
cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
data = pd.DataFrame(
[
@@ -1791,13 +1786,14 @@ def test_118_writer(self):
data_label = "ᴅaᵀa-label"
data["β"] = data["β"].astype(np.int32)
with tm.ensure_clean() as path:
- writer = StataWriter118(
+ writer = StataWriterUTF8(
path,
data,
data_label=data_label,
convert_strl=["strls"],
variable_labels=variable_labels,
write_index=False,
+ version=version,
)
writer.write_file()
reread_encoded = read_stata(path)
@@ -1807,3 +1803,16 @@ def test_118_writer(self):
reader = StataReader(path)
assert reader.data_label == data_label
assert reader.variable_labels() == variable_labels
+
+ data.to_stata(path, version=version, write_index=False)
+ reread_to_stata = read_stata(path)
+ tm.assert_frame_equal(data, reread_to_stata)
+
+ def test_writer_118_exceptions(self):
+ df = DataFrame(np.zeros((1, 33000), dtype=np.int8))
+ with tm.ensure_clean() as path:
+ with pytest.raises(ValueError, match="version must be either 118 or 119."):
+ StataWriterUTF8(path, df, version=117)
+ with tm.ensure_clean() as path:
+ with pytest.raises(ValueError, match="You must use version 119"):
+ StataWriterUTF8(path, df, version=118)
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 9f43027836eb4..a604d90acc854 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# coding: utf-8
import os
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 8f855fd0c6cff..84d298cd7c6fe 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -43,7 +43,13 @@ def setup_method(self, method):
def teardown_method(self, method):
tm.close()
+ # Ignore warning
+ # ```
+ # Converting to PeriodArray/Index representation will drop timezone information.
+ # ```
+ # which occurs for UTC-like timezones.
@pytest.mark.slow
+ @pytest.mark.filterwarnings("ignore:msg:UserWarning")
def test_ts_plot_with_tz(self, tz_aware_fixture):
# GH2877, GH17173
tz = tz_aware_fixture
@@ -121,8 +127,8 @@ def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
msg = (
"Cannot pass 'style' string with a color symbol and 'color' "
- "keyword argument. Please use one or the other or pass 'style'"
- " without a color symbol"
+ "keyword argument. Please use one or the other or pass 'style' "
+ "without a color symbol"
)
with pytest.raises(ValueError, match=msg):
ts.plot(style="b-", color="#000099")
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index c8aa1f23ccf1f..168e8c7de0b83 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -319,8 +319,8 @@ def test_subplot_titles(self, iris):
# Case len(title) > len(df)
msg = (
- "The length of `title` must equal the number of columns if"
- " using `title` of type `list` and `subplots=True`"
+ "The length of `title` must equal the number of columns if "
+ "using `title` of type `list` and `subplots=True`"
)
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, title=title + ["kittens > puppies"])
@@ -331,8 +331,8 @@ def test_subplot_titles(self, iris):
# Case subplots=False and title is of type list
msg = (
- "Using `title` of type `list` is not supported unless"
- " `subplots=True` is passed"
+ "Using `title` of type `list` is not supported unless "
+ "`subplots=True` is passed"
)
with pytest.raises(ValueError, match=msg):
df.plot(subplots=False, title=title)
@@ -406,3 +406,24 @@ def test_get_standard_colors_no_appending(self):
color_list = cm.gnuplot(np.linspace(0, 1, 16))
p = df.A.plot.bar(figsize=(16, 7), color=color_list)
assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor()
+
+ @pytest.mark.slow
+ def test_dictionary_color(self):
+ # issue-8193
+ # Test plot color dictionary format
+ data_files = ["a", "b"]
+
+ expected = [(0.5, 0.24, 0.6), (0.3, 0.7, 0.7)]
+
+ df1 = DataFrame(np.random.rand(2, 2), columns=data_files)
+ dic_color = {"b": (0.3, 0.7, 0.7), "a": (0.5, 0.24, 0.6)}
+
+ # Bar color test
+ ax = df1.plot(kind="bar", color=dic_color)
+ colors = [rect.get_facecolor()[0:-1] for rect in ax.get_children()[0:3:2]]
+ assert all(color == expected[index] for index, color in enumerate(colors))
+
+ # Line color test
+ ax = df1.plot(kind="line", color=dic_color)
+ colors = [rect.get_color() for rect in ax.get_lines()[0:2]]
+ assert all(color == expected[index] for index, color in enumerate(colors))
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 7400b049961d5..8d2058ffab643 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -316,7 +316,12 @@ def test_invalid_td64_reductions(self, opname):
)
td = s.diff()
- msg = "reduction operation '{op}' not allowed for this dtype"
+ msg = "|".join(
+ [
+ "reduction operation '{op}' not allowed for this dtype",
+ r"cannot perform {op} with type timedelta64\[ns\]",
+ ]
+ )
msg = msg.format(op=opname)
with pytest.raises(TypeError, match=msg):
@@ -648,7 +653,13 @@ def test_ops_consistency_on_empty(self, method):
# timedelta64[ns]
tdser = Series([], dtype="m8[ns]")
if method == "var":
- with pytest.raises(TypeError, match="operation 'var' not allowed"):
+ msg = "|".join(
+ [
+ "operation 'var' not allowed",
+ r"cannot perform var with type timedelta64\[ns\]",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
getattr(tdser, method)()
else:
result = getattr(tdser, method)()
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 4860329718f54..3ad82b9e075a8 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -122,7 +122,9 @@ def test_resample_integerarray():
result = ts.resample("3T").mean()
expected = Series(
- [1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64"
+ [1, 4, 7],
+ index=pd.date_range("1/1/2000", periods=3, freq="3T"),
+ dtype="float64",
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index 955f8c7482937..ff303b808f6f5 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -870,3 +870,15 @@ def test_get_period_range_edges(self, first, last, offset, exp_first, exp_last):
result = _get_period_range_edges(first, last, offset)
expected = (exp_first, exp_last)
assert result == expected
+
+ def test_sum_min_count(self):
+ # GH 19974
+ index = pd.date_range(start="2018", freq="M", periods=6)
+ data = np.ones(6)
+ data[3:6] = np.nan
+ s = pd.Series(data, index).to_period()
+ result = s.resample("Q").sum(min_count=1)
+ expected = pd.Series(
+ [3.0, np.nan], index=PeriodIndex(["2018Q1", "2018Q2"], freq="Q-DEC")
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 170201b4f8e5c..d552241f9126f 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -257,8 +257,8 @@ def test_fillna():
tm.assert_series_equal(result, expected)
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\), backfill"
- r" \(bfill\) or nearest\. Got 0"
+ r"Invalid fill method\. Expecting pad \(ffill\), backfill "
+ r"\(bfill\) or nearest\. Got 0"
)
with pytest.raises(ValueError, match=msg):
r.fillna(0)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 4e3585c0be884..03c1445e099a0 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -230,6 +230,23 @@ def f(x):
tm.assert_series_equal(result, expected)
+def test_apply_columns_multilevel():
+ # GH 16231
+ cols = pd.MultiIndex.from_tuples([("A", "a", "", "one"), ("B", "b", "i", "two")])
+ ind = date_range(start="2017-01-01", freq="15Min", periods=8)
+ df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)
+ agg_dict = {col: (np.sum if col[3] == "one" else np.mean) for col in df.columns}
+ result = df.resample("H").apply(lambda x: agg_dict[x.name](x))
+ expected = DataFrame(
+ np.array([0] * 4).reshape(2, 2),
+ index=date_range(start="2017-01-01", freq="1H", periods=2),
+ columns=pd.MultiIndex.from_tuples(
+ [("A", "a", "", "one"), ("B", "b", "i", "two")]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_resample_groupby_with_label():
# GH 13235
index = date_range("2000-01-01", freq="2D", periods=5)
diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py
index d1bcdc55cb509..a4d14f127b80e 100644
--- a/pandas/tests/resample/test_timedelta.py
+++ b/pandas/tests/resample/test_timedelta.py
@@ -105,7 +105,7 @@ def test_resample_categorical_data_with_timedeltaindex():
index=pd.to_timedelta([0, 10], unit="s"),
)
expected = expected.reindex(["Group_obj", "Group"], axis=1)
- expected["Group"] = expected["Group_obj"].astype("category")
+ expected["Group"] = expected["Group_obj"]
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index a660acb143433..7020d373caf82 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -212,8 +212,8 @@ def test_join_on(self):
source_copy = source.copy()
source_copy["A"] = 0
msg = (
- "You are trying to merge on float64 and object columns. If"
- " you wish to proceed you should use pd.concat"
+ "You are trying to merge on float64 and object columns. If "
+ "you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on="A")
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 8e0c4766056d3..f9acf5b60a3cd 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -201,8 +201,8 @@ def test_merge_misspecified(self):
merge(self.left, self.right, right_index=True)
msg = (
- 'Can only pass argument "on" OR "left_on" and "right_on", not'
- " a combination of both"
+ 'Can only pass argument "on" OR "left_on" and "right_on", not '
+ "a combination of both"
)
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.left, left_on="key", on="key")
@@ -1013,10 +1013,9 @@ def test_indicator(self):
df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
msg = (
- "Cannot use `indicator=True` option when data contains a"
- " column named {}|"
- "Cannot use name of an existing column for indicator"
- " column"
+ "Cannot use `indicator=True` option when data contains a "
+ "column named {}|"
+ "Cannot use name of an existing column for indicator column"
).format(i)
with pytest.raises(ValueError, match=msg):
merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
@@ -1235,8 +1234,8 @@ def test_validation(self):
)
msg = (
- "Merge keys are not unique in either left or right dataset;"
- " not a one-to-one merge"
+ "Merge keys are not unique in either left or right dataset; "
+ "not a one-to-one merge"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, on="a", validate="1:1")
@@ -2153,3 +2152,20 @@ def test_merge_multiindex_columns():
expected["id"] = ""
tm.assert_frame_equal(result, expected)
+
+
+def test_merge_datetime_upcast_dtype():
+ # https://github.com/pandas-dev/pandas/issues/31208
+ df1 = pd.DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
+ df2 = pd.DataFrame(
+ {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
+ )
+ result = pd.merge(df1, df2, how="left", on="y")
+ expected = pd.DataFrame(
+ {
+ "x": ["a", "b", "c"],
+ "y": ["1", "2", "4"],
+ "z": pd.to_datetime(["2000", "2001", "NaT"]),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 990669f1ae13a..5811f3bc196a1 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -198,8 +198,8 @@ def test_concatlike_same_dtypes(self):
# cannot append non-index
msg = (
- r"cannot concatenate object of type '.+';"
- " only Series and DataFrame objs are valid"
+ r"cannot concatenate object of type '.+'; "
+ "only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
pd.Series(vals1).append(vals2)
@@ -1866,8 +1866,8 @@ def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
msg = (
- "cannot concatenate object of type '{}';"
- " only Series and DataFrame objs are valid"
+ "cannot concatenate object of type '{}'; "
+ "only Series and DataFrame objs are valid"
)
for obj in [1, dict(), [1, 2], (1, 2)]:
with pytest.raises(TypeError, match=msg.format(type(obj))):
@@ -2750,3 +2750,17 @@ def test_concat_sparse():
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("test_series", [True, False])
+def test_concat_copy_index(test_series, axis):
+ # GH 29879
+ if test_series:
+ ser = Series([1, 2])
+ comb = concat([ser, ser], axis=axis, copy=True)
+ assert comb.index is not ser.index
+ else:
+ df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
+ comb = concat([df, df], axis=axis, copy=True)
+ assert comb.index is not df.index
+ assert comb.columns is not df.columns
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 743fc50c87e96..fe75aef1ca3d7 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -781,6 +781,15 @@ def test_pivot_with_list_like_values_nans(self, values, method):
expected = DataFrame(data=data, index=index, columns=columns, dtype="object")
tm.assert_frame_equal(result, expected)
+ def test_pivot_columns_none_raise_error(self):
+ # GH 30924
+ df = pd.DataFrame(
+ {"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]}
+ )
+ msg = r"pivot\(\) missing 1 required argument: 'columns'"
+ with pytest.raises(TypeError, match=msg):
+ df.pivot(index="col1", values="col3")
+
@pytest.mark.xfail(
reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966"
)
@@ -896,17 +905,69 @@ def _check_output(
totals = table.loc[("All", ""), value_col]
assert totals == self.data[value_col].mean()
- # no rows
- rtable = self.data.pivot_table(
- columns=["AA", "BB"], margins=True, aggfunc=np.mean
- )
- assert isinstance(rtable, Series)
-
table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
for item in ["DD", "EE", "FF"]:
totals = table.loc[("All", ""), item]
assert totals == self.data[item].mean()
+ @pytest.mark.parametrize(
+ "columns, aggfunc, values, expected_columns",
+ [
+ (
+ "A",
+ np.mean,
+ [[5.5, 5.5, 2.2, 2.2], [8.0, 8.0, 4.4, 4.4]],
+ Index(["bar", "All", "foo", "All"], name="A"),
+ ),
+ (
+ ["A", "B"],
+ "sum",
+ [[9, 13, 22, 5, 6, 11], [14, 18, 32, 11, 11, 22]],
+ MultiIndex.from_tuples(
+ [
+ ("bar", "one"),
+ ("bar", "two"),
+ ("bar", "All"),
+ ("foo", "one"),
+ ("foo", "two"),
+ ("foo", "All"),
+ ],
+ names=["A", "B"],
+ ),
+ ),
+ ],
+ )
+ def test_margin_with_only_columns_defined(
+ self, columns, aggfunc, values, expected_columns
+ ):
+ # GH 31016
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
+ "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
+ "C": [
+ "small",
+ "large",
+ "large",
+ "small",
+ "small",
+ "large",
+ "small",
+ "small",
+ "large",
+ ],
+ "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
+ "E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
+ }
+ )
+
+ result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
+ expected = pd.DataFrame(
+ values, index=Index(["D", "E"]), columns=expected_columns
+ )
+
+ tm.assert_frame_equal(result, expected)
+
def test_margins_dtype(self):
# GH 17013
@@ -951,6 +1012,20 @@ def test_margins_dtype_len(self):
tm.assert_frame_equal(expected, result)
+ @pytest.mark.parametrize("cols", [(1, 2), ("a", "b"), (1, "b"), ("a", 1)])
+ def test_pivot_table_multiindex_only(self, cols):
+ # GH 17038
+ df2 = DataFrame({cols[0]: [1, 2, 3], cols[1]: [1, 2, 3], "v": [4, 5, 6]})
+
+ result = df2.pivot_table(values="v", columns=cols)
+ expected = DataFrame(
+ [[4, 5, 6]],
+ columns=MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)], names=cols),
+ index=Index(["v"]),
+ )
+
+ tm.assert_frame_equal(result, expected)
+
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
@@ -2549,6 +2624,19 @@ def test_crosstab_tuple_name(self, names):
result = pd.crosstab(s1, s2)
tm.assert_frame_equal(result, expected)
+ def test_crosstab_both_tuple_names(self):
+ # GH 18321
+ s1 = pd.Series(range(3), name=("a", "b"))
+ s2 = pd.Series(range(3), name=("c", "d"))
+
+ expected = pd.DataFrame(
+ np.eye(3, dtype="int64"),
+ index=pd.Index(range(3), name=("a", "b")),
+ columns=pd.Index(range(3), name=("c", "d")),
+ )
+ result = crosstab(s1, s2)
+ tm.assert_frame_equal(result, expected)
+
def test_crosstab_unsorted_order(self):
df = pd.DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
result = pd.crosstab(df.index, [df.b, df.a])
@@ -2561,6 +2649,46 @@ def test_crosstab_unsorted_order(self):
)
tm.assert_frame_equal(result, expected)
+ def test_crosstab_normalize_multiple_columns(self):
+ # GH 15150
+ df = pd.DataFrame(
+ {
+ "A": ["one", "one", "two", "three"] * 6,
+ "B": ["A", "B", "C"] * 8,
+ "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
+ "D": [0] * 24,
+ "E": [0] * 24,
+ }
+ )
+ result = pd.crosstab(
+ [df.A, df.B],
+ df.C,
+ values=df.D,
+ aggfunc=np.sum,
+ normalize=True,
+ margins=True,
+ )
+ expected = pd.DataFrame(
+ np.array([0] * 29 + [1], dtype=float).reshape(10, 3),
+ columns=Index(["bar", "foo", "All"], dtype="object", name="C"),
+ index=MultiIndex.from_tuples(
+ [
+ ("one", "A"),
+ ("one", "B"),
+ ("one", "C"),
+ ("three", "A"),
+ ("three", "B"),
+ ("three", "C"),
+ ("two", "A"),
+ ("two", "B"),
+ ("two", "C"),
+ ("All", ""),
+ ],
+ names=["A", "B"],
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_margin_normalize(self):
# GH 27500
df = pd.DataFrame(
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index f25291f4aef12..6113cfec48df9 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -86,9 +86,7 @@ def test_basic_types(self, sparse, dtype):
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
- dtype_name = "Sparse[{}, {}]".format(
- self.effective_dtype(dtype).name, fill_value
- )
+ dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
@@ -163,8 +161,7 @@ def test_unicode(self, sparse):
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
- {"letter_e": [1, 0, 0], "letter_{eacute}".format(eacute=eacute): [0, 1, 1]},
- dtype=np.uint8,
+ {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8,
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 6af9c9884589c..bbc81e0dbb6e6 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -308,7 +308,7 @@ def test_multiples(self):
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
- freq = "Q-{month}".format(month=month)
+ freq = f"Q-{month}"
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
@@ -322,7 +322,7 @@ def test_period_cons_quarterly(self, month):
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
- freq = "A-{month}".format(month=month)
+ freq = f"A-{month}"
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
@@ -333,8 +333,8 @@ def test_period_cons_annual(self, month):
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
- daystr = "2011-02-{num}".format(num=num)
- freq = "W-{day}".format(day=day)
+ daystr = f"2011-02-{num}"
+ freq = f"W-{day}"
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index 7d05511239ebc..dcb9d66708724 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -96,19 +96,7 @@ def test_pow_special(value, asarray):
@pytest.mark.parametrize(
- "value",
- [
- 1,
- 1.0,
- -1,
- -1.0,
- True,
- np.bool_(True),
- np.int_(1),
- np.float_(1),
- np.int_(-1),
- np.float_(-1),
- ],
+ "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)],
)
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_special(value, asarray):
@@ -125,6 +113,21 @@ def test_rpow_special(value, asarray):
assert result == value
+@pytest.mark.parametrize(
+ "value", [-1, -1.0, np.int_(-1), np.float_(-1)],
+)
+@pytest.mark.parametrize("asarray", [True, False])
+def test_rpow_minus_one(value, asarray):
+ if asarray:
+ value = np.array([value])
+ result = value ** pd.NA
+
+ if asarray:
+ result = result[0]
+
+ assert pd.isna(result)
+
+
def test_unary_ops():
assert +NA is NA
assert -NA is NA
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 3764d9b7548fc..5fc991df49424 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -271,8 +271,8 @@ def test_ops_ndarray(self):
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
- "ufunc '?multiply'? cannot use operands with types"
- r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
+ "ufunc '?multiply'? cannot use operands with types "
+ r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
@@ -498,7 +498,7 @@ def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
- dt64 = np.datetime64("2016-01-01", dtype="datetime64[us]")
+ dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index f1fcf46a936fd..692eb6cd8bc43 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -2,6 +2,7 @@
import calendar
from datetime import datetime, timedelta
+from distutils.version import LooseVersion
import locale
import unicodedata
@@ -12,7 +13,6 @@
import pytz
from pytz import timezone, utc
-from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone
import pandas.compat as compat
from pandas.compat.numpy import np_datetime64_compat
@@ -241,24 +241,20 @@ def test_constructor(self):
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
- assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
- assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
expected_tz = expected - offset * 3600 * 1_000_000_000
assert result.value == expected_tz
- assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
- assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
if tz is not None:
@@ -267,7 +263,6 @@ def test_constructor(self):
result = Timestamp(result, tz="UTC")
expected_utc = expected - offset * 3600 * 1_000_000_000
assert result.value == expected_utc
- assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
@@ -300,30 +295,25 @@ def test_constructor_with_stringoffset(self):
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
- assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
- assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
- assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
- assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result).tz_convert("UTC")
expected_utc = expected
assert result.value == expected_utc
- assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
@@ -575,7 +565,7 @@ def test_bounds_with_different_units(self):
for date_string in out_of_bounds_dates:
for unit in time_units:
- dt64 = np.datetime64(date_string, dtype="M8[{unit}]".format(unit=unit))
+ dt64 = np.datetime64(date_string, unit)
with pytest.raises(ValueError):
Timestamp(dt64)
@@ -583,7 +573,7 @@ def test_bounds_with_different_units(self):
for date_string in in_bounds_dates:
for unit in time_units:
- dt64 = np.datetime64(date_string, dtype="M8[{unit}]".format(unit=unit))
+ dt64 = np.datetime64(date_string, unit)
Timestamp(dt64)
def test_min_valid(self):
@@ -751,7 +741,7 @@ def test_asm8(self):
def test_class_ops_pytz(self):
def compare(x, y):
- assert int(Timestamp(x).value / 1e9) == int(Timestamp(y).value / 1e9)
+ assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
@@ -775,8 +765,12 @@ def compare(x, y):
def test_class_ops_dateutil(self):
def compare(x, y):
- assert int(np.round(Timestamp(x).value / 1e9)) == int(
- np.round(Timestamp(y).value / 1e9)
+ assert (
+ int(
+ np.round(Timestamp(x).value / 1e9)
+ - np.round(Timestamp(y).value / 1e9)
+ )
+ == 0
)
compare(Timestamp.now(), datetime.now())
@@ -1081,3 +1075,34 @@ def test_dt_subclass_add_timedelta(lh, rh):
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
+
+
+def test_constructor_ambigous_dst():
+ # GH 24329
+ # Make sure that calling Timestamp constructor
+ # on Timestamp created from ambiguous time
+ # doesn't change Timestamp.value
+ ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
+ expected = ts.value
+ result = Timestamp(ts).value
+ assert result == expected
+
+
+@pytest.mark.xfail(
+ LooseVersion(compat._optional._get_version(dateutil)) < LooseVersion("2.7.0"),
+ reason="dateutil moved to Timedelta.total_seconds() in 2.7.0",
+)
+@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999])
+def test_constructor_before_dst_switch(epoch):
+ # GH 31043
+ # Make sure that calling Timestamp constructor
+ # on time just before DST switch doesn't lead to
+ # nonexistent time or value change
+    # Works only with dateutil >= 2.7.0 as dateutil overrode
+ # pandas.Timedelta.total_seconds with
+ # datetime.timedelta.total_seconds before
+ ts = Timestamp(epoch, tz="dateutil/US/Pacific")
+ result = ts.tz.dst(ts)
+ expected = timedelta(seconds=0)
+ assert Timestamp(ts).value == epoch
+ assert result == expected
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index 47f40e24e1637..dc8b91de3d09b 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -243,8 +243,8 @@ def test_reindex_corner(datetime_series):
# bad fill method
ts = datetime_series[::2]
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\), backfill"
- r" \(bfill\) or nearest\. Got foo"
+ r"Invalid fill method\. Expecting pad \(ffill\), backfill "
+ r"\(bfill\) or nearest\. Got foo"
)
with pytest.raises(ValueError, match=msg):
ts.reindex(datetime_series.index, method="foo")
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index d75efcf52c271..16a29d10eb414 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -44,8 +44,8 @@ def test_getitem_boolean_empty():
# invalid because of the boolean indexer
# that's empty or not-aligned
msg = (
- r"Unalignable boolean Series provided as indexer \(index of"
- r" the boolean Series and of the indexed object do not match"
+ r"Unalignable boolean Series provided as indexer \(index of "
+ r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
s[Series([], dtype=bool)]
@@ -89,8 +89,8 @@ def test_getitem_setitem_boolean_corner(datetime_series):
# these used to raise...??
msg = (
- r"Unalignable boolean Series provided as indexer \(index of"
- r" the boolean Series and of the indexed object do not match"
+ r"Unalignable boolean Series provided as indexer \(index of "
+ r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted]
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 15ff5f6b343d1..77085ef547690 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -49,39 +49,6 @@ def test_fancy_setitem():
assert (s[48:54] == -3).all()
-@pytest.mark.filterwarnings("ignore::DeprecationWarning")
-@pytest.mark.parametrize("tz", [None, "Asia/Shanghai", "Europe/Berlin"])
-@pytest.mark.parametrize("name", [None, "my_dti"])
-def test_dti_snap(name, tz):
- dti = DatetimeIndex(
- [
- "1/1/2002",
- "1/2/2002",
- "1/3/2002",
- "1/4/2002",
- "1/5/2002",
- "1/6/2002",
- "1/7/2002",
- ],
- name=name,
- tz=tz,
- freq="D",
- )
-
- result = dti.snap(freq="W-MON")
- expected = date_range("12/31/2001", "1/7/2002", name=name, tz=tz, freq="w-mon")
- expected = expected.repeat([3, 4])
- tm.assert_index_equal(result, expected)
- assert result.tz == expected.tz
-
- result = dti.snap(freq="B")
-
- expected = date_range("1/1/2002", "1/7/2002", name=name, tz=tz, freq="b")
- expected = expected.repeat([1, 1, 1, 2, 2])
- tm.assert_index_equal(result, expected)
- assert result.tz == expected.tz
-
-
def test_dti_reset_index_round_trip():
dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")
d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
@@ -751,16 +718,6 @@ def test_nat_operations():
assert s.max() == exp
-@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
-@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
-def test_round_nat(method, freq):
- # GH14940
- s = Series([pd.NaT])
- expected = Series(pd.NaT)
- round_method = getattr(s.dt, method)
- tm.assert_series_equal(round_method(freq), expected)
-
-
def test_setitem_tuple_with_datetimetz():
# GH 20441
arr = date_range("2017", periods=4, tz="US/Eastern")
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 4601cabf69b52..18dbd22b73b35 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -393,8 +393,8 @@ def test_2d_to_1d_assignment_raises():
y = pd.Series(range(2))
msg = (
- r"shape mismatch: value array of shape \(2,2\) could not be"
- r" broadcast to indexing result of shape \(2,\)"
+ r"shape mismatch: value array of shape \(2,2\) could not be "
+ r"broadcast to indexing result of shape \(2,\)"
)
with pytest.raises(ValueError, match=msg):
y.loc[range(2)] = x
@@ -894,7 +894,7 @@ def test_take():
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
- msg = "index {} is out of bounds for size 5"
+ msg = "index {} is out of bounds for( axis 0 with)? size 5"
with pytest.raises(IndexError, match=msg.format(10)):
s.take([1, 10])
with pytest.raises(IndexError, match=msg.format(5)):
@@ -925,3 +925,13 @@ def test_uint_drop(any_int_dtype):
series.loc[0] = 4
expected = pd.Series([4, 2, 3], dtype=any_int_dtype)
tm.assert_series_equal(series, expected)
+
+
+def test_getitem_2d_no_warning():
+ # https://github.com/pandas-dev/pandas/issues/30867
+ # Don't want to support this long-term, but
+ # for now ensure that the warning from Index
+    # doesn't come through via Series.__getitem__.
+ series = pd.Series([1, 2, 3], index=[1, 2, 3])
+ with tm.assert_produces_warning(None):
+ series[:, None]
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index ce0d04ff99077..3684ca00c2f17 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -261,8 +261,8 @@ def test_setitem_float_labels():
def test_slice_float_get_set(datetime_series):
msg = (
r"cannot do slice indexing on <class 'pandas\.core\.indexes"
- r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\]"
- r" of <class 'float'>"
+ r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
+ r"of <class 'float'>"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py
index dc0fca4bba067..4d64b5b397981 100644
--- a/pandas/tests/series/methods/test_append.py
+++ b/pandas/tests/series/methods/test_append.py
@@ -61,6 +61,16 @@ def test_append_tuples(self):
tm.assert_series_equal(expected, result)
+ def test_append_dataframe_regression(self):
+ # GH 30975
+ df = pd.DataFrame({"A": [1, 2]})
+ result = df.A.append([df])
+ expected = pd.DataFrame(
+ {0: [1.0, 2.0, None, None], "A": [None, None, 1.0, 2.0]}, index=[0, 1, 0, 1]
+ )
+
+ tm.assert_frame_equal(expected, result)
+
class TestSeriesAppendWithDatetimeIndex:
def test_append(self):
diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py
index 1fc98ded0d3d2..62273e2d363fb 100644
--- a/pandas/tests/series/methods/test_argsort.py
+++ b/pandas/tests/series/methods/test_argsort.py
@@ -52,8 +52,8 @@ def test_argsort_stable(self):
tm.assert_series_equal(mindexer, Series(mexpected), check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected), check_dtype=False)
msg = (
- r"ndarray Expected type <class 'numpy\.ndarray'>,"
- r" found <class 'pandas\.core\.series\.Series'> instead"
+ r"ndarray Expected type <class 'numpy\.ndarray'>, "
+ r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py
index b147a04b11090..4e59c6995f4f2 100644
--- a/pandas/tests/series/methods/test_describe.py
+++ b/pandas/tests/series/methods/test_describe.py
@@ -1,6 +1,6 @@
import numpy as np
-from pandas import Series, Timestamp, date_range
+from pandas import Period, Series, Timedelta, Timestamp, date_range
import pandas._testing as tm
@@ -29,6 +29,36 @@ def test_describe(self):
)
tm.assert_series_equal(result, expected)
+ s = Series(
+ [
+ Timedelta("1 days"),
+ Timedelta("2 days"),
+ Timedelta("3 days"),
+ Timedelta("4 days"),
+ Timedelta("5 days"),
+ ],
+ name="timedelta_data",
+ )
+ result = s.describe()
+ expected = Series(
+ [5, s[2], s.std(), s[0], s[1], s[2], s[3], s[4]],
+ name="timedelta_data",
+ index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
+ )
+ tm.assert_series_equal(result, expected)
+
+ s = Series(
+ [Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],
+ name="period_data",
+ )
+ result = s.describe()
+ expected = Series(
+ [3, 2, s[0], 2],
+ name="period_data",
+ index=["count", "unique", "top", "freq"],
+ )
+ tm.assert_series_equal(result, expected)
+
def test_describe_empty_object(self):
# https://github.com/pandas-dev/pandas/issues/27183
s = Series([None, None], dtype=object)
@@ -57,13 +87,14 @@ def test_describe_with_tz(self, tz_naive_fixture):
expected = Series(
[
5,
- 5,
- s.value_counts().index[0],
- 1,
+ Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
+ s[1],
+ s[2],
+ s[3],
end.tz_localize(tz),
],
name=name,
- index=["count", "unique", "top", "freq", "first", "last"],
+ index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index ca93e989ba6b5..3836c1d56bf87 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -29,8 +29,8 @@ def test_isin_with_string_scalar(self):
# GH#4763
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
msg = (
- r"only list-like objects are allowed to be passed to isin\(\),"
- r" you passed a \[str\]"
+ r"only list-like objects are allowed to be passed to isin\(\), "
+ r"you passed a \[str\]"
)
with pytest.raises(TypeError, match=msg):
s.isin("a")
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index b20baa2836363..770ad38b0215e 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -120,8 +120,8 @@ def test_replace_with_single_list(self):
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
- r"Invalid fill method\. Expecting pad \(ffill\) or backfill"
- r" \(bfill\)\. Got crash_cymbal"
+ r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
+ r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
index 7f0711a0f30d7..88d5c428712dc 100644
--- a/pandas/tests/series/methods/test_round.py
+++ b/pandas/tests/series/methods/test_round.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
+import pandas as pd
from pandas import Series
import pandas._testing as tm
@@ -44,3 +45,12 @@ def test_round_builtin(self):
expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
result = round(ser, decimals)
tm.assert_series_equal(result, expected_rounded)
+
+ @pytest.mark.parametrize("method", ["round", "floor", "ceil"])
+ @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
+ def test_round_nat(self, method, freq):
+ # GH14940
+ ser = Series([pd.NaT])
+ expected = Series(pd.NaT)
+ round_method = getattr(ser.dt, method)
+ tm.assert_series_equal(round_method(freq), expected)
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 628c66583535d..71f6681e8c955 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -11,8 +11,8 @@ class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
- r"Index\(\.\.\.\) must be called with a collection of some"
- r" kind, None was passed"
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ r"kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index c29bd3ea0cb7d..e6e91b5d4f5f4 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -6,7 +6,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, Series
import pandas._testing as tm
@@ -160,65 +160,6 @@ def test_is_monotonic(self):
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
- def test_unstack(self):
-
- index = MultiIndex(
- levels=[["bar", "foo"], ["one", "three", "two"]],
- codes=[[1, 1, 0, 0], [0, 1, 0, 2]],
- )
-
- s = Series(np.arange(4.0), index=index)
- unstacked = s.unstack()
-
- expected = DataFrame(
- [[2.0, np.nan, 3.0], [0.0, 1.0, np.nan]],
- index=["bar", "foo"],
- columns=["one", "three", "two"],
- )
-
- tm.assert_frame_equal(unstacked, expected)
-
- unstacked = s.unstack(level=0)
- tm.assert_frame_equal(unstacked, expected.T)
-
- index = MultiIndex(
- levels=[["bar"], ["one", "two", "three"], [0, 1]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- )
- s = Series(np.random.randn(6), index=index)
- exp_index = MultiIndex(
- levels=[["one", "two", "three"], [0, 1]],
- codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- )
- expected = DataFrame({"bar": s.values}, index=exp_index).sort_index(level=0)
- unstacked = s.unstack(0).sort_index()
- tm.assert_frame_equal(unstacked, expected)
-
- # GH5873
- idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
- ts = pd.Series([1, 2], index=idx)
- left = ts.unstack()
- right = DataFrame(
- [[np.nan, 1], [2, np.nan]], index=[101, 102], columns=[np.nan, 3.5]
- )
- tm.assert_frame_equal(left, right)
-
- idx = pd.MultiIndex.from_arrays(
- [
- ["cat", "cat", "cat", "dog", "dog"],
- ["a", "a", "b", "a", "b"],
- [1, 2, 1, 1, np.nan],
- ]
- )
- ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
- right = DataFrame(
- [[1.0, 1.3], [1.1, np.nan], [np.nan, 1.4], [1.2, np.nan]],
- columns=["cat", "dog"],
- )
- tpls = [("a", 1), ("a", 2), ("b", np.nan), ("b", 1)]
- right.index = pd.MultiIndex.from_tuples(tpls)
- tm.assert_frame_equal(ts.unstack(level=0), right)
-
@pytest.mark.parametrize("func", [np.any, np.all])
@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
@td.skip_if_np_lt("1.15")
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c38e5708be09b..2651c3d73c9ab 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -967,6 +967,15 @@ def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
+ def test_constructor_datetime64_bigendian(self):
+ # GH#30976
+ ms = np.datetime64(1, "ms")
+ arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
+
+ result = Series(arr)
+ expected = Series([Timestamp(ms)])
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
@@ -1106,6 +1115,15 @@ def create_data(constructor):
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
+ def test_contructor_dict_tuple_indexer(self):
+ # GH 12948
+ data = {(1, 1, None): -1.0}
+ result = Series(data)
+ expected = Series(
+ -1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
+ )
+ tm.assert_series_equal(result, expected)
+
def test_constructor_mapping(self, non_mapping_dict_subclass):
# GH 29788
ndm = non_mapping_dict_subclass({3: "three"})
diff --git a/pandas/tests/series/test_convert_dtypes.py b/pandas/tests/series/test_convert_dtypes.py
new file mode 100644
index 0000000000000..923b5a94c5f41
--- /dev/null
+++ b/pandas/tests/series/test_convert_dtypes.py
@@ -0,0 +1,248 @@
+from itertools import product
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+class TestSeriesConvertDtypes:
+ # The answerdict has keys that have 4 tuples, corresponding to the arguments
+ # infer_objects, convert_string, convert_integer, convert_boolean
+ # This allows all 16 possible combinations to be tested. Since common
+ # combinations expect the same answer, this provides an easy way to list
+ # all the possibilities
+ @pytest.mark.parametrize(
+ "data, maindtype, answerdict",
+ [
+ (
+ [1, 2, 3],
+ np.dtype("int32"),
+ {
+ ((True, False), (True, False), (True,), (True, False)): "Int32",
+ ((True, False), (True, False), (False,), (True, False)): np.dtype(
+ "int32"
+ ),
+ },
+ ),
+ (
+ [1, 2, 3],
+ np.dtype("int64"),
+ {
+ ((True, False), (True, False), (True,), (True, False)): "Int64",
+ ((True, False), (True, False), (False,), (True, False)): np.dtype(
+ "int64"
+ ),
+ },
+ ),
+ (
+ ["x", "y", "z"],
+ np.dtype("O"),
+ {
+ (
+ (True, False),
+ (True,),
+ (True, False),
+ (True, False),
+ ): pd.StringDtype(),
+ ((True, False), (False,), (True, False), (True, False)): np.dtype(
+ "O"
+ ),
+ },
+ ),
+ (
+ [True, False, np.nan],
+ np.dtype("O"),
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True,),
+ ): pd.BooleanDtype(),
+ ((True, False), (True, False), (True, False), (False,)): np.dtype(
+ "O"
+ ),
+ },
+ ),
+ (
+ ["h", "i", np.nan],
+ np.dtype("O"),
+ {
+ (
+ (True, False),
+ (True,),
+ (True, False),
+ (True, False),
+ ): pd.StringDtype(),
+ ((True, False), (False,), (True, False), (True, False)): np.dtype(
+ "O"
+ ),
+ },
+ ),
+ (
+ [10, np.nan, 20],
+ np.dtype("float"),
+ {
+ ((True, False), (True, False), (True,), (True, False)): "Int64",
+ ((True, False), (True, False), (False,), (True, False)): np.dtype(
+ "float"
+ ),
+ },
+ ),
+ (
+ [np.nan, 100.5, 200],
+ np.dtype("float"),
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): np.dtype("float"),
+ },
+ ),
+ (
+ [3, 4, 5],
+ "Int8",
+ {((True, False), (True, False), (True, False), (True, False)): "Int8"},
+ ),
+ (
+ [[1, 2], [3, 4], [5]],
+ None,
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): np.dtype("O"),
+ },
+ ),
+ (
+ [4, 5, 6],
+ np.dtype("uint32"),
+ {
+ ((True, False), (True, False), (True,), (True, False)): "UInt32",
+ ((True, False), (True, False), (False,), (True, False)): np.dtype(
+ "uint32"
+ ),
+ },
+ ),
+ (
+ [-10, 12, 13],
+ np.dtype("i1"),
+ {
+ ((True, False), (True, False), (True,), (True, False)): "Int8",
+ ((True, False), (True, False), (False,), (True, False)): np.dtype(
+ "i1"
+ ),
+ },
+ ),
+ (
+ [1, 2.0],
+ object,
+ {
+ ((True, False), (True, False), (True,), (True, False)): "Int64",
+ ((True,), (True, False), (False,), (True, False)): np.dtype(
+ "float"
+ ),
+ ((False,), (True, False), (False,), (True, False)): np.dtype(
+ "object"
+ ),
+ },
+ ),
+ (
+ ["a", "b"],
+ pd.CategoricalDtype(),
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): pd.CategoricalDtype(),
+ },
+ ),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ pd.DatetimeTZDtype(tz="UTC"),
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): pd.DatetimeTZDtype(tz="UTC"),
+ },
+ ),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ "datetime64[ns]",
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): np.dtype("datetime64[ns]"),
+ },
+ ),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ object,
+ {
+ ((True,), (True, False), (True, False), (True, False),): np.dtype(
+ "datetime64[ns]"
+ ),
+ ((False,), (True, False), (True, False), (True, False),): np.dtype(
+ "O"
+ ),
+ },
+ ),
+ (
+ pd.period_range("1/1/2011", freq="M", periods=3),
+ None,
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): pd.PeriodDtype("M"),
+ },
+ ),
+ (
+ pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
+ None,
+ {
+ (
+ (True, False),
+ (True, False),
+ (True, False),
+ (True, False),
+ ): pd.IntervalDtype("int64"),
+ },
+ ),
+ ],
+ )
+ @pytest.mark.parametrize("params", product(*[(True, False)] * 4))
+ def test_convert_dtypes(self, data, maindtype, params, answerdict):
+ if maindtype is not None:
+ series = pd.Series(data, dtype=maindtype)
+ else:
+ series = pd.Series(data)
+ answers = {k: a for (kk, a) in answerdict.items() for k in product(*kk)}
+
+ ns = series.convert_dtypes(*params)
+ expected_dtype = answers[tuple(params)]
+ expected = pd.Series(series.values, dtype=expected_dtype)
+ tm.assert_series_equal(ns, expected)
+
+ # Test that it is a copy
+ copy = series.copy(deep=True)
+ ns[ns.notna()] = np.nan
+
+ # Make sure original not changed
+ tm.assert_series_equal(series, copy)
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index a57ec2ba05d54..1fc582156a884 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -193,8 +193,8 @@ def test_astype_dict_like(self, dtype_class):
dt3 = dtype_class({"abc": str, "def": str})
msg = (
- "Only the Series name can be used for the key in Series dtype"
- r" mappings\."
+ "Only the Series name can be used for the key in Series dtype "
+ r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
@@ -410,8 +410,8 @@ def test_arg_for_errors_in_astype(self):
s = Series([1, 2, 3])
msg = (
- r"Expected value of kwarg 'errors' to be one of \['raise',"
- r" 'ignore'\]\. Supplied value is 'False'"
+ r"Expected value of kwarg 'errors' to be one of \['raise', "
+ r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
s.astype(np.float64, errors=False)
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 7b6d9210ed3d9..6b7d9e00a5228 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -1324,8 +1324,8 @@ def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
- r"Invalid limit_direction: expecting one of \['forward',"
- r" 'backward', 'both'\], got 'abc'"
+ r"Invalid limit_direction: expecting one of \['forward', "
+ r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
@@ -1347,6 +1347,7 @@ def test_interp_limit_area(self):
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
+ tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
@@ -1362,6 +1363,7 @@ def test_interp_limit_area(self):
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
+ tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
@@ -1371,8 +1373,9 @@ def test_interp_limit_area(self):
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
- method="linear", limit_area="outside", direction="backward"
+ method="linear", limit_area="outside", limit_direction="backward"
)
+ tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
diff --git a/pandas/tests/series/test_reshaping.py b/pandas/tests/series/test_reshaping.py
new file mode 100644
index 0000000000000..7645fb8759a54
--- /dev/null
+++ b/pandas/tests/series/test_reshaping.py
@@ -0,0 +1,120 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, MultiIndex, Series
+import pandas._testing as tm
+
+
+def test_unstack():
+ index = MultiIndex(
+ levels=[["bar", "foo"], ["one", "three", "two"]],
+ codes=[[1, 1, 0, 0], [0, 1, 0, 2]],
+ )
+
+ s = Series(np.arange(4.0), index=index)
+ unstacked = s.unstack()
+
+ expected = DataFrame(
+ [[2.0, np.nan, 3.0], [0.0, 1.0, np.nan]],
+ index=["bar", "foo"],
+ columns=["one", "three", "two"],
+ )
+
+ tm.assert_frame_equal(unstacked, expected)
+
+ unstacked = s.unstack(level=0)
+ tm.assert_frame_equal(unstacked, expected.T)
+
+ index = MultiIndex(
+ levels=[["bar"], ["one", "two", "three"], [0, 1]],
+ codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
+ )
+ s = Series(np.random.randn(6), index=index)
+ exp_index = MultiIndex(
+ levels=[["one", "two", "three"], [0, 1]],
+ codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
+ )
+ expected = DataFrame({"bar": s.values}, index=exp_index).sort_index(level=0)
+ unstacked = s.unstack(0).sort_index()
+ tm.assert_frame_equal(unstacked, expected)
+
+ # GH5873
+ idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
+ ts = pd.Series([1, 2], index=idx)
+ left = ts.unstack()
+ right = DataFrame(
+ [[np.nan, 1], [2, np.nan]], index=[101, 102], columns=[np.nan, 3.5]
+ )
+ tm.assert_frame_equal(left, right)
+
+ idx = pd.MultiIndex.from_arrays(
+ [
+ ["cat", "cat", "cat", "dog", "dog"],
+ ["a", "a", "b", "a", "b"],
+ [1, 2, 1, 1, np.nan],
+ ]
+ )
+ ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
+ right = DataFrame(
+ [[1.0, 1.3], [1.1, np.nan], [np.nan, 1.4], [1.2, np.nan]],
+ columns=["cat", "dog"],
+ )
+ tpls = [("a", 1), ("a", 2), ("b", np.nan), ("b", 1)]
+ right.index = pd.MultiIndex.from_tuples(tpls)
+ tm.assert_frame_equal(ts.unstack(level=0), right)
+
+
+def test_unstack_tuplename_in_multiindex():
+ # GH 19966
+ idx = pd.MultiIndex.from_product(
+ [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
+ )
+ ser = pd.Series(1, index=idx)
+ result = ser.unstack(("A", "a"))
+
+ expected = pd.DataFrame(
+ [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
+ columns=pd.MultiIndex.from_tuples(
+ [("a",), ("b",), ("c",)], names=[("A", "a")],
+ ),
+ index=pd.Index([1, 2, 3], name=("B", "b")),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "unstack_idx, expected_values, expected_index, expected_columns",
+ [
+ (
+ ("A", "a"),
+ [[1, 1], [1, 1], [1, 1], [1, 1]],
+ pd.MultiIndex.from_tuples(
+ [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
+ ),
+ pd.MultiIndex.from_tuples([("a",), ("b",)], names=[("A", "a")]),
+ ),
+ (
+ (("A", "a"), "B"),
+ [[1, 1, 1, 1], [1, 1, 1, 1]],
+ pd.Index([3, 4], name="C"),
+ pd.MultiIndex.from_tuples(
+ [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=[("A", "a"), "B"]
+ ),
+ ),
+ ],
+)
+def test_unstack_mixed_type_name_in_multiindex(
+ unstack_idx, expected_values, expected_index, expected_columns
+):
+ # GH 19966
+ idx = pd.MultiIndex.from_product(
+ [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
+ )
+ ser = pd.Series(1, index=idx)
+ result = ser.unstack(unstack_idx)
+
+ expected = pd.DataFrame(
+ expected_values, columns=expected_columns, index=expected_index,
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index a2d14f27d7b7a..459377fb18f29 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -137,7 +137,12 @@ def test_first_last_valid(self, datetime_series):
assert ts.last_valid_index().freq == ts.index.freq
def test_mpl_compat_hack(self, datetime_series):
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
+
+ # This is currently failing because the test was relying on
+ # the DeprecationWarning coming through Index.__getitem__.
+ # We want to implement a warning specifically for Series.__getitem__
+ # at which point this will become a Deprecation/FutureWarning
+ with tm.assert_produces_warning(None):
# GH#30588 multi-dimensional indexing deprecated
result = datetime_series[:, np.newaxis]
expected = datetime_series.values[:, np.newaxis]
diff --git a/pandas/tests/test_aggregation.py b/pandas/tests/test_aggregation.py
new file mode 100644
index 0000000000000..74ccebc8e2275
--- /dev/null
+++ b/pandas/tests/test_aggregation.py
@@ -0,0 +1,90 @@
+import numpy as np
+import pytest
+
+from pandas.core.aggregation import _make_unique_kwarg_list, maybe_mangle_lambdas
+
+
+def test_maybe_mangle_lambdas_passthrough():
+ assert maybe_mangle_lambdas("mean") == "mean"
+ assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
+ # don't mangle single lambda.
+ assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
+
+
+def test_maybe_mangle_lambdas_listlike():
+ aggfuncs = [lambda x: 1, lambda x: 2]
+ result = maybe_mangle_lambdas(aggfuncs)
+ assert result[0].__name__ == "<lambda_0>"
+ assert result[1].__name__ == "<lambda_1>"
+ assert aggfuncs[0](None) == result[0](None)
+ assert aggfuncs[1](None) == result[1](None)
+
+
+def test_maybe_mangle_lambdas():
+ func = {"A": [lambda x: 0, lambda x: 1]}
+ result = maybe_mangle_lambdas(func)
+ assert result["A"][0].__name__ == "<lambda_0>"
+ assert result["A"][1].__name__ == "<lambda_1>"
+
+
+def test_maybe_mangle_lambdas_args():
+ func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
+ result = maybe_mangle_lambdas(func)
+ assert result["A"][0].__name__ == "<lambda_0>"
+ assert result["A"][1].__name__ == "<lambda_1>"
+
+ assert func["A"][0](0, 1) == (0, 1, 1)
+ assert func["A"][0](0, 1, 2) == (0, 1, 2)
+ assert func["A"][0](0, 2, b=3) == (0, 2, 3)
+
+
+def test_maybe_mangle_lambdas_named():
+ func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
+ result = maybe_mangle_lambdas(func)
+ assert result == func
+
+
+@pytest.mark.parametrize(
+ "order, expected_reorder",
+ [
+ (
+ [
+ ("height", "<lambda>"),
+ ("height", "max"),
+ ("weight", "max"),
+ ("height", "<lambda>"),
+ ("weight", "<lambda>"),
+ ],
+ [
+ ("height", "<lambda>_0"),
+ ("height", "max"),
+ ("weight", "max"),
+ ("height", "<lambda>_1"),
+ ("weight", "<lambda>"),
+ ],
+ ),
+ (
+ [
+ ("col2", "min"),
+ ("col1", "<lambda>"),
+ ("col1", "<lambda>"),
+ ("col1", "<lambda>"),
+ ],
+ [
+ ("col2", "min"),
+ ("col1", "<lambda>_0"),
+ ("col1", "<lambda>_1"),
+ ("col1", "<lambda>_2"),
+ ],
+ ),
+ (
+ [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+ [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+ ),
+ ],
+)
+def test_make_unique(order, expected_reorder):
+ # GH 27519, test if make_unique function reorders correctly
+ result = _make_unique_kwarg_list(order)
+
+ assert result == expected_reorder
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 2b46f86d49c5e..6c7f8c9b0475e 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -653,8 +653,8 @@ class TestIsin:
def test_invalid(self):
msg = (
- r"only list-like objects are allowed to be passed to isin\(\),"
- r" you passed a \[int\]"
+ r"only list-like objects are allowed to be passed to isin\(\), "
+ r"you passed a \[int\]"
)
with pytest.raises(TypeError, match=msg):
algos.isin(1, 1)
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index a8a0fcea7182c..186c735a0bff9 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -60,10 +60,11 @@ def test_random_state():
assert com.random_state() is np.random
# Error for floats or strings
- with pytest.raises(ValueError):
+ msg = "random_state must be an integer, a numpy RandomState, or None"
+ with pytest.raises(ValueError, match=msg):
com.random_state("test")
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
com.random_state(5.5)
@@ -93,15 +94,17 @@ def test_dict_compat():
def test_standardize_mapping():
# No uninitialized defaultdicts
- with pytest.raises(TypeError):
+ msg = r"to_dict\(\) only accepts initialized defaultdicts"
+ with pytest.raises(TypeError, match=msg):
com.standardize_mapping(collections.defaultdict)
# No non-mapping subtypes, instance
- with pytest.raises(TypeError):
+ msg = "unsupported type: <class 'list'>"
+ with pytest.raises(TypeError, match=msg):
com.standardize_mapping([])
# No non-mapping subtypes, class
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
com.standardize_mapping(list)
fill = {"bad": "data"}
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index ee006233c4c1b..02898988ca8aa 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -8,7 +8,7 @@
import numpy as np # noqa
import pytest
-from pandas import DataFrame, Series
+from pandas import DataFrame
import pandas._testing as tm
@@ -114,26 +114,6 @@ def test_geopandas():
assert geopandas.read_file(fp) is not None
-def test_geopandas_coordinate_indexer():
- # this test is included to have coverage of one case in the indexing.py
- # code that is only kept for compatibility with geopandas, see
- # https://github.com/pandas-dev/pandas/issues/27258
- # We should be able to remove this after some time when its usage is
- # removed in geopandas
- from pandas.core.indexing import _NDFrameIndexer
-
- class _CoordinateIndexer(_NDFrameIndexer):
- def _getitem_tuple(self, tup):
- obj = self.obj
- xs, ys = tup
- return obj[xs][ys]
-
- Series._create_indexer("cx", _CoordinateIndexer)
- s = Series(range(5))
- res = s.cx[:, :]
- tm.assert_series_equal(s, res)
-
-
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
@@ -156,7 +136,12 @@ def test_missing_required_dependency():
# https://github.com/MacPython/pandas-wheels/pull/50
call = ["python", "-sSE", "-c", "import pandas"]
- with pytest.raises(subprocess.CalledProcessError) as exc:
+ msg = (
+ r"Command '\['python', '-sSE', '-c', 'import pandas'\]' "
+ "returned non-zero exit status 1."
+ )
+
+ with pytest.raises(subprocess.CalledProcessError, match=msg) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index fa2142444ed92..d72c00ceb0045 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -22,21 +22,23 @@
def test_exception_importable(exc):
from pandas import errors
- e = getattr(errors, exc)
- assert e is not None
+ err = getattr(errors, exc)
+ assert err is not None
# check that we can raise on them
- with pytest.raises(e):
- raise e()
+
+ msg = "^$"
+
+ with pytest.raises(err, match=msg):
+ raise err()
def test_catch_oob():
from pandas import errors
- try:
+ msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"
+ with pytest.raises(errors.OutOfBoundsDatetime, match=msg):
pd.Timestamp("15000101")
- except errors.OutOfBoundsDatetime:
- pass
class Foo:
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index f839aa198d03f..d914cf873de24 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -22,7 +22,8 @@ def test_max_len_string_array(self):
assert libwriters.max_len_string_array(arr) == 3
# raises
- with pytest.raises(TypeError):
+ msg = "No matching signature found"
+ with pytest.raises(TypeError, match=msg):
libwriters.max_len_string_array(arr.astype("U"))
def test_fast_unique_multiple_list_gen_sort(self):
@@ -100,9 +101,11 @@ def test_maybe_indices_to_slice_right_edge(self):
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
- with pytest.raises(IndexError):
+ msg = "index 100 is out of bounds for axis (0|1) with size 100"
+
+ with pytest.raises(IndexError, match=msg):
target[indices]
- with pytest.raises(IndexError):
+ with pytest.raises(IndexError, match=msg):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
@@ -111,9 +114,9 @@ def test_maybe_indices_to_slice_right_edge(self):
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
- with pytest.raises(IndexError):
+ with pytest.raises(IndexError, match=msg):
target[indices]
- with pytest.raises(IndexError):
+ with pytest.raises(IndexError, match=msg):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 5382ad84bcca2..640cd8faf6811 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -891,6 +891,31 @@ def manual_compare_stacked(df, df_stacked, lev0, lev1):
)
manual_compare_stacked(df, df.stack(0), 0, 1)
+ def test_stack_unstack_unordered_multiindex(self):
+ # GH 18265
+ values = np.arange(5)
+ data = np.vstack(
+ [
+ ["b{}".format(x) for x in values], # b0, b1, ..
+ ["a{}".format(x) for x in values],
+ ]
+ ) # a0, a1, ..
+ df = pd.DataFrame(data.T, columns=["b", "a"])
+ df.columns.name = "first"
+ second_level_dict = {"x": df}
+ multi_level_df = pd.concat(second_level_dict, axis=1)
+ multi_level_df.columns.names = ["second", "first"]
+ df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1)
+ result = df.stack(["first", "second"]).unstack(["first", "second"])
+ expected = DataFrame(
+ [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]],
+ index=[0, 1, 2, 3, 4],
+ columns=MultiIndex.from_tuples(
+ [("a", "x"), ("b", "x")], names=["first", "second"]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_groupby_corner(self):
midx = MultiIndex(
levels=[["foo"], ["bar"], ["baz"]],
@@ -957,6 +982,10 @@ def test_swaplevel(self):
exp = self.frame.swaplevel("first", "second").T
tm.assert_frame_equal(swapped, exp)
+ msg = "Can only swap levels on a hierarchical axis."
+ with pytest.raises(TypeError, match=msg):
+ DataFrame(range(3)).swaplevel()
+
def test_reorder_levels(self):
result = self.ymd.reorder_levels(["month", "day", "year"])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
@@ -2147,6 +2176,40 @@ def test_sort_index_level_mixed(self):
sorted_after.drop([("foo", "three")], axis=1),
)
+ def test_sort_index_categorical_multiindex(self):
+ # GH 15058
+ df = DataFrame(
+ {
+ "a": range(6),
+ "l1": pd.Categorical(
+ ["a", "a", "b", "b", "c", "c"],
+ categories=["c", "a", "b"],
+ ordered=True,
+ ),
+ "l2": [0, 1, 0, 1, 0, 1],
+ }
+ )
+ result = df.set_index(["l1", "l2"]).sort_index()
+ expected = DataFrame(
+ [4, 5, 0, 1, 2, 3],
+ columns=["a"],
+ index=MultiIndex(
+ levels=[
+ pd.CategoricalIndex(
+ ["c", "a", "b"],
+ categories=["c", "a", "b"],
+ ordered=True,
+ name="l1",
+ dtype="category",
+ ),
+ [0, 1],
+ ],
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
+ names=["l1", "l2"],
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 7f3375070d7d9..62d26dacde67b 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -3392,8 +3392,8 @@ def test_encode_decode_errors(self):
encodeBase = Series(["a", "b", "a\x9d"])
msg = (
- r"'charmap' codec can't encode character '\\x9d' in position 1:"
- " character maps to <undefined>"
+ r"'charmap' codec can't encode character '\\x9d' in position 1: "
+ "character maps to <undefined>"
)
with pytest.raises(UnicodeEncodeError, match=msg):
encodeBase.str.encode("cp1252")
@@ -3406,8 +3406,8 @@ def test_encode_decode_errors(self):
decodeBase = Series([b"a", b"b", b"a\x9d"])
msg = (
- "'charmap' codec can't decode byte 0x9d in position 1:"
- " character maps to <undefined>"
+ "'charmap' codec can't decode byte 0x9d in position 1: "
+ "character maps to <undefined>"
)
with pytest.raises(UnicodeDecodeError, match=msg):
decodeBase.str.decode("cp1252")
@@ -3521,7 +3521,7 @@ def test_string_array(any_string_method):
if isinstance(expected, Series):
if expected.dtype == "object" and lib.is_string_array(
- expected.values, skipna=True
+ expected.dropna().values,
):
assert result.dtype == "string"
result = result.astype(object)
@@ -3573,3 +3573,18 @@ def test_string_array_boolean_array(method, expected):
result = getattr(s.str, method)()
expected = Series(expected, dtype="boolean")
tm.assert_series_equal(result, expected)
+
+
+def test_string_array_extract():
+ # https://github.com/pandas-dev/pandas/issues/30969
+ # Only expand=False & multiple groups was failing
+ a = Series(["a1", "b2", "cc"], dtype="string")
+ b = Series(["a1", "b2", "cc"], dtype="object")
+ pat = r"(\w)(\d)"
+
+ result = a.str.extract(pat, expand=False)
+ expected = b.str.extract(pat, expand=False)
+ assert all(result.dtypes == "string")
+
+ result = result.astype(object)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py
index 465296a6f9e51..2534f1849cf61 100644
--- a/pandas/tests/test_take.py
+++ b/pandas/tests/test_take.py
@@ -345,7 +345,7 @@ def test_2d_float32(self):
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
- arr = np.random.randint(11045376, 11360736, (5, 3)) * 100000000000
+ arr = np.random.randint(11_045_376, 11_360_736, (5, 3)) * 100_000_000_000
arr = arr.view(dtype="datetime64[ns]")
indexer = [0, 2, -1, 1, -1]
@@ -423,16 +423,21 @@ class TestExtensionTake:
def test_bounds_check_large(self):
arr = np.array([1, 2])
- with pytest.raises(IndexError):
+
+ msg = "indices are out-of-bounds"
+ with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=True)
- with pytest.raises(IndexError):
+ msg = "index 2 is out of bounds for( axis 0 with)? size 2"
+ with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=False)
def test_bounds_check_small(self):
arr = np.array([1, 2, 3], dtype=np.int64)
indexer = [0, -1, -2]
- with pytest.raises(ValueError):
+
+ msg = r"'indices' contains values less than allowed \(-2 < -1\)"
+ with pytest.raises(ValueError, match=msg):
algos.take(arr, indexer, allow_fill=True)
result = algos.take(arr, indexer)
@@ -446,7 +451,11 @@ def test_take_empty(self, allow_fill):
result = algos.take(arr, [], allow_fill=allow_fill)
tm.assert_numpy_array_equal(arr, result)
- with pytest.raises(IndexError):
+ msg = (
+ "cannot do a non-empty take from an empty axes.|"
+ "indices are out-of-bounds"
+ )
+ with pytest.raises(IndexError, match=msg):
algos.take(arr, [0], allow_fill=allow_fill)
def test_take_na_empty(self):
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 5686119593e18..f0ce104a68e29 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -95,7 +95,7 @@ class TestFY5253LastOfMonth(Base):
on_offset_cases = [
# From Wikipedia (see:
- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
+ # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
(offset_lom_sat_aug, datetime(2006, 8, 26), True),
(offset_lom_sat_aug, datetime(2007, 8, 25), True),
(offset_lom_sat_aug, datetime(2008, 8, 30), True),
@@ -208,7 +208,7 @@ def test_get_year_end(self):
on_offset_cases = [
# From Wikipedia (see:
- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+ # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
# #Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py
index 2beeae85de683..96c2d6bbd8106 100644
--- a/pandas/tests/tslibs/test_conversion.py
+++ b/pandas/tests/tslibs/test_conversion.py
@@ -72,6 +72,15 @@ def test_length_zero_copy(dtype, copy):
assert result.base is (None if copy else arr)
+def test_ensure_datetime64ns_bigendian():
+ # GH#29684
+ arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
+ result = conversion.ensure_datetime64ns(arr)
+
+ expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")
+ tm.assert_numpy_array_equal(result, expected)
+
+
class SubDatetime(datetime):
pass
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 36f7ada7326bf..c452d5b12ce01 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -15,10 +15,9 @@
def test_parse_time_string():
- (date, parsed, reso) = parse_time_string("4Q1984")
- (date_lower, parsed_lower, reso_lower) = parse_time_string("4q1984")
+ (parsed, reso) = parse_time_string("4Q1984")
+ (parsed_lower, reso_lower) = parse_time_string("4q1984")
- assert date == date_lower
assert reso == reso_lower
assert parsed == parsed_lower
@@ -34,10 +33,9 @@ def test_parse_time_string_invalid_type():
)
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
- (date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
- (date, parsed, reso) = parse_time_string(normal)
+ (parsed_dash, reso_dash) = parse_time_string(dashed)
+ (parsed, reso) = parse_time_string(normal)
- assert date_dash == date
assert parsed_dash == parsed
assert reso_dash == reso
@@ -106,7 +104,7 @@ def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
],
)
def test_parsers_quarterly_with_freq(date_str, freq, expected):
- result, _, _ = parsing.parse_time_string(date_str, freq=freq)
+ result, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@@ -131,7 +129,7 @@ def test_parsers_quarter_invalid(date_str):
[("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],
)
def test_parsers_month_freq(date_str, expected):
- result, _, _ = parsing.parse_time_string(date_str, freq="M")
+ result, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@@ -223,5 +221,5 @@ def test_parse_time_string_check_instance_type_raise_exception():
parse_time_string((1, 2, 3))
result = parse_time_string("2019")
- expected = (datetime(2019, 1, 1), datetime(2019, 1, 1), "year")
+ expected = (datetime(2019, 1, 1), "year")
assert result == expected
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index 23c845f2b2795..3090343ba2fd9 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -46,13 +46,9 @@ def _assert_not_frame_equal(a, b, **kwargs):
kwargs : dict
The arguments passed to `tm.assert_frame_equal`.
"""
- try:
+ msg = "The two DataFrames were equal when they shouldn't have been"
+ with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(a, b, **kwargs)
- msg = "The two DataFrames were equal when they shouldn't have been"
-
- pytest.fail(msg=msg)
- except AssertionError:
- pass
def _assert_not_frame_equal_both(a, b, **kwargs):
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index c915edad4bb8e..c856585f20138 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -374,3 +374,10 @@ def test_hash_with_tuple():
df3 = pd.DataFrame({"data": [tuple([1, []]), tuple([2, {}])]})
with pytest.raises(TypeError, match="unhashable type: 'list'"):
hash_pandas_object(df3)
+
+
+def test_hash_object_none_key():
+ # https://github.com/pandas-dev/pandas/issues/30887
+ result = pd.util.hash_pandas_object(pd.Series(["a", "b"]), hash_key=None)
+ expected = pd.Series([4578374827886788867, 17338122309987883691], dtype="uint64")
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/util/test_validate_kwargs.py b/pandas/tests/util/test_validate_kwargs.py
index a7b6d8f98cc60..8fe2a3712bf49 100644
--- a/pandas/tests/util/test_validate_kwargs.py
+++ b/pandas/tests/util/test_validate_kwargs.py
@@ -49,8 +49,8 @@ def test_validation():
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = (
- f'For argument "{name}" expected type bool,'
- f" received type {type(value).__name__}"
+ f'For argument "{name}" expected type bool, '
+ f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py
index 4596552d8f255..9dfaecee9caeb 100644
--- a/pandas/tests/window/moments/test_moments_expanding.py
+++ b/pandas/tests/window/moments/test_moments_expanding.py
@@ -13,15 +13,17 @@ class TestExpandingMomentsConsistency(ConsistencyBase):
def setup_method(self, method):
self._create_data()
- def test_expanding_apply_args_kwargs(self, raw):
+ def test_expanding_apply_args_kwargs(self, engine_and_raw):
def mean_w_arg(x, const):
return np.mean(x) + const
+ engine, raw = engine_and_raw
+
df = DataFrame(np.random.rand(20, 3))
- expected = df.expanding().apply(np.mean, raw=raw) + 20.0
+ expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0
- result = df.expanding().apply(mean_w_arg, raw=raw, args=(20,))
+ result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))
tm.assert_frame_equal(result, expected)
result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20})
@@ -38,9 +40,9 @@ def test_expanding_corr(self):
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
- result = self.series.expanding().count()
+ result = self.series.expanding(min_periods=0).count()
tm.assert_almost_equal(
- result, self.series.rolling(window=len(self.series)).count()
+ result, self.series.rolling(window=len(self.series), min_periods=0).count()
)
def test_expanding_quantile(self):
@@ -190,11 +192,14 @@ def expanding_func(x, min_periods=1, center=False, axis=0):
)
@pytest.mark.parametrize("has_min_periods", [True, False])
- def test_expanding_apply(self, raw, has_min_periods):
+ def test_expanding_apply(self, engine_and_raw, has_min_periods):
+
+ engine, raw = engine_and_raw
+
def expanding_mean(x, min_periods=1):
exp = x.expanding(min_periods=min_periods)
- result = exp.apply(lambda x: x.mean(), raw=raw)
+ result = exp.apply(lambda x: x.mean(), raw=raw, engine=engine)
return result
# TODO(jreback), needed to add preserve_nan=False
@@ -202,14 +207,20 @@ def expanding_mean(x, min_periods=1):
self._check_expanding(expanding_mean, np.mean, preserve_nan=False)
self._check_expanding_has_min_periods(expanding_mean, np.mean, has_min_periods)
- def test_expanding_apply_empty_series(self, raw):
+ def test_expanding_apply_empty_series(self, engine_and_raw):
+ engine, raw = engine_and_raw
ser = Series([], dtype=np.float64)
- tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean(), raw=raw))
+ tm.assert_series_equal(
+ ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
+ )
- def test_expanding_apply_min_periods_0(self, raw):
+ def test_expanding_apply_min_periods_0(self, engine_and_raw):
# GH 8080
+ engine, raw = engine_and_raw
s = Series([None, None, None])
- result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw)
+ result = s.expanding(min_periods=0).apply(
+ lambda x: len(x), raw=raw, engine=engine
+ )
expected = Series([1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
@@ -358,7 +369,7 @@ def test_expanding_consistency(self, min_periods):
)
self._test_moments_consistency(
min_periods=min_periods,
- count=lambda x: x.expanding().count(),
+ count=lambda x: x.expanding(min_periods=min_periods).count(),
mean=lambda x: x.expanding(min_periods=min_periods).mean(),
corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y),
var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(),
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 9acb4ffcb40b8..83e4ee25558b5 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -777,8 +777,8 @@ def get_result(obj, window, min_periods=None, center=False):
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
- series_result = get_result(series, window=win)
- frame_result = get_result(frame, window=win)
+ series_result = get_result(series, window=win, min_periods=0)
+ frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
@@ -835,8 +835,8 @@ def get_result(obj, window, min_periods=None, center=False):
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
- result = get_result(self.series, len(self.series) + 1)
- expected = get_result(self.series, len(self.series))
+ result = get_result(self.series, len(self.series) + 1, min_periods=0)
+ expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
@@ -851,10 +851,11 @@ def get_result(obj, window, min_periods=None, center=False):
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
- result = get_result(obj, 20, center=True)
- expected = get_result(pd.concat([obj, Series([np.NaN] * 9)]), 20)[
- 9:
- ].reset_index(drop=True)
+ result = get_result(obj, 20, min_periods=0, center=True)
+ print(result)
+ expected = get_result(
+ pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
+ )[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
@@ -893,21 +894,27 @@ def get_result(obj, window, min_periods=None, center=False):
else:
series_xp = (
get_result(
- self.series.reindex(list(self.series.index) + s), window=25
+ self.series.reindex(list(self.series.index) + s),
+ window=25,
+ min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
- self.frame.reindex(list(self.frame.index) + s), window=25
+ self.frame.reindex(list(self.frame.index) + s),
+ window=25,
+ min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
- series_rs = get_result(self.series, window=25, center=True)
- frame_rs = get_result(self.frame, window=25, center=True)
+ series_rs = get_result(
+ self.series, window=25, min_periods=0, center=True
+ )
+ frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
@@ -964,7 +971,11 @@ def test_rolling_consistency(self, window, min_periods, center):
self._test_moments_consistency_is_constant(
min_periods=min_periods,
- count=lambda x: (x.rolling(window=window, center=center).count()),
+ count=lambda x: (
+ x.rolling(
+ window=window, min_periods=min_periods, center=center
+ ).count()
+ ),
mean=lambda x: (
x.rolling(
window=window, min_periods=min_periods, center=center
@@ -989,19 +1000,26 @@ def test_rolling_consistency(self, window, min_periods, center):
).var(ddof=0)
),
var_debiasing_factors=lambda x: (
- x.rolling(window=window, center=center)
+ x.rolling(window=window, min_periods=min_periods, center=center)
.count()
.divide(
- (x.rolling(window=window, center=center).count() - 1.0).replace(
- 0.0, np.nan
- )
+ (
+ x.rolling(
+ window=window, min_periods=min_periods, center=center
+ ).count()
+ - 1.0
+ ).replace(0.0, np.nan)
)
),
)
self._test_moments_consistency(
min_periods=min_periods,
- count=lambda x: (x.rolling(window=window, center=center).count()),
+ count=lambda x: (
+ x.rolling(
+ window=window, min_periods=min_periods, center=center
+ ).count()
+ ),
mean=lambda x: (
x.rolling(
window=window, min_periods=min_periods, center=center
@@ -1071,7 +1089,7 @@ def test_rolling_consistency(self, window, min_periods, center):
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
- window=window, min_periods=0, center=center
+ window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 5e70e13209de5..680237db0535b 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -237,10 +237,10 @@ def test_count_nonnumeric_types(self):
columns=cols,
)
- result = df.rolling(window=2).count()
+ result = df.rolling(window=2, min_periods=0).count()
tm.assert_frame_equal(result, expected)
- result = df.rolling(1).count()
+ result = df.rolling(1, min_periods=0).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index fc4bd50f25c73..6b6367fd80b26 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -113,3 +113,22 @@ def test_expanding_axis(self, axis_frame):
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("constructor", [Series, DataFrame])
+def test_expanding_count_with_min_periods(constructor):
+ # GH 26996
+ result = constructor(range(5)).expanding(min_periods=3).count()
+ expected = constructor([np.nan, np.nan, 3.0, 4.0, 5.0])
+ tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize("constructor", [Series, DataFrame])
+def test_expanding_count_default_min_periods_with_null_values(constructor):
+ # GH 26996
+ values = [1, 2, 3, np.nan, 4, 5, 6]
+ expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]
+
+ result = constructor(values).expanding().count()
+ expected = constructor(expected_counts)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 04fab93b71c4a..ab2c7fcb7a0dc 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -32,23 +32,34 @@ def test_constructor(self, which):
c = o.rolling
# valid
+ c(0)
c(window=2)
c(window=2, min_periods=1)
c(window=2, min_periods=1, center=True)
c(window=2, min_periods=1, center=False)
# GH 13383
- with pytest.raises(ValueError):
- c(0)
+
+ msg = "window must be non-negative"
+
+ with pytest.raises(ValueError, match=msg):
c(-1)
# not valid
for w in [2.0, "foo", np.array([2])]:
- with pytest.raises(ValueError):
+ msg = (
+ "window must be an integer|"
+ "passed window foo is not compatible with a datetimelike index"
+ )
+ with pytest.raises(ValueError, match=msg):
c(window=w)
- with pytest.raises(ValueError):
+
+ msg = "min_periods must be an integer"
+ with pytest.raises(ValueError, match=msg):
c(window=2, min_periods=w)
- with pytest.raises(ValueError):
+
+ msg = "center must be a boolean"
+ with pytest.raises(ValueError, match=msg):
c(window=2, min_periods=1, center=w)
@td.skip_if_no_scipy
@@ -57,7 +68,10 @@ def test_constructor_with_win_type(self, which):
# GH 13383
o = getattr(self, which)
c = o.rolling
- with pytest.raises(ValueError):
+
+ msg = "window must be > 0"
+
+ with pytest.raises(ValueError, match=msg):
c(-1, win_type="boxcar")
@pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3)])
@@ -113,7 +127,10 @@ def test_numpy_compat(self, method):
def test_closed(self):
df = DataFrame({"A": [0, 1, 2, 3, 4]})
# closed only allowed for datetimelike
- with pytest.raises(ValueError):
+
+ msg = "closed only implemented for datetimelike and offset based windows"
+
+ with pytest.raises(ValueError, match=msg):
df.rolling(window=3, closed="neither")
@pytest.mark.parametrize("closed", ["neither", "left"])
@@ -296,7 +313,10 @@ def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
- with pytest.raises(NotImplementedError):
+
+ msg = "See issue #11704 https://github.com/pandas-dev/pandas/issues/11704"
+
+ with pytest.raises(NotImplementedError, match=msg):
iter(obj.rolling(2))
def test_rolling_axis_sum(self, axis_frame):
@@ -324,7 +344,7 @@ def test_rolling_axis_count(self, axis_frame):
else:
expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]})
- result = df.rolling(2, axis=axis_frame).count()
+ result = df.rolling(2, axis=axis_frame, min_periods=0).count()
tm.assert_frame_equal(result, expected)
def test_readonly_array(self):
@@ -426,3 +446,22 @@ def test_min_periods1():
result = df["a"].rolling(3, center=True, min_periods=1).max()
expected = pd.Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a")
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("constructor", [Series, DataFrame])
+def test_rolling_count_with_min_periods(constructor):
+ # GH 26996
+ result = constructor(range(5)).rolling(3, min_periods=3).count()
+ expected = constructor([np.nan, np.nan, 3.0, 3.0, 3.0])
+ tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize("constructor", [Series, DataFrame])
+def test_rolling_count_default_min_periods_with_null_values(constructor):
+ # GH 26996
+ values = [1, 2, 3, np.nan, 4, 5, 6]
+ expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]
+
+ result = constructor(values).rolling(3).count()
+ expected = constructor(expected_counts)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index e2d007cd2d7f8..af34180fb3170 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -334,7 +334,7 @@ def is_unique(self) -> bool:
return len(self.deltas) == 1
@cache_readonly
- def is_unique_asi8(self):
+ def is_unique_asi8(self) -> bool:
return len(self.deltas_asi8) == 1
def get_freq(self) -> Optional[str]:
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8bb98a271bce8..e05cce9c49f4b 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -26,7 +26,7 @@
BaseOffset,
_get_calendar,
_is_normalized,
- _to_dt64,
+ _to_dt64D,
apply_index_wraps,
as_datetime,
roll_yearday,
@@ -365,7 +365,7 @@ def apply_index(self, i):
"applied vectorized"
)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
# TODO: Does this make sense for the general case? It would help
# if there were a canonical docstring for what is_anchored means.
return self.n == 1
@@ -378,7 +378,7 @@ def onOffset(self, dt):
)
return self.is_on_offset(dt)
- def isAnchored(self):
+ def isAnchored(self) -> bool:
warnings.warn(
"isAnchored is a deprecated, use is_anchored instead",
FutureWarning,
@@ -389,7 +389,7 @@ def isAnchored(self):
# TODO: Combine this with BusinessMixin version by defining a whitelisted
# set of attributes on each object rather than the existing behavior of
# iterating over internal ``__dict__``
- def _repr_attrs(self):
+ def _repr_attrs(self) -> str:
exclude = {"n", "inc", "normalize"}
attrs = []
for attr in sorted(self.__dict__):
@@ -405,7 +405,7 @@ def _repr_attrs(self):
return out
@property
- def name(self):
+ def name(self) -> str:
return self.rule_code
def rollback(self, dt):
@@ -452,15 +452,15 @@ def is_on_offset(self, dt):
# way to get around weirdness with rule_code
@property
- def _prefix(self):
+ def _prefix(self) -> str:
raise NotImplementedError("Prefix not defined")
@property
- def rule_code(self):
+ def rule_code(self) -> str:
return self._prefix
@cache_readonly
- def freqstr(self):
+ def freqstr(self) -> str:
try:
code = self.rule_code
except NotImplementedError:
@@ -480,7 +480,7 @@ def freqstr(self):
return fstr
- def _offset_str(self):
+ def _offset_str(self) -> str:
return ""
@property
@@ -529,11 +529,11 @@ def offset(self):
# Alias for backward compat
return self._offset
- def _repr_attrs(self):
+ def _repr_attrs(self) -> str:
if self.offset:
attrs = [f"offset={repr(self.offset)}"]
else:
- attrs = None
+ attrs = []
out = ""
if attrs:
out += ": " + ", ".join(attrs)
@@ -553,7 +553,7 @@ def __init__(self, n=1, normalize=False, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
- def _offset_str(self):
+ def _offset_str(self) -> str:
def get_str(td):
off_str = ""
if td.days > 0:
@@ -649,7 +649,7 @@ def apply_index(self, i):
result = shifted.to_timestamp() + time
return result
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
@@ -1087,10 +1087,10 @@ def apply(self, other):
def apply_index(self, i):
raise NotImplementedError
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
- day64 = _to_dt64(dt, "datetime64[D]")
+ day64 = _to_dt64D(dt)
return np.is_busday(day64, busdaycal=self.calendar)
@@ -1134,14 +1134,14 @@ class MonthOffset(SingleConstructorOffset):
__init__ = BaseOffset.__init__
@property
- def name(self):
+ def name(self) -> str:
if self.is_anchored:
return self.rule_code
else:
month = ccalendar.MONTH_ALIASES[self.n]
return f"{self.code_rule}-{month}"
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
@@ -1333,7 +1333,7 @@ def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = f"-{self.day_of_month}"
return self._prefix + suffix
@@ -1429,7 +1429,7 @@ class SemiMonthEnd(SemiMonthOffset):
_prefix = "SM"
_min_day_of_month = 1
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
days_in_month = ccalendar.get_days_in_month(dt.year, dt.month)
@@ -1487,7 +1487,7 @@ class SemiMonthBegin(SemiMonthOffset):
_prefix = "SMS"
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.day in (1, self.day_of_month)
@@ -1556,7 +1556,7 @@ def __init__(self, n=1, normalize=False, weekday=None):
if self.weekday < 0 or self.weekday > 6:
raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self.weekday is not None
@apply_wraps
@@ -1632,7 +1632,7 @@ def _end_apply_index(self, dtindex):
return base + off + Timedelta(1, "ns") - Timedelta(1, "D")
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
elif self.weekday is None:
@@ -1640,7 +1640,7 @@ def is_on_offset(self, dt):
return dt.weekday() == self.weekday
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = ""
if self.weekday is not None:
weekday = ccalendar.int_to_weekday[self.weekday]
@@ -1717,7 +1717,7 @@ def __init__(self, n=1, normalize=False, week=0, weekday=0):
if self.week < 0 or self.week > 3:
raise ValueError(f"Week must be 0<=week<=3, got {self.week}")
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
@@ -1736,7 +1736,7 @@ def _get_offset_day(self, other):
return 1 + shift_days + self.week * 7
@property
- def rule_code(self):
+ def rule_code(self) -> str:
weekday = ccalendar.int_to_weekday.get(self.weekday, "")
return f"{self._prefix}-{self.week + 1}{weekday}"
@@ -1785,7 +1785,7 @@ def __init__(self, n=1, normalize=False, weekday=0):
if self.weekday < 0 or self.weekday > 6:
raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}")
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.
@@ -1805,7 +1805,7 @@ def _get_offset_day(self, other):
return dim - shift_days
@property
- def rule_code(self):
+ def rule_code(self) -> str:
weekday = ccalendar.int_to_weekday.get(self.weekday, "")
return f"{self._prefix}-{weekday}"
@@ -1842,7 +1842,7 @@ def __init__(self, n=1, normalize=False, startingMonth=None):
startingMonth = self._default_startingMonth
object.__setattr__(self, "startingMonth", startingMonth)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self.startingMonth is not None
@classmethod
@@ -1856,7 +1856,7 @@ def _from_name(cls, suffix=None):
return cls(**kwargs)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
month = ccalendar.MONTH_ALIASES[self.startingMonth]
return f"{self._prefix}-{month}"
@@ -1874,7 +1874,7 @@ def apply(self, other):
months = qtrs * 3 - months_since
return shift_month(other, months, self._day_opt)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
mod_month = (dt.month - self.startingMonth) % 3
@@ -1953,7 +1953,7 @@ class YearOffset(DateOffset):
_adjust_dst = True
_attributes = frozenset(["n", "normalize", "month"])
- def _get_offset_day(self, other):
+ def _get_offset_day(self, other: datetime) -> int:
# override BaseOffset method to use self.month instead of other.month
# TODO: there may be a more performant way to do this
return liboffsets.get_day_of_month(
@@ -1977,7 +1977,7 @@ def apply_index(self, dtindex):
shifted, freq=dtindex.freq, dtype=dtindex.dtype
)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == self._get_offset_day(dt)
@@ -1999,7 +1999,7 @@ def _from_name(cls, suffix=None):
return cls(**kwargs)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
month = ccalendar.MONTH_ALIASES[self.month]
return f"{self._prefix}-{month}"
@@ -2062,7 +2062,7 @@ class FY5253(DateOffset):
such as retail, manufacturing and parking industry.
For more information see:
- http://en.wikipedia.org/wiki/4-4-5_calendar
+ https://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
@@ -2117,12 +2117,12 @@ def __init__(
if self.variation not in ["nearest", "last"]:
raise ValueError(f"{self.variation} is not a valid variation")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return (
self.n == 1 and self.startingMonth is not None and self.weekday is not None
)
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
@@ -2217,18 +2217,18 @@ def get_year_end(self, dt):
return target_date + timedelta(days_forward - 7)
@property
- def rule_code(self):
+ def rule_code(self) -> str:
prefix = self._prefix
suffix = self.get_rule_code_suffix()
return f"{prefix}-{suffix}"
- def _get_suffix_prefix(self):
+ def _get_suffix_prefix(self) -> str:
if self.variation == "nearest":
return "N"
else:
return "L"
- def get_rule_code_suffix(self):
+ def get_rule_code_suffix(self) -> str:
prefix = self._get_suffix_prefix()
month = ccalendar.MONTH_ALIASES[self.startingMonth]
weekday = ccalendar.int_to_weekday[self.weekday]
@@ -2270,7 +2270,7 @@ class FY5253Quarter(DateOffset):
such as retail, manufacturing and parking industry.
For more information see:
- http://en.wikipedia.org/wiki/4-4-5_calendar
+ https://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
@@ -2346,7 +2346,7 @@ def _offset(self):
variation=self.variation,
)
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return self.n == 1 and self._offset.is_anchored()
def _rollback_to_year(self, other):
@@ -2434,7 +2434,7 @@ def get_weeks(self, dt):
return ret
- def year_has_extra_week(self, dt):
+ def year_has_extra_week(self, dt: datetime) -> bool:
# Avoid round-down errors --> normalize to get
# e.g. '370D' instead of '360D23H'
norm = Timestamp(dt).normalize().tz_localize(None)
@@ -2445,7 +2445,7 @@ def year_has_extra_week(self, dt):
assert weeks_in_year in [52, 53], weeks_in_year
return weeks_in_year == 53
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
if self._offset.is_on_offset(dt):
@@ -2463,7 +2463,7 @@ def is_on_offset(self, dt):
return False
@property
- def rule_code(self):
+ def rule_code(self) -> str:
suffix = self._offset.get_rule_code_suffix()
qtr = self.qtr_with_extra_week
return f"{self._prefix}-{suffix}-{qtr}"
@@ -2516,7 +2516,7 @@ def apply(self, other):
)
return new
- def is_on_offset(self, dt):
+ def is_on_offset(self, dt: datetime) -> bool:
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
@@ -2596,7 +2596,7 @@ def __eq__(self, other: Any) -> bool:
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
- def __hash__(self):
+ def __hash__(self) -> int:
return hash(self._params)
def __ne__(self, other):
@@ -2617,7 +2617,7 @@ def __ne__(self, other):
return True
@property
- def delta(self):
+ def delta(self) -> Timedelta:
return self.n * self._inc
@property
@@ -2648,11 +2648,11 @@ def apply(self, other):
raise ApplyTypeError(f"Unhandled type: {type(other).__name__}")
- def is_anchored(self):
+ def is_anchored(self) -> bool:
return False
-def _delta_to_tick(delta):
+def _delta_to_tick(delta: timedelta) -> Tick:
if delta.microseconds == 0 and getattr(delta, "nanoseconds", 0) == 0:
# nanoseconds only for pd.Timedelta
if delta.seconds == 0:
@@ -2667,8 +2667,8 @@ def _delta_to_tick(delta):
return Second(seconds)
else:
nanos = delta_to_nanoseconds(delta)
- if nanos % 1000000 == 0:
- return Milli(nanos // 1000000)
+ if nanos % 1_000_000 == 0:
+ return Milli(nanos // 1_000_000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index d906c0371d207..b5271dbc0443e 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -1,3 +1,30 @@
from pandas.util._decorators import Appender, Substitution, cache_readonly # noqa
+from pandas import compat
from pandas.core.util.hashing import hash_array, hash_pandas_object # noqa
+
+# compatibility for import pandas; pandas.util.testing
+
+if compat.PY37:
+
+ def __getattr__(name):
+ if name == "testing":
+ import pandas.util.testing
+
+ return pandas.util.testing
+ else:
+ raise AttributeError(f"module 'pandas.util' has no attribute '{name}'")
+
+
+else:
+
+ class _testing:
+ def __getattr__(self, item):
+ import pandas.util.testing
+
+ return getattr(pandas.util.testing, item)
+
+ testing = _testing()
+
+
+del compat
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index d10d3a1f71fe6..0aab5a9c4113d 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -248,7 +248,7 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]:
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
-# module http://matplotlib.org/users/license.html
+# module https://matplotlib.org/users/license.html
class Substitution:
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index d8804994af426..cd7fdd55a4d2c 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -77,8 +77,8 @@ def safe_import(mod_name: str, min_version: Optional[str] = None):
# TODO:
-# remove when gh-24839 is fixed; this affects numpy 1.16
-# and pytables 3.4.4
+# remove when gh-24839 is fixed.
+# this affects numpy 1.16 and pytables 3.4.4
tables = safe_import("tables")
xfail_non_writeable = pytest.mark.xfail(
tables
@@ -86,7 +86,7 @@ def safe_import(mod_name: str, min_version: Optional[str] = None):
and LooseVersion(tables.__version__) < LooseVersion("3.5.1"),
reason=(
"gh-25511, gh-24839. pytables needs a "
- "release beyong 3.4.4 to support numpy 1.16x"
+ "release beyond 3.4.4 to support numpy 1.16.x"
),
)
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index b69c974661f89..a715094e65e98 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -297,7 +297,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
- " a 'TypeError'."
+ "a 'TypeError'."
)
warnings.warn(msg.format(method_name=method_name), FutureWarning, stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 017e6258d9941..08cbef2c7fc6b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -16,7 +16,6 @@ mypy==0.730
pycodestyle
gitpython
sphinx
-numpydoc>=0.9.0
nbconvert>=5.4.1
nbsphinx
pandoc
@@ -70,4 +69,5 @@ sqlalchemy
xarray
pyreadstat
tabulate>=0.8.3
-git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
\ No newline at end of file
+git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master
+git+https://github.com/numpy/numpydoc
\ No newline at end of file
diff --git a/scripts/build_dist.sh b/scripts/build_dist.sh
deleted file mode 100755
index c3f849ce7a6eb..0000000000000
--- a/scripts/build_dist.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# build the distribution
-LAST=`git tag --sort version:refname | grep -v rc | tail -1`
-
-echo "Building distribution for: $LAST"
-git checkout $LAST
-
-read -p "Ok to continue (y/n)? " answer
-case ${answer:0:1} in
- y|Y )
- echo "Building distribution"
- ./build_dist_for_release.sh
- ;;
- * )
- echo "Not building distribution"
- ;;
-esac
diff --git a/scripts/build_dist_for_release.sh b/scripts/build_dist_for_release.sh
deleted file mode 100755
index bee0f23a68ec2..0000000000000
--- a/scripts/build_dist_for_release.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# this requires cython to be installed
-
-# this builds the release cleanly & is building on the current checkout
-rm -rf dist
-git clean -xfd
-python setup.py clean --quiet
-python setup.py cython --quiet
-python setup.py sdist --formats=gztar --quiet
diff --git a/scripts/download_wheels.py b/scripts/download_wheels.py
deleted file mode 100644
index 3d36eed2d888a..0000000000000
--- a/scripts/download_wheels.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""Fetch wheels from wheels.scipy.org for a pandas version."""
-import argparse
-import pathlib
-import sys
-import urllib.parse
-import urllib.request
-
-from lxml import html
-
-
-def parse_args(args=None):
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument("version", type=str, help="Pandas version (0.23.0)")
- return parser.parse_args(args)
-
-
-def fetch(version):
- base = "http://wheels.scipy.org"
- tree = html.parse(base)
- root = tree.getroot()
-
- dest = pathlib.Path("dist")
- dest.mkdir(exist_ok=True)
-
- files = [
- x
- for x in root.xpath("//a/text()")
- if x.startswith(f"pandas-{version}") and not dest.joinpath(x).exists()
- ]
-
- N = len(files)
-
- for i, filename in enumerate(files, 1):
- out = str(dest.joinpath(filename))
- link = urllib.request.urljoin(base, filename)
- urllib.request.urlretrieve(link, out)
- print(f"Downloaded {link} to {out} [{i}/{N}]")
-
-
-def main(args=None):
- args = parse_args(args)
- fetch(args.version)
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py
index 5e1a169dbfc3f..85675cb6df42b 100755
--- a/scripts/find_commits_touching_func.py
+++ b/scripts/find_commits_touching_func.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# copyright 2013, y-p @ github
"""
Search the git history for all commits touching a named method
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 53a27e8782ad7..b0a06416ce443 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Convert the conda environment.yml to the pip requirements-dev.txt,
or check that they have the same packages (for the CI)
@@ -132,8 +132,7 @@ def main(conda_fname, pip_fname, compare=False):
)
if args.azure:
msg = (
- "##vso[task.logissue type=error;"
- f"sourcepath=requirements-dev.txt]{msg}"
+ f"##vso[task.logissue type=error;sourcepath=requirements-dev.txt]{msg}"
)
sys.stderr.write(msg)
sys.exit(res)
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index a1bccb1dd1629..b11de0c4ad860 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -1,819 +1,52 @@
-import functools
import io
-import random
-import string
import textwrap
-import numpy as np
import pytest
import validate_docstrings
-import pandas as pd
-validate_one = validate_docstrings.validate_one
-
-
-class GoodDocStrings:
- """
- Collection of good doc strings.
-
- This class contains a lot of docstrings that should pass the validation
- script without any errors.
- """
-
- def plot(self, kind, color="blue", **kwargs):
- """
- Generate a plot.
-
- Render the data in the Series as a matplotlib plot of the
- specified kind.
-
- Parameters
- ----------
- kind : str
- Kind of matplotlib plot.
- color : str, default 'blue'
- Color name or rgb code.
- **kwargs
- These parameters will be passed to the matplotlib plotting
- function.
- """
- pass
-
- def swap(self, arr, i, j, *args, **kwargs):
- """
- Swap two indicies on an array.
-
- Parameters
- ----------
- arr : list
- The list having indexes swapped.
- i, j : int
- The indexes being swapped.
- *args, **kwargs
- Extraneous parameters are being permitted.
- """
- pass
-
- def sample(self):
- """
- Generate and return a random number.
-
- The value is sampled from a continuous uniform distribution between
- 0 and 1.
-
- Returns
- -------
- float
- Random number generated.
- """
- return random.random()
-
- @functools.lru_cache(None)
- def decorated_sample(self, max):
- """
- Generate and return a random integer between 0 and max.
-
- Parameters
- ----------
- max : int
- The maximum value of the random number.
-
- Returns
- -------
- int
- Random number generated.
- """
- return random.randint(0, max)
-
- def random_letters(self):
- """
- Generate and return a sequence of random letters.
-
- The length of the returned string is also random, and is also
- returned.
-
- Returns
- -------
- length : int
- Length of the returned string.
- letters : str
- String of random letters.
- """
- length = random.randint(1, 10)
- letters = "".join(random.sample(string.ascii_lowercase, length))
- return length, letters
-
- def sample_values(self):
- """
- Generate an infinite sequence of random numbers.
-
- The values are sampled from a continuous uniform distribution between
- 0 and 1.
-
- Yields
- ------
- float
- Random number generated.
- """
- while True:
- yield random.random()
-
- def head(self):
- """
- Return the first 5 elements of the Series.
-
- This function is mainly useful to preview the values of the
- Series without displaying the whole of it.
-
- Returns
- -------
- Series
- Subset of the original series with the 5 first values.
-
- See Also
- --------
- Series.tail : Return the last 5 elements of the Series.
- Series.iloc : Return a slice of the elements in the Series,
- which can also be used to return the first or last n.
- """
- return self.iloc[:5]
-
- def head1(self, n=5):
- """
- Return the first elements of the Series.
-
- This function is mainly useful to preview the values of the
- Series without displaying the whole of it.
-
- Parameters
- ----------
- n : int
- Number of values to return.
-
- Returns
- -------
- Series
- Subset of the original series with the n first values.
-
- See Also
- --------
- tail : Return the last n elements of the Series.
-
- Examples
- --------
- >>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
- >>> s.head()
- 0 Ant
- 1 Bear
- 2 Cow
- 3 Dog
- 4 Falcon
- dtype: object
-
- With the `n` parameter, we can change the number of returned rows:
-
- >>> s.head(n=3)
- 0 Ant
- 1 Bear
- 2 Cow
- dtype: object
- """
- return self.iloc[:n]
-
- def contains(self, pat, case=True, na=np.nan):
- """
- Return whether each value contains `pat`.
-
- In this case, we are illustrating how to use sections, even
- if the example is simple enough and does not require them.
-
- Parameters
- ----------
- pat : str
- Pattern to check for within each element.
- case : bool, default True
- Whether check should be done with case sensitivity.
- na : object, default np.nan
- Fill value for missing data.
-
- Examples
- --------
- >>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
- >>> s.str.contains(pat='a')
- 0 False
- 1 False
- 2 True
- 3 NaN
- dtype: object
-
- **Case sensitivity**
-
- With `case_sensitive` set to `False` we can match `a` with both
- `a` and `A`:
-
- >>> s.str.contains(pat='a', case=False)
- 0 True
- 1 False
- 2 True
- 3 NaN
- dtype: object
-
- **Missing values**
-
- We can fill missing values in the output using the `na` parameter:
-
- >>> s.str.contains(pat='a', na=False)
- 0 False
- 1 False
- 2 True
- 3 False
- dtype: bool
- """
- pass
-
- def mode(self, axis, numeric_only):
- """
- Ensure reST directives don't affect checks for leading periods.
-
- Parameters
- ----------
- axis : str
- Sentence ending in period, followed by single directive.
-
- .. versionchanged:: 0.1.2
-
- numeric_only : bool
- Sentence ending in period, followed by multiple directives.
-
- .. versionadded:: 0.1.2
- .. deprecated:: 0.00.0
- A multiline description,
- which spans another line.
- """
- pass
-
- def good_imports(self):
- """
- Ensure import other than numpy and pandas are fine.
-
- Examples
- --------
- This example does not import pandas or import numpy.
- >>> import datetime
- >>> datetime.MAXYEAR
- 9999
- """
- pass
-
- def no_returns(self):
- """
- Say hello and have no returns.
- """
- pass
-
- def empty_returns(self):
- """
- Say hello and always return None.
-
- Since this function never returns a value, this
- docstring doesn't need a return section.
- """
-
- def say_hello():
- return "Hello World!"
-
- say_hello()
- if True:
- return
- else:
- return None
-
- def multiple_variables_on_one_line(self, matrix, a, b, i, j):
- """
- Swap two values in a matrix.
-
- Parameters
- ----------
- matrix : list of list
- A double list that represents a matrix.
- a, b : int
- The indicies of the first value.
- i, j : int
- The indicies of the second value.
- """
- pass
-
-
-class BadGenericDocStrings:
- """Everything here has a bad docstring
- """
-
- def func(self):
-
- """Some function.
-
- With several mistakes in the docstring.
-
- It has a blank like after the signature `def func():`.
-
- The text 'Some function' should go in the line after the
- opening quotes of the docstring, not in the same line.
-
- There is a blank line between the docstring and the first line
- of code `foo = 1`.
-
- The closing quotes should be in the next line, not in this one."""
-
- foo = 1
- bar = 2
- return foo + bar
-
- def astype(self, dtype):
- """
- Casts Series type.
-
- Verb in third-person of the present simple, should be infinitive.
- """
- pass
-
- def astype1(self, dtype):
- """
- Method to cast Series type.
-
- Does not start with verb.
- """
- pass
-
- def astype2(self, dtype):
- """
- Cast Series type
-
- Missing dot at the end.
- """
- pass
-
- def astype3(self, dtype):
- """
- Cast Series type from its current type to the new type defined in
- the parameter dtype.
-
- Summary is too verbose and doesn't fit in a single line.
- """
- pass
-
- def two_linebreaks_between_sections(self, foo):
- """
- Test linebreaks message GL03.
-
- Note 2 blank lines before parameters section.
-
-
- Parameters
- ----------
- foo : str
- Description of foo parameter.
- """
- pass
-
- def linebreak_at_end_of_docstring(self, foo):
- """
- Test linebreaks message GL03.
-
- Note extra blank line at end of docstring.
-
- Parameters
- ----------
- foo : str
- Description of foo parameter.
-
- """
- pass
-
- def plot(self, kind, **kwargs):
- """
- Generate a plot.
-
- Render the data in the Series as a matplotlib plot of the
- specified kind.
-
- Note the blank line between the parameters title and the first
- parameter. Also, note that after the name of the parameter `kind`
- and before the colon, a space is missing.
-
- Also, note that the parameter descriptions do not start with a
- capital letter, and do not finish with a dot.
-
- Finally, the `**kwargs` parameter is missing.
-
- Parameters
- ----------
-
- kind: str
- kind of matplotlib plot
- """
- pass
-
- def method(self, foo=None, bar=None):
- """
- A sample DataFrame method.
-
- Do not import numpy and pandas.
-
- Try to use meaningful data, when it makes the example easier
- to understand.
-
- Try to avoid positional arguments like in `df.method(1)`. They
- can be alright if previously defined with a meaningful name,
- like in `present_value(interest_rate)`, but avoid them otherwise.
-
- When presenting the behavior with different parameters, do not place
- all the calls one next to the other. Instead, add a short sentence
- explaining what the example shows.
-
- Examples
- --------
- >>> import numpy as np
- >>> import pandas as pd
- >>> df = pd.DataFrame(np.ones((3, 3)),
- ... columns=('a', 'b', 'c'))
- >>> df.all(1)
- 0 True
- 1 True
- 2 True
- dtype: bool
- >>> df.all(bool_only=True)
- Series([], dtype: bool)
- """
- pass
-
- def private_classes(self):
- """
- This mentions NDFrame, which is not correct.
- """
-
- def unknown_section(self):
- """
- This section has an unknown section title.
-
- Unknown Section
- ---------------
- This should raise an error in the validation.
- """
-
- def sections_in_wrong_order(self):
- """
- This docstring has the sections in the wrong order.
-
- Parameters
- ----------
- name : str
- This section is in the right position.
-
- Examples
- --------
- >>> print('So far Examples is good, as it goes before Parameters')
- So far Examples is good, as it goes before Parameters
-
- See Also
- --------
- function : This should generate an error, as See Also needs to go
- before Examples.
- """
-
- def deprecation_in_wrong_order(self):
- """
- This docstring has the deprecation warning in the wrong order.
-
- This is the extended summary. The correct order should be
- summary, deprecation warning, extended summary.
-
- .. deprecated:: 1.0
- This should generate an error as it needs to go before
- extended summary.
- """
-
- def method_wo_docstrings(self):
- pass
-
- def directives_without_two_colons(self, first, second):
- """
- Ensure reST directives have trailing colons.
-
- Parameters
- ----------
- first : str
- Sentence ending in period, followed by single directive w/o colons.
-
- .. versionchanged 0.1.2
-
- second : bool
- Sentence ending in period, followed by multiple directives w/o
- colons.
-
- .. versionadded 0.1.2
- .. deprecated 0.00.0
-
- """
- pass
-
-
-class BadSummaries:
- def wrong_line(self):
- """Exists on the wrong line"""
- pass
-
- def no_punctuation(self):
- """
- Has the right line but forgets punctuation
- """
- pass
-
- def no_capitalization(self):
- """
- provides a lowercase summary.
- """
- pass
-
- def no_infinitive(self):
- """
- Started with a verb that is not infinitive.
- """
-
- def multi_line(self):
- """
- Extends beyond one line
- which is not correct.
- """
-
- def two_paragraph_multi_line(self):
- """
- Extends beyond one line
- which is not correct.
-
- Extends beyond one line, which in itself is correct but the
- previous short summary should still be an issue.
- """
-
-
-class BadParameters:
- """
- Everything here has a problem with its Parameters section.
- """
-
- def missing_params(self, kind, **kwargs):
- """
- Lacks kwargs in Parameters.
-
- Parameters
- ----------
- kind : str
- Foo bar baz.
- """
-
- def bad_colon_spacing(self, kind):
- """
- Has bad spacing in the type line.
-
- Parameters
- ----------
- kind: str
- Needs a space after kind.
- """
-
- def no_description_period(self, kind):
- """
- Forgets to add a period to the description.
-
- Parameters
- ----------
- kind : str
- Doesn't end with a dot
- """
-
- def no_description_period_with_directive(self, kind):
- """
- Forgets to add a period, and also includes a directive.
-
- Parameters
- ----------
- kind : str
- Doesn't end with a dot
-
- .. versionadded:: 0.00.0
- """
-
- def no_description_period_with_directives(self, kind):
- """
- Forgets to add a period, and also includes multiple directives.
-
- Parameters
- ----------
- kind : str
- Doesn't end with a dot
-
- .. versionchanged:: 0.00.0
- .. deprecated:: 0.00.0
- """
-
- def parameter_capitalization(self, kind):
- """
- Forgets to capitalize the description.
-
- Parameters
- ----------
- kind : str
- this is not capitalized.
- """
-
- def blank_lines(self, kind):
- """
- Adds a blank line after the section header.
-
- Parameters
- ----------
-
- kind : str
- Foo bar baz.
- """
- pass
-
- def integer_parameter(self, kind):
- """
- Uses integer instead of int.
-
- Parameters
- ----------
- kind : integer
- Foo bar baz.
- """
- pass
-
- def string_parameter(self, kind):
- """
- Uses string instead of str.
-
- Parameters
- ----------
- kind : string
- Foo bar baz.
- """
- pass
-
- def boolean_parameter(self, kind):
- """
- Uses boolean instead of bool.
-
- Parameters
- ----------
- kind : boolean
- Foo bar baz.
- """
- pass
-
- def list_incorrect_parameter_type(self, kind):
- """
- Uses list of boolean instead of list of bool.
-
- Parameters
- ----------
- kind : list of boolean, integer, float or string
- Foo bar baz.
- """
- pass
-
- def bad_parameter_spacing(self, a, b):
- """
- The parameters on the same line have an extra space between them.
-
- Parameters
- ----------
- a, b : int
- Foo bar baz.
- """
- pass
-
-
-class BadReturns:
- def return_not_documented(self):
- """
- Lacks section for Returns
- """
- return "Hello world!"
-
- def yield_not_documented(self):
- """
- Lacks section for Yields
- """
- yield "Hello world!"
-
- def no_type(self):
- """
- Returns documented but without type.
-
- Returns
- -------
- Some value.
- """
- return "Hello world!"
-
- def no_description(self):
- """
- Provides type but no description.
-
- Returns
- -------
- str
- """
- return "Hello world!"
-
- def no_punctuation(self):
- """
- Provides type and description but no period.
-
- Returns
- -------
- str
- A nice greeting
- """
- return "Hello world!"
-
- def named_single_return(self):
- """
- Provides name but returns only one value.
-
- Returns
- -------
- s : str
- A nice greeting.
- """
- return "Hello world!"
-
- def no_capitalization(self):
- """
- Forgets capitalization in return values description.
-
- Returns
- -------
- foo : str
- The first returned string.
- bar : str
- the second returned string.
- """
- return "Hello", "World!"
+class BadDocstrings:
+ """Everything here has a bad docstring
+ """
- def no_period_multi(self):
+ def private_classes(self):
"""
- Forgets period in return values description.
-
- Returns
- -------
- foo : str
- The first returned string
- bar : str
- The second returned string.
+ This mentions NDFrame, which is not correct.
"""
- return "Hello", "World!"
-
-class BadSeeAlso:
- def desc_no_period(self):
+ def prefix_pandas(self):
"""
- Return the first 5 elements of the Series.
+ Have `pandas` prefix in See Also section.
See Also
--------
- Series.tail : Return the last 5 elements of the Series.
- Series.iloc : Return a slice of the elements in the Series,
- which can also be used to return the first or last n
+ pandas.Series.rename : Alter Series index labels or name.
+ DataFrame.head : The first `n` rows of the caller object.
"""
pass
- def desc_first_letter_lowercase(self):
- """
- Return the first 5 elements of the Series.
-
- See Also
- --------
- Series.tail : return the last 5 elements of the Series.
- Series.iloc : Return a slice of the elements in the Series,
- which can also be used to return the first or last n.
+ def redundant_import(self, foo=None, bar=None):
"""
- pass
+ A sample DataFrame method.
- def prefix_pandas(self):
- """
- Have `pandas` prefix in See Also section.
+ Should not import numpy and pandas.
- See Also
+ Examples
--------
- pandas.Series.rename : Alter Series index labels or name.
- DataFrame.head : The first `n` rows of the caller object.
+ >>> import numpy as np
+ >>> import pandas as pd
+ >>> df = pd.DataFrame(np.ones((3, 3)),
+ ... columns=('a', 'b', 'c'))
+ >>> df.all(1)
+ 0 True
+ 1 True
+ 2 True
+ dtype: bool
+ >>> df.all(bool_only=True)
+ Series([], dtype: bool)
"""
pass
-
-class BadExamples:
def unused_import(self):
"""
Examples
@@ -877,59 +110,9 @@ def _import_path(self, klass=None, func=None):
return base_path
- def test_good_class(self, capsys):
- errors = validate_one(self._import_path(klass="GoodDocStrings"))["errors"]
- assert isinstance(errors, list)
- assert not errors
-
- @pytest.mark.parametrize(
- "func",
- [
- "plot",
- "swap",
- "sample",
- "decorated_sample",
- "random_letters",
- "sample_values",
- "head",
- "head1",
- "contains",
- "mode",
- "good_imports",
- "no_returns",
- "empty_returns",
- "multiple_variables_on_one_line",
- ],
- )
- def test_good_functions(self, capsys, func):
- errors = validate_one(self._import_path(klass="GoodDocStrings", func=func))[
- "errors"
- ]
- assert isinstance(errors, list)
- assert not errors
-
def test_bad_class(self, capsys):
- errors = validate_one(self._import_path(klass="BadGenericDocStrings"))["errors"]
- assert isinstance(errors, list)
- assert errors
-
- @pytest.mark.parametrize(
- "func",
- [
- "func",
- "astype",
- "astype1",
- "astype2",
- "astype3",
- "plot",
- "method",
- "private_classes",
- "directives_without_two_colons",
- ],
- )
- def test_bad_generic_functions(self, capsys, func):
- errors = validate_one(
- self._import_path(klass="BadGenericDocStrings", func=func) # noqa:F821
+ errors = validate_docstrings.pandas_validate(
+ self._import_path(klass="BadDocstrings")
)["errors"]
assert isinstance(errors, list)
assert errors
@@ -937,9 +120,8 @@ def test_bad_generic_functions(self, capsys, func):
@pytest.mark.parametrize(
"klass,func,msgs",
[
- # See Also tests
(
- "BadGenericDocStrings",
+ "BadDocstrings",
"private_classes",
(
"Private classes (NDFrame) should not be mentioned in public "
@@ -947,200 +129,31 @@ def test_bad_generic_functions(self, capsys, func):
),
),
(
- "BadGenericDocStrings",
- "unknown_section",
- ('Found unknown section "Unknown Section".',),
- ),
- (
- "BadGenericDocStrings",
- "sections_in_wrong_order",
- (
- "Sections are in the wrong order. Correct order is: Parameters, "
- "See Also, Examples",
- ),
- ),
- (
- "BadGenericDocStrings",
- "deprecation_in_wrong_order",
- ("Deprecation warning should precede extended summary",),
- ),
- (
- "BadGenericDocStrings",
- "directives_without_two_colons",
- (
- "reST directives ['versionchanged', 'versionadded', "
- "'deprecated'] must be followed by two colons",
- ),
- ),
- (
- "BadSeeAlso",
- "desc_no_period",
- ('Missing period at end of description for See Also "Series.iloc"',),
- ),
- (
- "BadSeeAlso",
- "desc_first_letter_lowercase",
- ('should be capitalized for See Also "Series.tail"',),
- ),
- # Summary tests
- (
- "BadSummaries",
- "wrong_line",
- ("should start in the line immediately after the opening quotes",),
- ),
- ("BadSummaries", "no_punctuation", ("Summary does not end with a period",)),
- (
- "BadSummaries",
- "no_capitalization",
- ("Summary does not start with a capital letter",),
- ),
- (
- "BadSummaries",
- "no_capitalization",
- ("Summary must start with infinitive verb",),
- ),
- ("BadSummaries", "multi_line", ("Summary should fit in a single line",)),
- (
- "BadSummaries",
- "two_paragraph_multi_line",
- ("Summary should fit in a single line",),
- ),
- # Parameters tests
- (
- "BadParameters",
- "missing_params",
- ("Parameters {**kwargs} not documented",),
- ),
- (
- "BadParameters",
- "bad_colon_spacing",
- (
- 'Parameter "kind" requires a space before the colon '
- "separating the parameter name and type",
- ),
- ),
- (
- "BadParameters",
- "no_description_period",
- ('Parameter "kind" description should finish with "."',),
- ),
- (
- "BadParameters",
- "no_description_period_with_directive",
- ('Parameter "kind" description should finish with "."',),
- ),
- (
- "BadParameters",
- "parameter_capitalization",
- ('Parameter "kind" description should start with a capital letter',),
- ),
- (
- "BadParameters",
- "integer_parameter",
- ('Parameter "kind" type should use "int" instead of "integer"',),
- ),
- (
- "BadParameters",
- "string_parameter",
- ('Parameter "kind" type should use "str" instead of "string"',),
- ),
- (
- "BadParameters",
- "boolean_parameter",
- ('Parameter "kind" type should use "bool" instead of "boolean"',),
- ),
- (
- "BadParameters",
- "list_incorrect_parameter_type",
- ('Parameter "kind" type should use "bool" instead of "boolean"',),
- ),
- (
- "BadParameters",
- "list_incorrect_parameter_type",
- ('Parameter "kind" type should use "int" instead of "integer"',),
- ),
- (
- "BadParameters",
- "list_incorrect_parameter_type",
- ('Parameter "kind" type should use "str" instead of "string"',),
- ),
- (
- "BadParameters",
- "bad_parameter_spacing",
- ("Parameters {b} not documented", "Unknown parameters { b}"),
- ),
- pytest.param(
- "BadParameters",
- "blank_lines",
- ("No error yet?",),
- marks=pytest.mark.xfail,
- ),
- # Returns tests
- ("BadReturns", "return_not_documented", ("No Returns section found",)),
- ("BadReturns", "yield_not_documented", ("No Yields section found",)),
- pytest.param("BadReturns", "no_type", ("foo",), marks=pytest.mark.xfail),
- ("BadReturns", "no_description", ("Return value has no description",)),
- (
- "BadReturns",
- "no_punctuation",
- ('Return value description should finish with "."',),
- ),
- (
- "BadReturns",
- "named_single_return",
+ "BadDocstrings",
+ "prefix_pandas",
(
- "The first line of the Returns section should contain only the "
- "type, unless multiple values are being returned",
+ "pandas.Series.rename in `See Also` section "
+ "does not need `pandas` prefix",
),
),
- (
- "BadReturns",
- "no_capitalization",
- ("Return value description should start with a capital letter",),
- ),
- (
- "BadReturns",
- "no_period_multi",
- ('Return value description should finish with "."',),
- ),
# Examples tests
(
- "BadGenericDocStrings",
- "method",
+ "BadDocstrings",
+ "redundant_import",
("Do not import numpy, as it is imported automatically",),
),
(
- "BadGenericDocStrings",
- "method",
+ "BadDocstrings",
+ "redundant_import",
("Do not import pandas, as it is imported automatically",),
),
(
- "BadGenericDocStrings",
- "method_wo_docstrings",
- ("The object does not have a docstring",),
- ),
- # See Also tests
- (
- "BadSeeAlso",
- "prefix_pandas",
- (
- "pandas.Series.rename in `See Also` section "
- "does not need `pandas` prefix",
- ),
- ),
- # Examples tests
- (
- "BadExamples",
+ "BadDocstrings",
"unused_import",
("flake8 error: F401 'pandas as pdf' imported but unused",),
),
(
- "BadExamples",
- "indentation_is_not_a_multiple_of_four",
- ("flake8 error: E111 indentation is not a multiple of four",),
- ),
- (
- "BadExamples",
+ "BadDocstrings",
"missing_whitespace_around_arithmetic_operator",
(
"flake8 error: "
@@ -1148,39 +161,28 @@ def test_bad_generic_functions(self, capsys, func):
),
),
(
- "BadExamples",
- "missing_whitespace_after_comma",
- ("flake8 error: E231 missing whitespace after ',' (3 times)",),
- ),
- (
- "BadGenericDocStrings",
- "two_linebreaks_between_sections",
- (
- "Double line break found; please use only one blank line to "
- "separate sections or paragraphs, and do not leave blank lines "
- "at the end of docstrings",
- ),
+ "BadDocstrings",
+ "indentation_is_not_a_multiple_of_four",
+ ("flake8 error: E111 indentation is not a multiple of four",),
),
(
- "BadGenericDocStrings",
- "linebreak_at_end_of_docstring",
- (
- "Double line break found; please use only one blank line to "
- "separate sections or paragraphs, and do not leave blank lines "
- "at the end of docstrings",
- ),
+ "BadDocstrings",
+ "missing_whitespace_after_comma",
+ ("flake8 error: E231 missing whitespace after ',' (3 times)",),
),
],
)
def test_bad_docstrings(self, capsys, klass, func, msgs):
- result = validate_one(self._import_path(klass=klass, func=func))
+ result = validate_docstrings.pandas_validate(
+ self._import_path(klass=klass, func=func)
+ )
for msg in msgs:
assert msg in " ".join(err[1] for err in result["errors"])
def test_validate_all_ignore_deprecated(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
- "validate_one",
+ "pandas_validate",
lambda func_name: {
"docstring": "docstring1",
"errors": [
@@ -1285,50 +287,22 @@ def test_item_subsection(self, idx, subsection):
assert result[idx][3] == subsection
-class TestDocstringClass:
- @pytest.mark.parametrize(
- "name, expected_obj",
- [
- ("pandas.isnull", pd.isnull),
- ("pandas.DataFrame", pd.DataFrame),
- ("pandas.Series.sum", pd.Series.sum),
- ],
- )
- def test_resolves_class_name(self, name, expected_obj):
- d = validate_docstrings.Docstring(name)
- assert d.obj is expected_obj
-
- @pytest.mark.parametrize("invalid_name", ["panda", "panda.DataFrame"])
- def test_raises_for_invalid_module_name(self, invalid_name):
- msg = f'No module can be imported from "{invalid_name}"'
- with pytest.raises(ImportError, match=msg):
- validate_docstrings.Docstring(invalid_name)
-
- @pytest.mark.parametrize(
- "invalid_name", ["pandas.BadClassName", "pandas.Series.bad_method_name"]
- )
- def test_raises_for_invalid_attribute_name(self, invalid_name):
- name_components = invalid_name.split(".")
- obj_name, invalid_attr_name = name_components[-2], name_components[-1]
- msg = f"'{obj_name}' has no attribute '{invalid_attr_name}'"
- with pytest.raises(AttributeError, match=msg):
- validate_docstrings.Docstring(invalid_name)
-
+class TestPandasDocstringClass:
@pytest.mark.parametrize(
"name", ["pandas.Series.str.isdecimal", "pandas.Series.str.islower"]
)
def test_encode_content_write_to_file(self, name):
# GH25466
- docstr = validate_docstrings.Docstring(name).validate_pep8()
+ docstr = validate_docstrings.PandasDocstring(name).validate_pep8()
# the list of pep8 errors should be empty
assert not list(docstr)
class TestMainFunction:
- def test_exit_status_for_validate_one(self, monkeypatch):
+ def test_exit_status_for_main(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings,
- "validate_one",
+ "pandas_validate",
lambda func_name: {
"docstring": "docstring1",
"errors": [
@@ -1336,8 +310,7 @@ def test_exit_status_for_validate_one(self, monkeypatch):
("ER02", "err desc"),
("ER03", "err desc"),
],
- "warnings": [],
- "examples_errors": "",
+ "examples_errs": "",
},
)
exit_status = validate_docstrings.main(
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index bcf3fd5d276f5..d43086756769a 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Analyze docstrings to detect errors.
@@ -14,19 +14,14 @@
$ ./validate_docstrings.py pandas.DataFrame.head
"""
import argparse
-import ast
import doctest
-import functools
import glob
import importlib
-import inspect
import json
import os
-import pydoc
-import re
import sys
import tempfile
-import textwrap
+from typing import List, Optional
import flake8.main.application
@@ -52,87 +47,15 @@
import pandas # noqa: E402 isort:skip
sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
-from numpydoc.docscrape import NumpyDocString # noqa: E402 isort:skip
-from pandas.io.formats.printing import pprint_thing # noqa: E402 isort:skip
+from numpydoc.validate import validate, Docstring # noqa: E402 isort:skip
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
-DIRECTIVES = ["versionadded", "versionchanged", "deprecated"]
-DIRECTIVE_PATTERN = re.compile(rf"^\s*\.\. ({'|'.join(DIRECTIVES)})(?!::)", re.I | re.M)
-ALLOWED_SECTIONS = [
- "Parameters",
- "Attributes",
- "Methods",
- "Returns",
- "Yields",
- "Other Parameters",
- "Raises",
- "Warns",
- "See Also",
- "Notes",
- "References",
- "Examples",
-]
ERROR_MSGS = {
- "GL01": "Docstring text (summary) should start in the line immediately "
- "after the opening quotes (not in the same line, or leaving a "
- "blank line in between)",
- "GL02": "Closing quotes should be placed in the line after the last text "
- "in the docstring (do not close the quotes in the same line as "
- "the text, or leave a blank line between the last text and the "
- "quotes)",
- "GL03": "Double line break found; please use only one blank line to "
- "separate sections or paragraphs, and do not leave blank lines "
- "at the end of docstrings",
"GL04": "Private classes ({mentioned_private_classes}) should not be "
"mentioned in public docstrings",
- "GL05": 'Tabs found at the start of line "{line_with_tabs}", please use '
- "whitespace only",
- "GL06": 'Found unknown section "{section}". Allowed sections are: '
- "{allowed_sections}",
- "GL07": "Sections are in the wrong order. Correct order is: {correct_sections}",
- "GL08": "The object does not have a docstring",
- "GL09": "Deprecation warning should precede extended summary",
- "GL10": "reST directives {directives} must be followed by two colons",
- "SS01": "No summary found (a short summary in a single line should be "
- "present at the beginning of the docstring)",
- "SS02": "Summary does not start with a capital letter",
- "SS03": "Summary does not end with a period",
- "SS04": "Summary contains heading whitespaces",
- "SS05": "Summary must start with infinitive verb, not third person "
- '(e.g. use "Generate" instead of "Generates")',
- "SS06": "Summary should fit in a single line",
- "ES01": "No extended summary found",
- "PR01": "Parameters {missing_params} not documented",
- "PR02": "Unknown parameters {unknown_params}",
- "PR03": "Wrong parameters order. Actual: {actual_params}. "
- "Documented: {documented_params}",
- "PR04": 'Parameter "{param_name}" has no type',
- "PR05": 'Parameter "{param_name}" type should not finish with "."',
- "PR06": 'Parameter "{param_name}" type should use "{right_type}" instead '
- 'of "{wrong_type}"',
- "PR07": 'Parameter "{param_name}" has no description',
- "PR08": 'Parameter "{param_name}" description should start with a '
- "capital letter",
- "PR09": 'Parameter "{param_name}" description should finish with "."',
- "PR10": 'Parameter "{param_name}" requires a space before the colon '
- "separating the parameter name and type",
- "RT01": "No Returns section found",
- "RT02": "The first line of the Returns section should contain only the "
- "type, unless multiple values are being returned",
- "RT03": "Return value has no description",
- "RT04": "Return value description should start with a capital letter",
- "RT05": 'Return value description should finish with "."',
- "YD01": "No Yields section found",
- "SA01": "See Also section not found",
- "SA02": "Missing period at end of description for See Also "
- '"{reference_name}" reference',
- "SA03": "Description should be capitalized for See Also "
- '"{reference_name}" reference',
- "SA04": 'Missing description for See Also "{reference_name}" reference',
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
- "EX01": "No examples section found",
"EX02": "Examples do not pass tests:\n{doctest_log}",
"EX03": "flake8 error: {error_code} {error_message}{times_happening}",
"EX04": "Do not import {imported_library}, as it is imported "
@@ -140,29 +63,10 @@
}
-def error(code, **kwargs):
+def pandas_error(code, **kwargs):
"""
- Return a tuple with the error code and the message with variables replaced.
-
- This is syntactic sugar so instead of:
- - `('EX02', ERROR_MSGS['EX02'].format(doctest_log=log))`
-
- We can simply use:
- - `error('EX02', doctest_log=log)`
-
- Parameters
- ----------
- code : str
- Error code.
- **kwargs
- Values for the variables in the error messages
-
- Returns
- -------
- code : str
- Error code.
- message : str
- Error message with variables replaced.
+ Copy of the numpydoc error function, since ERROR_MSGS can't be updated
+ with our custom errors yet.
"""
return (code, ERROR_MSGS[code].format(**kwargs))
@@ -239,347 +143,7 @@ def get_api_items(api_doc_fd):
previous_line = line
-class Docstring:
- def __init__(self, name):
- self.name = name
- obj = self._load_obj(name)
- self.obj = obj
- self.code_obj = self._to_original_callable(obj)
- self.raw_doc = obj.__doc__ or ""
- self.clean_doc = pydoc.getdoc(obj)
- self.doc = NumpyDocString(self.clean_doc)
-
- def __len__(self) -> int:
- return len(self.raw_doc)
-
- @staticmethod
- def _load_obj(name):
- """
- Import Python object from its name as string.
-
- Parameters
- ----------
- name : str
- Object name to import (e.g. pandas.Series.str.upper)
-
- Returns
- -------
- object
- Python object that can be a class, method, function...
-
- Examples
- --------
- >>> Docstring._load_obj('pandas.Series')
- <class 'pandas.core.series.Series'>
- """
- for maxsplit in range(1, name.count(".") + 1):
- # TODO when py3 only replace by: module, *func_parts = ...
- func_name_split = name.rsplit(".", maxsplit)
- module = func_name_split[0]
- func_parts = func_name_split[1:]
- try:
- obj = importlib.import_module(module)
- except ImportError:
- pass
- else:
- continue
-
- if "obj" not in locals():
- raise ImportError(f'No module can be imported from "{name}"')
-
- for part in func_parts:
- obj = getattr(obj, part)
- return obj
-
- @staticmethod
- def _to_original_callable(obj):
- """
- Find the Python object that contains the source code of the object.
-
- This is useful to find the place in the source code (file and line
- number) where a docstring is defined. It does not currently work for
- all cases, but it should help find some (properties...).
- """
- while True:
- if inspect.isfunction(obj) or inspect.isclass(obj):
- f = inspect.getfile(obj)
- if f.startswith("<") and f.endswith(">"):
- return None
- return obj
- if inspect.ismethod(obj):
- obj = obj.__func__
- elif isinstance(obj, functools.partial):
- obj = obj.func
- elif isinstance(obj, property):
- obj = obj.fget
- else:
- return None
-
- @property
- def type(self):
- return type(self.obj).__name__
-
- @property
- def is_function_or_method(self):
- # TODO(py27): remove ismethod
- return inspect.isfunction(self.obj) or inspect.ismethod(self.obj)
-
- @property
- def source_file_name(self):
- """
- File name where the object is implemented (e.g. pandas/core/frame.py).
- """
- try:
- fname = inspect.getsourcefile(self.code_obj)
- except TypeError:
- # In some cases the object is something complex like a cython
- # object that can't be easily introspected. An it's better to
- # return the source code file of the object as None, than crash
- pass
- else:
- if fname:
- fname = os.path.relpath(fname, BASE_PATH)
- return fname
-
- @property
- def source_file_def_line(self):
- """
- Number of line where the object is defined in its file.
- """
- try:
- return inspect.getsourcelines(self.code_obj)[-1]
- except (OSError, TypeError):
- # In some cases the object is something complex like a cython
- # object that can't be easily introspected. An it's better to
- # return the line number as None, than crash
- pass
-
- @property
- def github_url(self):
- url = "https://github.com/pandas-dev/pandas/blob/master/"
- url += f"{self.source_file_name}#L{self.source_file_def_line}"
- return url
-
- @property
- def start_blank_lines(self):
- i = None
- if self.raw_doc:
- for i, row in enumerate(self.raw_doc.split("\n")):
- if row.strip():
- break
- return i
-
- @property
- def end_blank_lines(self):
- i = None
- if self.raw_doc:
- for i, row in enumerate(reversed(self.raw_doc.split("\n"))):
- if row.strip():
- break
- return i
-
- @property
- def double_blank_lines(self):
- prev = True
- for row in self.raw_doc.split("\n"):
- if not prev and not row.strip():
- return True
- prev = row.strip()
- return False
-
- @property
- def section_titles(self):
- sections = []
- self.doc._doc.reset()
- while not self.doc._doc.eof():
- content = self.doc._read_to_next_section()
- if (
- len(content) > 1
- and len(content[0]) == len(content[1])
- and set(content[1]) == {"-"}
- ):
- sections.append(content[0])
- return sections
-
- @property
- def summary(self):
- return " ".join(self.doc["Summary"])
-
- @property
- def num_summary_lines(self):
- return len(self.doc["Summary"])
-
- @property
- def extended_summary(self):
- if not self.doc["Extended Summary"] and len(self.doc["Summary"]) > 1:
- return " ".join(self.doc["Summary"])
- return " ".join(self.doc["Extended Summary"])
-
- @property
- def needs_summary(self):
- return not (bool(self.summary) and bool(self.extended_summary))
-
- @property
- def doc_parameters(self):
- parameters = {}
- for names, type_, desc in self.doc["Parameters"]:
- for name in names.split(", "):
- parameters[name] = (type_, "".join(desc))
- return parameters
-
- @property
- def signature_parameters(self):
- def add_stars(param_name: str, info: inspect.Parameter):
- """
- Add stars to *args and **kwargs parameters
- """
- if info.kind == inspect.Parameter.VAR_POSITIONAL:
- return f"*{param_name}"
- elif info.kind == inspect.Parameter.VAR_KEYWORD:
- return f"**{param_name}"
- else:
- return param_name
-
- if inspect.isclass(self.obj):
- if hasattr(self.obj, "_accessors") and (
- self.name.split(".")[-1] in self.obj._accessors
- ):
- # accessor classes have a signature but don't want to show this
- return tuple()
- try:
- sig = inspect.signature(self.obj)
- except (TypeError, ValueError):
- # Some objects, mainly in C extensions do not support introspection
- # of the signature
- return tuple()
-
- params = tuple(
- add_stars(parameter, sig.parameters[parameter])
- for parameter in sig.parameters
- )
- if params and params[0] in ("self", "cls"):
- return params[1:]
- return params
-
- @property
- def parameter_mismatches(self):
- errs = []
- signature_params = self.signature_parameters
- doc_params = tuple(self.doc_parameters)
- missing = set(signature_params) - set(doc_params)
- if missing:
- errs.append(error("PR01", missing_params=pprint_thing(missing)))
- extra = set(doc_params) - set(signature_params)
- if extra:
- errs.append(error("PR02", unknown_params=pprint_thing(extra)))
- if (
- not missing
- and not extra
- and signature_params != doc_params
- and not (not signature_params and not doc_params)
- ):
- errs.append(
- error(
- "PR03", actual_params=signature_params, documented_params=doc_params
- )
- )
-
- return errs
-
- @property
- def correct_parameters(self):
- return not bool(self.parameter_mismatches)
-
- @property
- def directives_without_two_colons(self):
- return DIRECTIVE_PATTERN.findall(self.raw_doc)
-
- def parameter_type(self, param):
- return self.doc_parameters[param][0]
-
- def parameter_desc(self, param):
- desc = self.doc_parameters[param][1]
- # Find and strip out any sphinx directives
- for directive in DIRECTIVES:
- full_directive = f".. {directive}"
- if full_directive in desc:
- # Only retain any description before the directive
- desc = desc[: desc.index(full_directive)]
- return desc
-
- @property
- def see_also(self):
- result = {}
- for funcs, desc in self.doc["See Also"]:
- for func, _ in funcs:
- result[func] = "".join(desc)
-
- return result
-
- @property
- def examples(self):
- return self.doc["Examples"]
-
- @property
- def returns(self):
- return self.doc["Returns"]
-
- @property
- def yields(self):
- return self.doc["Yields"]
-
- @property
- def method_source(self):
- try:
- source = inspect.getsource(self.obj)
- except TypeError:
- return ""
- return textwrap.dedent(source)
-
- @property
- def method_returns_something(self):
- """
- Check if the docstrings method can return something.
-
- Bare returns, returns valued None and returns from nested functions are
- disconsidered.
-
- Returns
- -------
- bool
- Whether the docstrings method can return something.
- """
-
- def get_returns_not_on_nested_functions(node):
- returns = [node] if isinstance(node, ast.Return) else []
- for child in ast.iter_child_nodes(node):
- # Ignore nested functions and its subtrees.
- if not isinstance(child, ast.FunctionDef):
- child_returns = get_returns_not_on_nested_functions(child)
- returns.extend(child_returns)
- return returns
-
- tree = ast.parse(self.method_source).body
- if tree:
- returns = get_returns_not_on_nested_functions(tree[0])
- return_values = [r.value for r in returns]
- # Replace NameConstant nodes valued None for None.
- for i, v in enumerate(return_values):
- if isinstance(v, ast.NameConstant) and v.value is None:
- return_values[i] = None
- return any(return_values)
- else:
- return False
-
- @property
- def first_line_ends_in_dot(self):
- if self.doc:
- return self.doc.split("\n")[0][-1] == "."
-
- @property
- def deprecated(self):
- return ".. deprecated:: " in (self.summary + self.extended_summary)
-
+class PandasDocstring(Docstring):
@property
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
@@ -632,237 +196,66 @@ def validate_pep8(self):
yield from application.guide.stats.statistics_for("")
-def get_validation_data(doc):
+def pandas_validate(func_name: str):
"""
- Validate the docstring.
+ Call the numpydoc validation, and add the errors specific to pandas.
Parameters
----------
- doc : Docstring
- A Docstring object with the given function name.
+ func_name : str
+ Name of the object of the docstring to validate.
Returns
-------
- tuple
- errors : list of tuple
- Errors occurred during validation.
- warnings : list of tuple
- Warnings occurred during validation.
- examples_errs : str
- Examples usage displayed along the error, otherwise empty string.
-
- Notes
- -----
- The errors codes are defined as:
- - First two characters: Section where the error happens:
- * GL: Global (no section, like section ordering errors)
- * SS: Short summary
- * ES: Extended summary
- * PR: Parameters
- * RT: Returns
- * YD: Yields
- * RS: Raises
- * WN: Warns
- * SA: See Also
- * NT: Notes
- * RF: References
- * EX: Examples
- - Last two characters: Numeric error code inside the section
-
- For example, EX02 is the second codified error in the Examples section
- (which in this case is assigned to examples that do not pass the tests).
-
- The error codes, their corresponding error messages, and the details on how
- they are validated, are not documented more than in the source code of this
- function.
+ dict
+ Information about the docstring and the errors found.
"""
+ doc = PandasDocstring(func_name)
+ result = validate(func_name)
- errs = []
- wrns = []
- if not doc.raw_doc:
- errs.append(error("GL08"))
- return errs, wrns, ""
-
- if doc.start_blank_lines != 1:
- errs.append(error("GL01"))
- if doc.end_blank_lines != 1:
- errs.append(error("GL02"))
- if doc.double_blank_lines:
- errs.append(error("GL03"))
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
- errs.append(error("GL04", mentioned_private_classes=", ".join(mentioned_errs)))
- for line in doc.raw_doc.splitlines():
- if re.match("^ *\t", line):
- errs.append(error("GL05", line_with_tabs=line.lstrip()))
-
- unexpected_sections = [
- section for section in doc.section_titles if section not in ALLOWED_SECTIONS
- ]
- for section in unexpected_sections:
- errs.append(
- error("GL06", section=section, allowed_sections=", ".join(ALLOWED_SECTIONS))
+ result["errors"].append(
+ pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs))
)
- correct_order = [
- section for section in ALLOWED_SECTIONS if section in doc.section_titles
- ]
- if correct_order != doc.section_titles:
- errs.append(error("GL07", correct_sections=", ".join(correct_order)))
-
- if doc.deprecated and not doc.extended_summary.startswith(".. deprecated:: "):
- errs.append(error("GL09"))
-
- directives_without_two_colons = doc.directives_without_two_colons
- if directives_without_two_colons:
- errs.append(error("GL10", directives=directives_without_two_colons))
-
- if not doc.summary:
- errs.append(error("SS01"))
- else:
- if not doc.summary[0].isupper():
- errs.append(error("SS02"))
- if doc.summary[-1] != ".":
- errs.append(error("SS03"))
- if doc.summary != doc.summary.lstrip():
- errs.append(error("SS04"))
- elif doc.is_function_or_method and doc.summary.split(" ")[0][-1] == "s":
- errs.append(error("SS05"))
- if doc.num_summary_lines > 1:
- errs.append(error("SS06"))
-
- if not doc.extended_summary:
- wrns.append(("ES01", "No extended summary found"))
-
- # PR01: Parameters not documented
- # PR02: Unknown parameters
- # PR03: Wrong parameters order
- errs += doc.parameter_mismatches
-
- for param in doc.doc_parameters:
- if not param.startswith("*"): # Check can ignore var / kwargs
- if not doc.parameter_type(param):
- if ":" in param:
- errs.append(error("PR10", param_name=param.split(":")[0]))
- else:
- errs.append(error("PR04", param_name=param))
- else:
- if doc.parameter_type(param)[-1] == ".":
- errs.append(error("PR05", param_name=param))
- common_type_errors = [
- ("integer", "int"),
- ("boolean", "bool"),
- ("string", "str"),
- ]
- for wrong_type, right_type in common_type_errors:
- if wrong_type in doc.parameter_type(param):
- errs.append(
- error(
- "PR06",
- param_name=param,
- right_type=right_type,
- wrong_type=wrong_type,
- )
- )
- if not doc.parameter_desc(param):
- errs.append(error("PR07", param_name=param))
- else:
- if not doc.parameter_desc(param)[0].isupper():
- errs.append(error("PR08", param_name=param))
- if doc.parameter_desc(param)[-1] != ".":
- errs.append(error("PR09", param_name=param))
-
- if doc.is_function_or_method:
- if not doc.returns:
- if doc.method_returns_something:
- errs.append(error("RT01"))
- else:
- if len(doc.returns) == 1 and doc.returns[0].name:
- errs.append(error("RT02"))
- for name_or_type, type_, desc in doc.returns:
- if not desc:
- errs.append(error("RT03"))
- else:
- desc = " ".join(desc)
- if not desc[0].isupper():
- errs.append(error("RT04"))
- if not desc.endswith("."):
- errs.append(error("RT05"))
-
- if not doc.yields and "yield" in doc.method_source:
- errs.append(error("YD01"))
-
- if not doc.see_also:
- wrns.append(error("SA01"))
- else:
+ if doc.see_also:
for rel_name, rel_desc in doc.see_also.items():
- if rel_desc:
- if not rel_desc.endswith("."):
- errs.append(error("SA02", reference_name=rel_name))
- if not rel_desc[0].isupper():
- errs.append(error("SA03", reference_name=rel_name))
- else:
- errs.append(error("SA04", reference_name=rel_name))
if rel_name.startswith("pandas."):
- errs.append(
- error(
+ result["errors"].append(
+ pandas_error(
"SA05",
reference_name=rel_name,
right_reference=rel_name[len("pandas.") :],
)
)
- examples_errs = ""
- if not doc.examples:
- wrns.append(error("EX01"))
- else:
- examples_errs = doc.examples_errors
- if examples_errs:
- errs.append(error("EX02", doctest_log=examples_errs))
+ result["examples_errs"] = ""
+ if doc.examples:
+ result["examples_errs"] = doc.examples_errors
+ if result["examples_errs"]:
+ result["errors"].append(
+ pandas_error("EX02", doctest_log=result["examples_errs"])
+ )
for err in doc.validate_pep8():
- errs.append(
- error(
+ result["errors"].append(
+ pandas_error(
"EX03",
error_code=err.error_code,
error_message=err.message,
- times_happening=f" ({err.count} times)" if err.count > 1 else "",
+ times_happening=" ({} times)".format(err.count)
+ if err.count > 1
+ else "",
)
)
examples_source_code = "".join(doc.examples_source_code)
for wrong_import in ("numpy", "pandas"):
- if f"import {wrong_import}" in examples_source_code:
- errs.append(error("EX04", imported_library=wrong_import))
- return errs, wrns, examples_errs
-
-
-def validate_one(func_name):
- """
- Validate the docstring for the given func_name
-
- Parameters
- ----------
- func_name : function
- Function whose docstring will be evaluated (e.g. pandas.read_csv).
+ if "import {}".format(wrong_import) in examples_source_code:
+ result["errors"].append(
+ pandas_error("EX04", imported_library=wrong_import)
+ )
- Returns
- -------
- dict
- A dictionary containing all the information obtained from validating
- the docstring.
- """
- doc = Docstring(func_name)
- errs, wrns, examples_errs = get_validation_data(doc)
- return {
- "type": doc.type,
- "docstring": doc.clean_doc,
- "deprecated": doc.deprecated,
- "file": doc.source_file_name,
- "file_line": doc.source_file_def_line,
- "github_link": doc.github_url,
- "errors": errs,
- "warnings": wrns,
- "examples_errors": examples_errs,
- }
+ return result
def validate_all(prefix, ignore_deprecated=False):
@@ -887,16 +280,16 @@ def validate_all(prefix, ignore_deprecated=False):
result = {}
seen = {}
- # functions from the API docs
api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst")
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
+
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
- doc_info = validate_one(func_name)
+ doc_info = pandas_validate(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
@@ -914,100 +307,86 @@ def validate_all(prefix, ignore_deprecated=False):
seen[shared_code_key] = func_name
- # functions from introspecting Series and DataFrame
- api_item_names = set(list(zip(*api_items))[0])
- for class_ in (pandas.Series, pandas.DataFrame):
- for member in inspect.getmembers(class_):
- func_name = f"pandas.{class_.__name__}.{member[0]}"
- if not member[0].startswith("_") and func_name not in api_item_names:
- if prefix and not func_name.startswith(prefix):
- continue
- doc_info = validate_one(func_name)
- if ignore_deprecated and doc_info["deprecated"]:
- continue
- result[func_name] = doc_info
- result[func_name]["in_api"] = False
-
return result
-def main(func_name, prefix, errors, output_format, ignore_deprecated):
+def print_validate_all_results(
+ prefix: str,
+ errors: Optional[List[str]],
+ output_format: str,
+ ignore_deprecated: bool,
+):
+ if output_format not in ("default", "json", "actions"):
+ raise ValueError(f'Unknown output_format "{output_format}"')
+
+ result = validate_all(prefix, ignore_deprecated)
+
+ if output_format == "json":
+ sys.stdout.write(json.dumps(result))
+ return 0
+
+ prefix = "##[error]" if output_format == "actions" else ""
+ exit_status = 0
+ for name, res in result.items():
+ for err_code, err_desc in res["errors"]:
+ if errors and err_code not in errors:
+ continue
+ sys.stdout.write(
+ f'{prefix}{res["file"]}:{res["file_line"]}:'
+ f"{err_code}:{name}:{err_desc}\n"
+ )
+ exit_status += 1
+
+ return exit_status
+
+
+def print_validate_one_results(func_name: str):
def header(title, width=80, char="#"):
full_line = char * width
side_len = (width - len(title) - 2) // 2
adj = "" if len(title) % 2 == 0 else " "
- title_line = f"{char * side_len} {title}{adj} {char * side_len}"
+ title_line = "{side} {title}{adj} {side}".format(
+ side=char * side_len, title=title, adj=adj
+ )
return f"\n{full_line}\n{title_line}\n{full_line}\n\n"
- exit_status = 0
- if func_name is None:
- result = validate_all(prefix, ignore_deprecated)
-
- if output_format == "json":
- output = json.dumps(result)
- else:
- if output_format == "default":
- output_format = "{text}\n"
- elif output_format == "azure":
- output_format = (
- "##vso[task.logissue type=error;"
- "sourcepath={path};"
- "linenumber={row};"
- "code={code};"
- "]{text}\n"
- )
- else:
- raise ValueError(f'Unknown output_format "{output_format}"')
-
- output = ""
- for name, res in result.items():
- for err_code, err_desc in res["errors"]:
- # The script would be faster if instead of filtering the
- # errors after validating them, it didn't validate them
- # initially. But that would complicate the code too much
- if errors and err_code not in errors:
- continue
- exit_status += 1
- output += output_format.format(
- path=res["file"],
- row=res["file_line"],
- code=err_code,
- text=f"{name}: {err_desc}",
- )
+ result = pandas_validate(func_name)
- sys.stdout.write(output)
+ sys.stderr.write(header(f"Docstring ({func_name})"))
+ sys.stderr.write(f"{result['docstring']}\n")
- else:
- result = validate_one(func_name)
- sys.stderr.write(header(f"Docstring ({func_name})"))
- sys.stderr.write(f"{result['docstring']}\n")
- sys.stderr.write(header("Validation"))
- if result["errors"]:
- sys.stderr.write(f"{len(result['errors'])} Errors found:\n")
- for err_code, err_desc in result["errors"]:
- # Failing examples are printed at the end
- if err_code == "EX02":
- sys.stderr.write("\tExamples do not pass tests\n")
- continue
- sys.stderr.write(f"\t{err_desc}\n")
- if result["warnings"]:
- sys.stderr.write(f"{len(result['warnings'])} Warnings found:\n")
- for wrn_code, wrn_desc in result["warnings"]:
- sys.stderr.write(f"\t{wrn_desc}\n")
-
- if not result["errors"]:
- sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
-
- if result["examples_errors"]:
- sys.stderr.write(header("Doctests"))
- sys.stderr.write(result["examples_errors"])
+ sys.stderr.write(header("Validation"))
+ if result["errors"]:
+ sys.stderr.write(f'{len(result["errors"])} Errors found:\n')
+ for err_code, err_desc in result["errors"]:
+ if err_code == "EX02": # Failing examples are printed at the end
+ sys.stderr.write("\tExamples do not pass tests\n")
+ continue
+ sys.stderr.write(f"\t{err_desc}\n")
+ elif result["errors"]:
+ sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
- return exit_status
+ if result["examples_errs"]:
+ sys.stderr.write(header("Doctests"))
+ sys.stderr.write(result["examples_errs"])
+
+
+def main(func_name, prefix, errors, output_format, ignore_deprecated):
+ """
+ Main entry point. Call the validation for one or for all docstrings.
+ """
+ if func_name is None:
+ return print_validate_all_results(
+ prefix, errors, output_format, ignore_deprecated
+ )
+ else:
+ print_validate_one_results(func_name)
+ return 0
if __name__ == "__main__":
- format_opts = "default", "json", "azure"
+ format_opts = "default", "json", "actions"
func_help = (
"function or method to validate (e.g. pandas.DataFrame.head) "
"if not provided, all docstrings are validated and returned "
@@ -1020,16 +399,16 @@ def header(title, width=80, char="#"):
default="default",
choices=format_opts,
help="format of the output when validating "
- "multiple docstrings (ignored when validating one)."
- f"It can be {str(format_opts)[1:-1]}",
+ "multiple docstrings (ignored when validating one). "
+ "It can be {str(format_opts)[1:-1]}",
)
argparser.add_argument(
"--prefix",
default=None,
help="pattern for the "
"docstring names, in order to decide which ones "
- 'will be validated. A prefix "pandas.Series.str.'
- "will make the script validate all the docstrings"
+ 'will be validated. A prefix "pandas.Series.str."'
+ "will make the script validate all the docstrings "
"of methods starting by this pattern. It is "
"ignored if parameter function is provided",
)
diff --git a/scripts/validate_string_concatenation.py b/scripts/validate_string_concatenation.py
index 3feeddaabe8d2..fbf3bb5cfccf2 100755
--- a/scripts/validate_string_concatenation.py
+++ b/scripts/validate_string_concatenation.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
GH #30454
diff --git a/setup.cfg b/setup.cfg
index d0570cee6fe10..cf931f52489a8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -150,6 +150,9 @@ ignore_errors=True
[mypy-pandas.tests.scalar.period.test_period]
ignore_errors=True
+[mypy-pandas._testing]
+check_untyped_defs=False
+
[mypy-pandas._version]
check_untyped_defs=False
@@ -171,9 +174,6 @@ check_untyped_defs=False
[mypy-pandas.core.computation.expressions]
check_untyped_defs=False
-[mypy-pandas.core.computation.ops]
-check_untyped_defs=False
-
[mypy-pandas.core.computation.pytables]
check_untyped_defs=False
@@ -201,9 +201,6 @@ check_untyped_defs=False
[mypy-pandas.core.indexes.base]
check_untyped_defs=False
-[mypy-pandas.core.indexes.datetimelike]
-check_untyped_defs=False
-
[mypy-pandas.core.indexes.datetimes]
check_untyped_defs=False
@@ -243,9 +240,6 @@ check_untyped_defs=False
[mypy-pandas.core.reshape.merge]
check_untyped_defs=False
-[mypy-pandas.core.reshape.reshape]
-check_untyped_defs=False
-
[mypy-pandas.core.strings]
check_untyped_defs=False
@@ -335,6 +329,3 @@ check_untyped_defs=False
[mypy-pandas.tseries.offsets]
check_untyped_defs=False
-
-[mypy-pandas._testing]
-check_untyped_defs=False
diff --git a/setup.py b/setup.py
index c33ce063cb4d9..2d49d7e1e85f2 100755
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Parts of this file were taken from the pyzmq project
@@ -9,6 +9,7 @@
import argparse
from distutils.sysconfig import get_config_vars
from distutils.version import LooseVersion
+import multiprocessing
import os
from os.path import join as pjoin
import platform
@@ -35,17 +36,6 @@ def is_platform_mac():
min_numpy_ver = "1.13.3"
min_cython_ver = "0.29.13" # note: sync with pyproject.toml
-setuptools_kwargs = {
- "install_requires": [
- "python-dateutil >= 2.6.1",
- "pytz >= 2017.2",
- f"numpy >= {min_numpy_ver}",
- ],
- "setup_requires": [f"numpy >= {min_numpy_ver}"],
- "zip_safe": False,
-}
-
-
try:
import Cython
@@ -60,7 +50,7 @@ def is_platform_mac():
# The import of Extension must be after the import of Cython, otherwise
# we do not get the appropriately patched class.
-# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
+# See https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html # noqa
from distutils.extension import Extension # noqa: E402 isort:skip
from distutils.command.build import build # noqa: E402 isort:skip
@@ -240,6 +230,7 @@ def initialize_options(self):
pjoin(ujson_python, "ujson.c"),
pjoin(ujson_python, "objToJSON.c"),
pjoin(ujson_python, "JSONtoObj.c"),
+ pjoin(ujson_python, "date_conversions.c"),
pjoin(ujson_lib, "ultrajsonenc.c"),
pjoin(ujson_lib, "ultrajsondec.c"),
pjoin(util, "move.c"),
@@ -356,7 +347,7 @@ def run(self):
sourcefile = pyxfile[:-3] + extension
msg = (
f"{extension}-source file '{sourcefile}' not found.\n"
- f"Run 'setup.py cython' before sdist."
+ "Run 'setup.py cython' before sdist."
)
assert os.path.isfile(sourcefile), msg
sdist_class.run(self)
@@ -412,15 +403,14 @@ def run(self):
cmdclass.update({"clean": CleanCommand, "build": build})
+cmdclass["build_ext"] = CheckingBuildExt
if cython:
suffix = ".pyx"
- cmdclass["build_ext"] = CheckingBuildExt
cmdclass["cython"] = CythonCommand
else:
suffix = ".c"
cmdclass["build_src"] = DummyBuildSrc
- cmdclass["build_ext"] = CheckingBuildExt
# ----------------------------------------------------------------------
# Preparation of compiler arguments
@@ -532,11 +522,6 @@ def maybe_cythonize(extensions, *args, **kwargs):
elif parsed.j:
nthreads = parsed.j
- # GH#30356 Cythonize doesn't support parallel on Windows
- if is_platform_windows() and nthreads > 0:
- print("Parallel build for cythonize ignored on Windows")
- nthreads = 0
-
kwargs["nthreads"] = nthreads
build_ext.render_templates(_pxifiles)
return cythonize(extensions, *args, **kwargs)
@@ -715,11 +700,15 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
ujson_ext = Extension(
"pandas._libs.json",
- depends=["pandas/_libs/src/ujson/lib/ultrajson.h"],
+ depends=[
+ "pandas/_libs/src/ujson/lib/ultrajson.h",
+ "pandas/_libs/src/ujson/python/date_conversions.h",
+ ],
sources=(
[
"pandas/_libs/src/ujson/python/ujson.c",
"pandas/_libs/src/ujson/python/objToJSON.c",
+ "pandas/_libs/src/ujson/python/date_conversions.c",
"pandas/_libs/src/ujson/python/JSONtoObj.c",
"pandas/_libs/src/ujson/lib/ultrajsonenc.c",
"pandas/_libs/src/ujson/lib/ultrajsondec.c",
@@ -745,37 +734,51 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
# ----------------------------------------------------------------------
-# The build cache system does string matching below this point.
-# if you change something, be careful.
-
-setup(
- name=DISTNAME,
- maintainer=AUTHOR,
- version=versioneer.get_version(),
- packages=find_packages(include=["pandas", "pandas.*"]),
- package_data={"": ["templates/*", "_libs/*.dll"]},
- ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
- maintainer_email=EMAIL,
- description=DESCRIPTION,
- license=LICENSE,
- cmdclass=cmdclass,
- url=URL,
- download_url=DOWNLOAD_URL,
- project_urls=PROJECT_URLS,
- long_description=LONG_DESCRIPTION,
- classifiers=CLASSIFIERS,
- platforms="any",
- python_requires=">=3.6.1",
- extras_require={
- "test": [
- # sync with setup.cfg minversion & install.rst
- "pytest>=4.0.2",
- "pytest-xdist",
- "hypothesis>=3.58",
- ]
- },
- entry_points={
- "pandas_plotting_backends": ["matplotlib = pandas:plotting._matplotlib"]
- },
- **setuptools_kwargs,
-)
+def setup_package():
+ setuptools_kwargs = {
+ "install_requires": [
+ "python-dateutil >= 2.6.1",
+ "pytz >= 2017.2",
+ f"numpy >= {min_numpy_ver}",
+ ],
+ "setup_requires": [f"numpy >= {min_numpy_ver}"],
+ "zip_safe": False,
+ }
+
+ setup(
+ name=DISTNAME,
+ maintainer=AUTHOR,
+ version=versioneer.get_version(),
+ packages=find_packages(include=["pandas", "pandas.*"]),
+ package_data={"": ["templates/*", "_libs/*.dll"]},
+ ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
+ maintainer_email=EMAIL,
+ description=DESCRIPTION,
+ license=LICENSE,
+ cmdclass=cmdclass,
+ url=URL,
+ download_url=DOWNLOAD_URL,
+ project_urls=PROJECT_URLS,
+ long_description=LONG_DESCRIPTION,
+ classifiers=CLASSIFIERS,
+ platforms="any",
+ python_requires=">=3.6.1",
+ extras_require={
+ "test": [
+ # sync with setup.cfg minversion & install.rst
+ "pytest>=4.0.2",
+ "pytest-xdist",
+ "hypothesis>=3.58",
+ ]
+ },
+ entry_points={
+ "pandas_plotting_backends": ["matplotlib = pandas:plotting._matplotlib"]
+ },
+ **setuptools_kwargs,
+ )
+
+
+if __name__ == "__main__":
+ # Freeze to support parallel compilation when using spawn instead of fork
+ multiprocessing.freeze_support()
+ setup_package()
diff --git a/versioneer.py b/versioneer.py
index 8a4710da5958a..5882349f65f0b 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1677,7 +1677,7 @@ def do_setup():
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
- # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
+ # (https://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index 120058afd1190..92126a7b5a2f2 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -84,11 +84,6 @@
<i class="fab fa-stack-overflow"></i>
</a>
</li>
- <li class="list-inline-item">
- <a href="https://pandas.discourse.group">
- <i class="fab fa-discourse"></i>
- </a>
- </li>
</ul>
<p>
pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md
index 5cd31d8722b9d..d5cb64e58f0ad 100644
--- a/web/pandas/about/citing.md
+++ b/web/pandas/about/citing.md
@@ -4,7 +4,7 @@
If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers:
-- [Data structures for statistical computing in python](http://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf),
+- [Data structures for statistical computing in python](https://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf),
McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010.
@inproceedings{mckinney2010data,
diff --git a/web/pandas/about/index.md b/web/pandas/about/index.md
index 9a0a3923a6b82..02caaa3b8c53c 100644
--- a/web/pandas/about/index.md
+++ b/web/pandas/about/index.md
@@ -2,8 +2,8 @@
## History of development
-In 2008, _pandas_ development began at [AQR Capital Management](http://www.aqr.com).
-By the end of 2009 it had been [open sourced](http://en.wikipedia.org/wiki/Open_source),
+In 2008, _pandas_ development began at [AQR Capital Management](https://www.aqr.com).
+By the end of 2009 it had been [open sourced](https://en.wikipedia.org/wiki/Open_source),
and is actively supported today by a community of like-minded individuals around the world who
contribute their valuable time and energy to help make open source _pandas_
possible. Thank you to [all of our contributors](team.html).
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
index 8a5c2735b3d93..35a6b3361f32e 100644
--- a/web/pandas/about/roadmap.md
+++ b/web/pandas/about/roadmap.md
@@ -134,19 +134,6 @@ pandas documentation. Some specific goals include
subsections of the documentation to make navigation and finding
content easier.
-## Package docstring validation
-
-To improve the quality and consistency of pandas docstrings, we've
-developed tooling to check docstrings in a variety of ways.
-<https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py>
-contains the checks.
-
-Like many other projects, pandas uses the
-[numpydoc](https://numpydoc.readthedocs.io/en/latest/) style for writing
-docstrings. With the collaboration of the numpydoc maintainers, we'd
-like to move the checks to a package other than pandas so that other
-projects can easily use them as well.
-
## Performance monitoring
Pandas uses [airspeed velocity](https://asv.readthedocs.io/en/stable/)
diff --git a/web/pandas/about/sponsors.md b/web/pandas/about/sponsors.md
index dcc6e367e5d64..4473a16cfd590 100644
--- a/web/pandas/about/sponsors.md
+++ b/web/pandas/about/sponsors.md
@@ -11,31 +11,50 @@ health and sustainability of the project. Visit numfocus.org for more informatio
Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible
to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation.
-## Tidelift
+## Become a sponsor
-_pandas_ is part of the [Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme).
-You can support pandas by becoming a Tidelift subscriber.
+As a free and open source project, _pandas_ relies on the support of the community of users for its development.
+If you work for an organization that uses and benefits from _pandas_, please consider supporting pandas. There
+are different ways, such as employing people to work on pandas, funding the project, or becoming a
+[NumFOCUS sponsor](https://numfocus.org/sponsors) to support the broader ecosystem. Please contact us at
+[admin@numfocus.org](mailto:admin@numfocus.org) to discuss.
## Institutional partners
-Institutional Partners are companies and universities that support the project by employing contributors.
-Current Institutional Partners include:
+Institutional partners are companies and universities that support the project by employing contributors.
+Current institutional partners include:
<ul>
- {% for company in partners.active if company.employs %}
- <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li>
+ {% for company in sponsors.active if company.kind == "partner" %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a>: {{ company.description }}</li>
+ {% endfor %}
+</ul>
+
+## Sponsors
+
+Sponsors are organizations that provide funding for pandas. Current sponsors include:
+
+<ul>
+ {% for company in sponsors.active if company.kind == "regular" %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a>: {{ company.description }}</li>
{% endfor %}
</ul>
## In-kind sponsors
-- [OVH](https://us.ovhcloud.com/): Hosting
-- [Indeed](https://opensource.indeedeng.io/): Logo and website design
+In-kind sponsors are organizations that support pandas development with goods or services.
+Current in-kind sponsors include:
+
+<ul>
+ {% for company in sponsors.inkind %}
+ <li><a href="{{ company.url }}">{{ company.name }}</a>: {{ company.description }}</li>
+ {% endfor %}
+</ul>
## Past institutional partners
<ul>
- {% for company in partners.past %}
+ {% for company in sponsors.past if company.kind == "partner" %}
<li><a href="{{ company.url }}">{{ company.name }}</a></li>
{% endfor %}
</ul>
diff --git a/web/pandas/community/blog.html b/web/pandas/community/blog.html
index ffe6f97d679e4..627aaa450893b 100644
--- a/web/pandas/community/blog.html
+++ b/web/pandas/community/blog.html
@@ -4,10 +4,10 @@
{% for post in blog.posts %}
<div class="card">
<div class="card-body">
- <h3 class="card-title"><a href="{{post.link }}" target="_blank">{{ post.title }}</a></h3>
- <h6 class="card-subtitle">Source: {{ post.feed }} | Author: {{ post.author }} | Published: {{ post.published.strftime("%b %d, %Y") }}</h6>
- <div class="card-text">{{ post.summary }}</div>
- <a class="card-link" href="{{post.link }}" target="_blank">Read</a>
+ <h5 class="card-title"><a href="{{post.link }}" target="_blank">{{ post.title }}</a></h5>
+ <h6 class="card-subtitle text-muted small mb-4">Source: {{ post.feed }} | Author: {{ post.author }} | Published: {{ post.published.strftime("%b %d, %Y") }}</h6>
+ <div class="card-text mb-2">{{ post.summary }}</div>
+ <a class="card-link small" href="{{post.link }}" target="_blank">Read more</a>
</div>
</div>
{% endfor %}
diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md
index de0e8120f7eee..bf62f4e00f847 100644
--- a/web/pandas/community/coc.md
+++ b/web/pandas/community/coc.md
@@ -54,10 +54,10 @@ incident.
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
-[http://contributor-covenant.org/version/1/3/0/][version],
+[https://www.contributor-covenant.org/version/1/3/0/][version],
and the [Swift Code of Conduct][swift].
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/3/0/
+[homepage]: https://www.contributor-covenant.org
+[version]: https://www.contributor-covenant.org/version/1/3/0/
[swift]: https://swift.org/community/#code-of-conduct
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index af6fd1ac77605..715a84c1babc6 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -84,19 +84,16 @@ pandas with the option to perform statistical estimation while plotting,
aggregating across observations and visualizing the fit of statistical
models to emphasize patterns in a dataset.
-### [yhat/ggpy](https://github.com/yhat/ggpy)
+### [plotnine](https://github.com/has2k1/plotnine/)
Hadley Wickham's [ggplot2](https://ggplot2.tidyverse.org/) is a
foundational exploratory visualization package for the R language. Based
on ["The Grammar of
Graphics"](https://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html)
it provides a powerful, declarative and extremely general way to
-generate bespoke plots of any kind of data. It's really quite
-incredible. Various implementations to other languages are available,
-but a faithful implementation for Python users has long been missing.
-Although still young (as of Jan-2014), the
-[yhat/ggpy](https://github.com/yhat/ggpy) project has been progressing
-quickly in that direction.
+generate bespoke plots of any kind of data.
+Various implementations to other languages are available.
+A good implementation for Python users is [has2k1/plotnine](https://github.com/has2k1/plotnine/).
### [IPython Vega](https://github.com/vega/ipyvega)
@@ -267,7 +264,7 @@ which pandas excels.
## Out-of-core
-### [Blaze](http://blaze.pydata.org/)
+### [Blaze](https://blaze.pydata.org/)
Blaze provides a standard API for doing computations with various
in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB,
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index e2a95a5039884..ef0b2a0270a0b 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -50,8 +50,6 @@ navbar:
target: /community/blog.html
- name: "Ask a question (StackOverflow)"
target: https://stackoverflow.com/questions/tagged/pandas
- - name: "Discuss"
- target: https://pandas.discourse.group
- name: "Code of conduct"
target: /community/coc.html
- name: "Ecosystem"
@@ -61,6 +59,7 @@ navbar:
blog:
num_posts: 8
feed:
+ - https://dev.pandas.io/pandas-blog/feeds/all.atom.xml
- https://wesmckinney.com/feeds/pandas.atom.xml
- https://tomaugspurger.github.io/feed
- https://jorisvandenbossche.github.io/feeds/all.atom.xml
@@ -101,30 +100,50 @@ maintainers:
- Wes McKinney
- Jeff Reback
- Joris Van den Bossche
-partners:
+sponsors:
active:
- name: "NumFOCUS"
url: https://numfocus.org/
logo: /static/img/partners/numfocus.svg
+ kind: numfocus
- name: "Anaconda"
url: https://www.anaconda.com/
logo: /static/img/partners/anaconda.svg
- employs: "Tom Augspurger, Brock Mendel"
+ kind: partner
+ description: "Tom Augspurger, Brock Mendel"
- name: "Two Sigma"
url: https://www.twosigma.com/
logo: /static/img/partners/two_sigma.svg
- employs: "Phillip Cloud, Jeff Reback"
+ kind: partner
+ description: "Phillip Cloud, Jeff Reback"
- name: "RStudio"
url: https://www.rstudio.com/
logo: /static/img/partners/r_studio.svg
- employs: "Wes McKinney"
+ kind: partner
+ description: "Wes McKinney"
- name: "Ursa Labs"
url: https://ursalabs.org/
logo: /static/img/partners/ursa_labs.svg
- employs: "Wes McKinney, Joris Van den Bossche"
+ kind: partner
+ description: "Wes McKinney, Joris Van den Bossche"
- name: "Tidelift"
url: https://tidelift.com
logo: /static/img/partners/tidelift.svg
+ kind: regular
+ description: "<i>pandas</i> is part of the <a href=\"https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme\">Tidelift subscription</a>. You can support pandas by becoming a Tidelift subscriber."
+ - name: "Chan Zuckerberg Initiative"
+ url: https://chanzuckerberg.com/
+ logo: /static/img/partners/czi.svg
+ kind: regular
+ description: "<i>pandas</i> is funded by the Essential Open Source Software for Science program of the Chan Zuckerberg Initiative. The funding is used for general maintainance, improve extension types, and a efficient string type."
+ inkind: # not included in active so they don't appear in the home page
+ - name: "OVH"
+ url: https://us.ovhcloud.com/
+ description: "Website and documentation hosting."
+ - name: "Indeed"
+ url: https://opensource.indeedeng.io/
+ description: "<i>pandas</i> logo design"
past:
- name: "Paris-Saclay Center for Data Science"
url: https://www.datascience-paris-saclay.fr/
+ kind: partner
diff --git a/web/pandas/index.html b/web/pandas/index.html
index 5aac5da16295b..fedb0b0c5f712 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -7,7 +7,7 @@
<h1>pandas</h1>
<p>
<strong>pandas</strong> is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,<br/>
- built on top of the <a href="http://www.python.org">Python</a> programming language.
+ built on top of the <a href="https://www.python.org">Python</a> programming language.
</p>
<p>
<a class="btn btn-primary" href="{{ base_url }}/getting_started.html">Install pandas now!</a>
@@ -43,15 +43,20 @@ <h5>Community</h5>
</div>
<section>
<h5>With the support of:</h5>
- <div class="row h-100">
- {% for company in partners.active %}
- <div class="col-sm-6 col-md-2 my-auto">
- <a href="{{ company.url }}" target="_blank">
- <img class="img-fluid" alt="{{ company.name }}" src="{{ base_url }}{{ company.logo }}"/>
- </a>
- </div>
- {% endfor %}
- </div>
+ {% for row in sponsors.active | batch(6, "") %}
+ <div class="row h-100">
+ {% for company in row %}
+ <div class="col-sm-6 col-md-2 my-auto">
+ {% if company %}
+ <a href="{{ company.url }}" target="_blank">
+ <img class="img-fluid" alt="{{ company.name }}" src="{{ base_url }}{{ company.logo }}"/>
+ </a>
+ {% endif %}
+ </div>
+ {% endfor %}
+ </div>
+ {% endfor %}
+ <p class="mt-4">The full list of companies supporting <i>pandas</i> is available in the <a href="{{ base_url }}/about/sponsors.html">sponsors page</a>.
</section>
</div>
<div class="col-md-3">
diff --git a/web/pandas/static/img/partners/czi.svg b/web/pandas/static/img/partners/czi.svg
new file mode 100644
index 0000000000000..b0ad9eb80580b
--- /dev/null
+++ b/web/pandas/static/img/partners/czi.svg
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="112px" height="62px" viewBox="0 0 112 62" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <!-- Generator: Sketch 52.2 (67145) - http://www.bohemiancoding.com/sketch -->
+ <title>Group</title>
+ <desc>Created with Sketch.</desc>
+ <g id="Symbols" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+ <g id="nav-elements-/-czi_logo_nav_dark" transform="translate(-48.000000, -17.000000)">
+ <g id="Group" transform="translate(48.000000, 17.000000)">
+ <path d="M13.118,16.05056 C11.6721111,16.69381 10.2262222,17.047985 8.20166667,17.047985 C3.64077778,17.047985 0.370222222,14.43236 0.370222222,9.79941 C0.370222222,5.565585 3.64077778,2.26176 8.15733333,2.26176 C9.38077778,2.26176 10.6928889,2.550835 11.627,2.949185 L12.3837778,2.195885 L13.0293333,2.195885 L13.2961111,7.62631 L12.4724444,7.67126 C11.0047778,4.545685 9.73622222,3.34831 7.93411111,3.34831 C5.70966667,3.34831 4.04133333,5.410585 4.04133333,9.466935 C4.04133333,13.678285 6.11022222,15.851385 8.869,15.851385 C9.44688889,15.851385 9.80311111,15.784735 10.3366667,15.60726 L12.9624444,12.260035 L13.5628889,12.348385 L13.118,16.05056 Z" id="Fill-1" fill="#231F20"/>
+ <path d="M21.1942889,8.26987 C20.3931778,8.26987 19.9039556,8.469045 19.1254,8.890645 L19.1254,15.585095 L20.3262889,16.07257 L20.3262889,16.715045 L14.7869556,16.715045 L14.7869556,16.07257 L16.2110667,15.585095 L16.2110667,2.83867 L14.6757333,1.907895 L14.6757333,1.331295 L18.7022889,0.289695 L19.1254,0.533045 L19.1254,7.959095 L19.3696222,8.00327 C20.3931778,7.09497 21.5948444,6.36337 22.9746222,6.36337 C24.8428444,6.36337 25.8220667,7.582445 25.8220667,9.39982 L25.8220667,15.585095 L27.2454,16.07257 L27.2454,16.715045 L21.7060667,16.715045 L21.7060667,16.07257 L22.9077333,15.585095 L22.9077333,10.375545 C22.9077333,9.08982 22.4402889,8.26987 21.1942889,8.26987" id="Fill-3" fill="#231F20"/>
+ <path d="M33.6751333,11.7944925 C31.2943556,11.8828425 30.4271333,12.7035675 30.4271333,13.8118175 C30.4271333,14.8309425 31.0058,15.3858425 32.0285778,15.3858425 C32.4742444,15.3858425 33.0746889,15.1858925 33.6751333,14.8092425 L33.6751333,11.7944925 Z M36.4339111,14.6092925 C36.4339111,15.3633675 36.7675778,15.8291425 38.0579111,15.8508425 L38.1022444,16.4274425 C37.4574667,16.6707925 36.4782444,16.9373925 35.6110222,16.9373925 C34.5874667,16.9373925 34.0088,16.5157925 33.8306889,15.4741925 L33.5639111,15.4300175 C32.5629111,16.3607925 31.6062444,16.9149175 30.4271333,16.9149175 C28.8256889,16.9149175 27.7126889,16.0507925 27.7126889,14.5209425 C27.7126889,12.6369175 29.6711333,11.3511925 33.6751333,11.0628925 L33.6751333,9.8438175 C33.6751333,8.1806675 33.0078,7.6265425 31.3394667,7.6265425 C30.8945778,7.6265425 30.4940222,7.7156675 30.2046889,7.8264925 C30.6052444,8.1589675 30.8276889,8.5356175 30.8276889,9.0230925 C30.8276889,9.7322175 30.3376889,10.2871175 29.4930222,10.2871175 C28.6919111,10.2871175 28.0914667,9.6663425 28.0914667,8.9347425 C28.0914667,7.4048925 30.5383556,6.3632925 32.8740222,6.3632925 C35.2991333,6.3632925 36.4339111,7.2723675 36.4339111,9.4446925 L36.4339111,14.6092925 Z" id="Fill-5" fill="#231F20"/>
+ <path d="M49.1152667,15.585095 L50.5168222,16.07257 L50.5168222,16.715045 L44.9992667,16.715045 L44.9992667,16.07257 L46.2009333,15.585095 L46.2009333,10.375545 C46.2009333,9.067345 45.7334889,8.26987 44.5100444,8.26987 C43.7532667,8.26987 43.2640444,8.49152 42.5080444,8.890645 L42.5080444,15.585095 L43.6871556,16.07257 L43.6871556,16.715045 L38.1696,16.715045 L38.1696,16.07257 L39.5937111,15.585095 L39.5937111,8.690695 L38.1921556,7.75992 L38.1921556,7.18332 L41.9962667,6.29672 L42.4186,6.562545 L42.2630444,8.15827 L42.5298222,8.20322 C43.8870444,6.96167 45.0443778,6.36337 46.2678222,6.36337 C48.0917111,6.36337 49.1152667,7.515795 49.1152667,9.400595 L49.1152667,15.585095 Z" id="Fill-7" fill="#231F20"/>
+ <path d="M12.5395667,38.507425 L0.148011111,38.507425 L0.0593444444,37.9533 L8.9579,25.583525 L5.55434444,25.6277 C4.33012222,25.6277 3.77401111,26.026825 2.81734444,27.268375 L1.39323333,29.041575 L0.8379,28.90905 L1.26023333,24.386925 L12.8957889,24.386925 L12.9844556,24.94105 L4.15201111,37.332525 L8.37923333,37.28835 C9.53656667,37.28835 10.0926778,36.822575 10.8261222,35.7585 L12.4726778,33.364525 L12.9844556,33.45365 L12.5395667,38.507425 Z" id="Fill-9" fill="#231F20"/>
+ <path d="M25.5768333,37.4432725 L25.5768333,37.9973975 L21.7501667,38.8839975 L21.3278333,38.6398725 L21.4833889,37.0441475 L21.2166111,36.9999725 C19.8593889,38.2415225 18.7689444,38.8398225 17.4786111,38.8398225 C15.6103889,38.8398225 14.6311667,37.5757975 14.6311667,35.7801225 L14.6311667,30.4163475 L13.2521667,29.5072725 L13.2521667,28.9756225 L17.1449444,28.0882475 L17.5455,28.3540725 L17.5455,34.7826975 C17.5455,36.1575475 18.0347222,36.9108475 19.2363889,36.9108475 C19.9923889,36.9108475 20.4823889,36.7116725 21.2383889,36.3125475 L21.2383889,30.4163475 L19.8368333,29.5072725 L19.8368333,28.9756225 L23.7303889,28.0882475 L24.1527222,28.3540725 L24.1527222,36.6233225 L25.5768333,37.4432725 Z" id="Fill-11" fill="#231F20"/>
+ <path d="M34.0978556,32.12313 C33.0968556,32.12313 32.5181889,31.39153 32.5181889,30.394105 C32.5181889,29.972505 32.6068556,29.57338 32.7631889,29.219205 C32.6294111,29.19673 32.5181889,29.17503 32.3626333,29.17503 C30.7160778,29.240905 29.2701889,30.65993 29.2701889,33.342205 C29.2701889,35.80283 30.7829667,37.088555 32.8075222,37.088555 C33.7198556,37.088555 34.3428556,36.888605 35.3881889,36.490255 L35.6106333,37.021905 C34.3646333,38.28593 32.9630778,38.861755 31.4277444,38.861755 C28.5585222,38.861755 26.2671889,36.800255 26.2671889,33.74133 C26.2671889,30.637455 28.6915222,28.15513 32.0071889,28.15513 C34.2090778,28.15513 35.5888556,29.219205 35.5888556,30.52663 C35.5888556,31.435705 34.9658556,32.12313 34.0978556,32.12313" id="Fill-13" fill="#231F20"/>
+ <polygon id="Fill-15" fill="#231F20" points="46.8457111 37.39902 47.9136 37.864795 47.9136 38.50727 42.9303778 38.50727 42.9303778 37.931445 43.7532667 37.509845 41.1726 34.00762 40.3940444 34.00762 40.3940444 37.376545 41.5062667 37.864795 41.5062667 38.50727 36.0556 38.50727 36.0556 37.864795 37.4797111 37.376545 37.4797111 24.63012 35.9443778 23.699345 35.9443778 23.122745 39.9717111 22.081145 40.3940444 22.324495 40.3940444 32.831945 41.2394889 32.831945 43.9539333 29.795495 42.7297111 29.04142 42.7297111 28.487295 47.4461556 28.487295 47.4461556 29.152245 46.0671556 29.68467 42.9972667 32.41112"/>
+ <path d="M54.8996,32.38911 C54.8778222,30.12766 54.2991556,29.063585 53.0531556,29.063585 C51.8297111,29.063585 50.9617111,30.371785 50.8730444,32.588285 L54.8996,32.38911 Z M50.8504889,33.386535 C50.8730444,35.802985 51.9183778,37.15536 54.1653778,37.15536 C55.1889333,37.15536 56.0787111,36.95541 57.3916,36.467935 L57.6366,36.977885 C56.2568222,38.107835 54.6102667,38.86191 53.1418222,38.86191 C49.9607111,38.86191 47.9361556,36.77871 47.9361556,33.696535 C47.9361556,30.50431 50.4499333,28.155285 53.2312667,28.155285 C56.5687111,28.155285 57.8582667,30.57096 57.7470444,33.386535 L50.8504889,33.386535 Z" id="Fill-17" fill="#231F20"/>
+ <path d="M66.0904222,31.657665 C65.2224222,31.657665 64.6888667,31.014415 64.5776444,30.261115 C63.8659778,30.416115 63.3090889,30.88189 62.8198667,31.524365 L62.8198667,37.37639 L64.3995333,37.86464 L64.3995333,38.507115 L58.4822,38.507115 L58.4822,37.86464 L59.9055333,37.37639 L59.9055333,30.482765 L58.5039778,29.55199 L58.5039778,28.97539 L62.3088667,28.08879 L62.7312,28.354615 L62.4862,30.482765 L62.7529778,30.549415 C63.6653111,29.04204 64.5776444,28.132965 65.8905333,28.132965 C66.9358667,28.132965 67.6926444,28.82039 67.6926444,29.88369 C67.6926444,30.90359 67.0027556,31.657665 66.0904222,31.657665" id="Fill-19" fill="#231F20"/>
+ <path d="M73.4766667,30.0172225 C72.9205556,30.0172225 72.4087778,30.1722225 71.9413333,30.3496975 L71.9413333,36.7783225 C72.4974444,37.4874475 73.1204444,37.8423975 73.9884444,37.8423975 C75.1675556,37.8423975 76.2128889,36.7116725 76.2128889,33.8960975 C76.2128889,31.2587725 75.2118889,30.0172225 73.4766667,30.0172225 M73.4097778,38.8622975 C72.1194444,38.8622975 70.3624444,38.5739975 69.027,38.1531725 L69.027,24.6301975 L67.4698889,23.6994225 L67.4698889,23.1228225 L71.519,22.0812225 L71.9413333,22.3245725 L71.9413333,29.4630975 L72.2088889,29.5072725 C73.1204444,28.6873225 74.0327778,28.1548975 75.3005556,28.1548975 C77.392,28.1548975 79.2827778,30.0830975 79.2827778,33.2094475 C79.2827778,36.1800225 77.0583333,38.8622975 73.4097778,38.8622975" id="Fill-21" fill="#231F20"/>
+ <path d="M87.4482,32.38911 C87.4256444,30.12766 86.8477556,29.063585 85.6009778,29.063585 C84.3783111,29.063585 83.5103111,30.371785 83.4216444,32.588285 L87.4482,32.38911 Z M83.3990889,33.386535 C83.4216444,35.802985 84.4669778,37.15536 86.7139778,37.15536 C87.7375333,37.15536 88.6273111,36.95541 89.9394222,36.467935 L90.1844222,36.977885 C88.8054222,38.107835 87.1588667,38.86191 85.6904222,38.86191 C82.5093111,38.86191 80.4847556,36.77871 80.4847556,33.696535 C80.4847556,30.50431 82.9985333,28.155285 85.7790889,28.155285 C89.1165333,28.155285 90.4068667,30.57096 90.2956444,33.386535 L83.3990889,33.386535 Z" id="Fill-23" fill="#231F20"/>
+ <path d="M98.6386333,31.657665 C97.7714111,31.657665 97.2370778,31.014415 97.1258556,30.261115 C96.4141889,30.416115 95.8580778,30.88189 95.3680778,31.524365 L95.3680778,37.37639 L96.9477444,37.86464 L96.9477444,38.507115 L91.0304111,38.507115 L91.0304111,37.86464 L92.4537444,37.37639 L92.4537444,30.482765 L91.0529667,29.55199 L91.0529667,28.97539 L94.8570778,28.08879 L95.2794111,28.354615 L95.0344111,30.482765 L95.3011889,30.549415 C96.2135222,29.04204 97.1258556,28.132965 98.4387444,28.132965 C99.4840778,28.132965 100.240856,28.82039 100.240856,29.88369 C100.240856,30.90359 99.5509667,31.657665 98.6386333,31.657665" id="Fill-25" fill="#231F20"/>
+ <path d="M105.268333,28.93075 C104.267333,28.93075 103.555667,29.574 103.555667,31.5022 C103.555667,33.519525 104.245556,34.07365 105.379556,34.07365 C106.380556,34.07365 107.026111,33.164575 107.026111,31.41385 C107.026111,29.773175 106.403111,28.93075 105.268333,28.93075 M106.580444,38.8399 L102.999556,38.795725 C102.710222,39.172375 102.509556,39.65985 102.509556,40.258925 C102.509556,41.41135 103.466222,42.2096 105.757556,42.2096 C107.604,42.2096 109.050667,41.300525 109.050667,40.23645 C109.050667,39.2615 108.293889,38.8616 106.580444,38.8399 M105.335222,34.849425 C104.578444,34.849425 104.111778,34.782775 103.711222,34.65025 C103.488778,34.937775 103.288889,35.337675 103.288889,35.6252 C103.288889,36.1801 103.688667,36.445925 104.756556,36.467625 L107.492778,36.5118 C109.806667,36.55675 111.008333,37.44335 111.008333,39.105725 C111.008333,41.27805 108.627556,43.1179 104.979,43.1179 C101.642333,43.1179 100.107,42.14295 100.107,40.857225 C100.107,40.037275 100.796889,39.2832 102.265333,38.729075 L102.242778,38.507425 C101.353,38.219125 100.952444,37.665 100.952444,37.044225 C100.952444,36.312625 101.575444,35.51515 102.887556,34.49525 L102.887556,34.36195 C101.709222,33.78535 100.885556,32.76545 100.885556,31.5022 C100.885556,29.4407 102.843222,28.154975 105.379556,28.154975 C106.870556,28.154975 107.804667,28.5541 108.583222,29.108225 L110.295889,28.154975 L111.564444,28.154975 L111.564444,30.083175 L111.119556,30.083175 L109.273111,29.729 L109.228,29.906475 C109.495556,30.371475 109.695444,30.859725 109.695444,31.56885 C109.695444,33.452875 108.027111,34.849425 105.335222,34.849425" id="Fill-27" fill="#231F20"/>
+ <polygon id="Fill-29" fill="#231F20" points="7.8232 59.5675075 7.8232 60.2991075 0.236755556 60.2991075 0.236755556 59.5675075 2.37253333 59.0800325 2.37253333 47.3976825 0.236755556 46.9094325 0.236755556 46.1786075 7.8232 46.1786075 7.8232 46.9094325 5.70997778 47.3976825 5.70997778 59.0800325"/>
+ <path d="M19.7256889,59.168615 L21.1272444,59.65609 L21.1272444,60.298565 L15.6096889,60.298565 L15.6096889,59.65609 L16.8113556,59.168615 L16.8113556,53.959065 C16.8113556,52.650865 16.3439111,51.85339 15.1204667,51.85339 C14.3636889,51.85339 13.8752444,52.07504 13.1184667,52.47339 L13.1184667,59.168615 L14.2975778,59.65609 L14.2975778,60.298565 L8.78002222,60.298565 L8.78002222,59.65609 L10.2041333,59.168615 L10.2041333,52.274215 L8.80257778,51.34344 L8.80257778,50.76684 L12.6066889,49.88024 L13.0298,50.146065 L12.8734667,51.74179 L13.1410222,51.78674 C14.4974667,50.54519 15.6548,49.94689 16.8782444,49.94689 C18.7021333,49.94689 19.7256889,51.099315 19.7256889,52.984115 L19.7256889,59.168615 Z" id="Fill-31" fill="#231F20"/>
+ <path d="M24.3088222,44.1832925 C25.3767111,44.1832925 26.1334889,44.9590675 26.1334889,45.9789675 C26.1334889,46.9763925 25.3323778,47.8846925 24.2201556,47.8846925 C23.1966,47.8846925 22.4180444,47.1313925 22.4180444,46.1114925 C22.4180444,45.1140675 23.2191556,44.1832925 24.3088222,44.1832925 Z M21.4388222,51.3435175 L21.4388222,50.7669175 L25.3767111,49.8803175 L25.7547111,50.1019675 L25.7547111,59.1686925 L27.1344889,59.6561675 L27.1344889,60.2986425 L21.3727111,60.2986425 L21.3727111,59.6561675 L22.8403778,59.1686925 L22.8403778,52.2742925 L21.4388222,51.3435175 Z" id="Fill-33" fill="#231F20"/>
+ <path d="M31.5837667,60.58733 C29.8259889,60.58733 28.9362111,59.744905 28.9362111,57.94923 L28.9362111,51.720555 L27.2235444,51.720555 L27.2235444,51.12148 L29.3811,49.63658 L31.0276556,47.13178 L31.8505444,47.13178 L31.8505444,50.23488 L35.2323222,50.23488 L35.2323222,51.720555 L31.8505444,51.720555 L31.8505444,56.906855 C31.8505444,57.838405 31.9843222,58.326655 32.4292111,58.636655 C32.8297667,58.90248 33.2521,58.96913 33.9863222,58.96913 C34.3425444,58.96913 34.8092111,58.946655 35.2766556,58.90248 L35.2992111,59.589905 C34.4755444,60.099855 32.8958778,60.58733 31.5837667,60.58733" id="Fill-35" fill="#231F20"/>
+ <path d="M38.4804778,44.1832925 C39.5483667,44.1832925 40.3051444,44.9590675 40.3051444,45.9789675 C40.3051444,46.9763925 39.5040333,47.8846925 38.3918111,47.8846925 C37.3682556,47.8846925 36.5897,47.1313925 36.5897,46.1114925 C36.5897,45.1140675 37.3908111,44.1832925 38.4804778,44.1832925 Z M35.6104778,51.3435175 L35.6104778,50.7669175 L39.5483667,49.8803175 L39.9263667,50.1019675 L39.9263667,59.1686925 L41.3061444,59.6561675 L41.3061444,60.2986425 L35.5443667,60.2986425 L35.5443667,59.6561675 L37.0120333,59.1686925 L37.0120333,52.2742925 L35.6104778,51.3435175 Z" id="Fill-37" fill="#231F20"/>
+ <path d="M47.9577,55.3780125 C45.5769222,55.4663625 44.7097,56.2863125 44.7097,57.3953375 C44.7097,58.4144625 45.2883667,58.9693625 46.3111444,58.9693625 C46.7568111,58.9693625 47.3572556,58.7694125 47.9577,58.3927625 L47.9577,55.3780125 Z M50.7164778,58.1928125 C50.7164778,58.9468875 51.0501444,59.4126625 52.3404778,59.4343625 L52.3848111,60.0109625 C51.7400333,60.2543125 50.7608111,60.5209125 49.8935889,60.5209125 C48.8700333,60.5209125 48.2913667,60.0993125 48.1132556,59.0577125 L47.8464778,59.0135375 C46.8454778,59.9443125 45.8888111,60.4984375 44.7097,60.4984375 C43.1082556,60.4984375 41.9952556,59.6343125 41.9952556,58.1044625 C41.9952556,56.2204375 43.9537,54.9347125 47.9577,54.6464125 L47.9577,53.4273375 C47.9577,51.7641875 47.2903667,51.2100625 45.6220333,51.2100625 C45.1771444,51.2100625 44.7765889,51.2991875 44.4872556,51.4100125 C44.8878111,51.7424875 45.1102556,52.1191375 45.1102556,52.6066125 C45.1102556,53.3157375 44.6202556,53.8706375 43.7755889,53.8706375 C42.9744778,53.8706375 42.3740333,53.2498625 42.3740333,52.5182625 C42.3740333,50.9884125 44.8209222,49.9468125 47.1565889,49.9468125 C49.5817,49.9468125 50.7164778,50.8558875 50.7164778,53.0282125 L50.7164778,58.1928125 Z" id="Fill-39" fill="#231F20"/>
+ <path d="M56.4567111,60.58733 C54.6989333,60.58733 53.8091556,59.744905 53.8091556,57.94923 L53.8091556,51.720555 L52.0964889,51.720555 L52.0964889,51.12148 L54.2540444,49.63658 L55.9006,47.13178 L56.7234889,47.13178 L56.7234889,50.23488 L60.1052667,50.23488 L60.1052667,51.720555 L56.7234889,51.720555 L56.7234889,56.906855 C56.7234889,57.838405 56.8572667,58.326655 57.3021556,58.636655 C57.7027111,58.90248 58.1250444,58.96913 58.8592667,58.96913 C59.2154889,58.96913 59.6821556,58.946655 60.1496,58.90248 L60.1721556,59.589905 C59.3484889,60.099855 57.7688222,60.58733 56.4567111,60.58733" id="Fill-41" fill="#231F20"/>
+ <path d="M63.3534222,44.1832925 C64.4213111,44.1832925 65.1780889,44.9590675 65.1780889,45.9789675 C65.1780889,46.9763925 64.3769778,47.8846925 63.2647556,47.8846925 C62.2412,47.8846925 61.4626444,47.1313925 61.4626444,46.1114925 C61.4626444,45.1140675 62.2637556,44.1832925 63.3534222,44.1832925 Z M60.4834222,51.3435175 L60.4834222,50.7669175 L64.4213111,49.8803175 L64.7993111,50.1019675 L64.7993111,59.1686925 L66.1790889,59.6561675 L66.1790889,60.2986425 L60.4173111,60.2986425 L60.4173111,59.6561675 L61.8849778,59.1686925 L61.8849778,52.2742925 L60.4834222,51.3435175 Z" id="Fill-43" fill="#231F20"/>
+ <polygon id="Fill-45" fill="#231F20" points="72.8530444 57.3727075 72.9860444 57.3727075 74.0096 54.7570825 75.0774889 51.5873325 73.7871556 50.8999075 73.7871556 50.2791325 78.1924889 50.2791325 78.1924889 50.9448575 76.7014889 51.6756825 72.8748222 60.5432325 71.2290444 60.5432325 67.3129333 51.4765075 65.9564889 50.9448575 65.9564889 50.2791325 71.9407111 50.2791325 71.9407111 50.8999075 70.5617111 51.4315575"/>
+ <path d="M85.2226667,54.180715 C85.2001111,51.92004 84.6222222,50.85519 83.3762222,50.85519 C82.1527778,50.85519 81.2847778,52.16339 81.1961111,54.37989 L85.2226667,54.180715 Z M81.1735556,55.17814 C81.1961111,57.59459 82.2414444,58.946965 84.4884444,58.946965 C85.512,58.946965 86.4017778,58.74779 87.7146667,58.25954 L87.9588889,58.76949 C86.5798889,59.900215 84.9333333,60.65429 83.4648889,60.65429 C80.2837778,60.65429 78.2592222,58.570315 78.2592222,55.48814 C78.2592222,52.29669 80.773,49.94689 83.5535556,49.94689 C86.891,49.94689 88.1813333,52.36334 88.0701111,55.17814 L81.1735556,55.17814 Z" id="Fill-47" fill="#231F20"/>
+ <path d="M99.2454556,53.522275 C98.7523444,53.86405 98.1752333,54.07795 97.3725667,54.095 C96.4065667,54.11515 95.6809,53.4781 95.6140111,52.53725 C95.5782333,52.037375 95.7462333,51.5592 96.0876778,51.1934 C96.4299,50.826825 96.8957889,50.62145 97.3990111,50.617575 C97.6541222,50.61525 97.9232333,50.667175 98.1542333,50.766375 C98.1542333,50.766375 98.0912333,51.291825 98.0702333,51.5282 L98.8651222,51.53285 L99.0074556,50.3502 L98.7842333,50.20915 C98.3665667,49.9534 97.8851222,49.81855 97.3920111,49.8232 C96.6741222,49.8294 95.9842333,50.13165 95.4973444,50.653225 C95.0034556,51.18255 94.7600111,51.87075 94.8121222,52.592275 C94.8572333,53.232425 95.1341222,53.816775 95.5899,54.2376 C96.0511222,54.664625 96.6725667,54.894025 97.3445667,54.887825 C97.3609,54.887825 97.3772333,54.887825 97.3943444,54.88705 C97.8011222,54.876975 98.2600111,54.799475 98.7647889,54.6003 L99.3380111,53.491275 C99.3380111,53.491275 99.3263444,53.4657 99.2454556,53.522275" id="Fill-49" fill="#FF414C"/>
+ <path d="M101.945278,56.363425 L101.848833,57.4523 L101.847278,57.4647 C101.808389,57.877 101.5875,58.226525 101.256167,58.4009 C100.993278,58.539625 100.7055,58.53885 100.4465,58.398575 C100.177389,58.252875 100.049056,58.03355 100.000833,57.756875 C99.9253889,57.318225 100.175833,56.860975 100.517278,56.630025 L105.846611,53.175075 C105.887056,53.468025 105.908056,53.7664 105.908056,54.069425 C105.908056,57.645275 102.991389,60.55385 99.4058333,60.55385 C95.8202778,60.55385 92.9028333,57.645275 92.9028333,54.069425 C92.9028333,50.49435 95.8202778,47.585 99.4058333,47.585 C102.086833,47.585 104.392944,49.2125 105.386167,51.5282 L106.036389,51.107375 C104.897722,48.5863 102.355167,46.8255 99.4058333,46.8255 C95.3971667,46.8255 92.1359444,50.075075 92.1359444,54.069425 C92.1359444,58.063775 95.3971667,61.31335 99.4058333,61.31335 C103.4145,61.31335 106.675722,58.063775 106.675722,54.069425 C106.675722,53.6083 106.631389,53.15725 106.548167,52.72015 C106.498389,52.434175 106.388722,52.054425 106.370056,51.9932 L104.8565,52.936375 L102.852167,54.201175 C102.781389,53.93535 102.619611,53.666425 102.277389,53.511425 C101.872167,53.32775 101.215722,53.469575 100.7335,53.742375 C100.7335,53.742375 102.053389,51.19805 102.271944,50.780325 C102.285944,50.752425 102.2665,50.72065 102.235389,50.72065 L102.1545,50.721425 L102.155278,50.719875 L99.7682778,50.719875 L99.6687222,51.54215 L100.042056,51.5406 L101.005722,51.5406 L99.1165,55.145125 C99.0799444,55.21565 99.1561667,55.290825 99.2269444,55.2544 L100.235722,54.720425 C100.721833,54.47165 101.532278,53.960925 101.932833,54.2562 C101.991167,54.298825 102.065833,54.41275 102.0705,54.531325 C102.072833,54.577825 102.0495,54.621225 102.010611,54.647575 L100.000056,56.0015 C99.3459444,56.4665 99.0908333,57.15935 99.1515,57.8119 C99.2020556,58.361375 99.5450556,58.834125 100.053722,59.110025 C100.321278,59.254175 100.609833,59.322375 100.897611,59.314625 C101.151167,59.30765 101.403944,59.241775 101.640389,59.117 
C102.214389,58.815525 102.5955,58.228075 102.662389,57.546075 L102.824167,55.7132 L101.945278,56.363425 Z" id="Fill-51" fill="#FF414C"/>
+ </g>
+ </g>
+ </g>
+</svg>
\ No newline at end of file
diff --git a/web/pandas_web.py b/web/pandas_web.py
old mode 100644
new mode 100755
index d515d8a0e1cd7..a34a31feabce0
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Simple static site generator for the pandas web.
@@ -28,14 +28,15 @@
import importlib
import operator
import os
+import re
import shutil
import sys
import time
import typing
import feedparser
-import markdown
import jinja2
+import markdown
import requests
import yaml
@@ -74,6 +75,7 @@ def blog_add_posts(context):
preprocessor fetches the posts in the feeds, and returns the relevant
information for them (sorted from newest to oldest).
"""
+ tag_expr = re.compile("<.*?>")
posts = []
for feed_url in context["blog"]["feed"]:
feed_data = feedparser.parse(feed_url)
@@ -81,6 +83,7 @@ def blog_add_posts(context):
published = datetime.datetime.fromtimestamp(
time.mktime(entry.published_parsed)
)
+ summary = re.sub(tag_expr, "", entry.summary)
posts.append(
{
"title": entry.title,
@@ -89,7 +92,7 @@ def blog_add_posts(context):
"feed": feed_data["feed"]["title"],
"link": entry.link,
"description": entry.description,
- "summary": entry.summary,
+ "summary": summary,
}
)
posts.sort(key=operator.itemgetter("published"), reverse=True)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31546 | 2020-02-01T16:33:45Z | 2020-02-01T16:51:18Z | null | 2020-02-01T16:54:02Z |
BUG&TST: df.replace fail after converting to new dtype | diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index aa91e7a489356..92b74c4409d7d 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1356,3 +1356,10 @@ def test_replace_replacer_dtype(self, replacer):
result = df.replace({"a": replacer, "b": replacer})
expected = pd.DataFrame([replacer])
tm.assert_frame_equal(result, expected)
+
+ def test_replace_after_convert_dtypes(self):
+ # GH31517
+ df = pd.DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
+ result = df.replace(1, 10)
+ expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
+ tm.assert_frame_equal(result, expected)
| - [x] closes #31517
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31545 | 2020-02-01T16:21:18Z | 2020-02-01T23:10:40Z | 2020-02-01T23:10:40Z | 2020-02-01T23:10:57Z |
Backport PR #31529 on branch 1.0.x (BUG: Series multiplication with timedelta scalar numexpr path) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 95fab6a18ffe1..df001bc18b0a0 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -44,6 +44,7 @@ Timezones
Numeric
^^^^^^^
- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
+- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
-
Conversion
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index b84d468fff736..56519846c702d 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -8,7 +8,7 @@
import numpy as np
-from pandas._libs import Timestamp, lib, ops as libops
+from pandas._libs import Timedelta, Timestamp, lib, ops as libops
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
@@ -184,11 +184,12 @@ def arithmetic_op(
rvalues = maybe_upcast_for_op(rvalues, lvalues.shape)
if should_extension_dispatch(left, rvalues) or isinstance(
- rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp)
+ rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp, Timedelta)
):
# TimedeltaArray, DatetimeArray, and Timestamp are included here
# because they have `freq` attribute which is handled correctly
# by dispatch_to_extension_op.
+ # Timedelta is included because numexpr will fail on it, see GH#31457
res_values = dispatch_to_extension_op(op, lvalues, rvalues)
else:
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index f55e2b98ee912..22da3de80e196 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -176,6 +176,28 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
commute = scalar_td * index
tm.assert_equal(commute, expected)
+ @pytest.mark.parametrize(
+ "scalar_td",
+ [
+ Timedelta(days=1),
+ Timedelta(days=1).to_timedelta64(),
+ Timedelta(days=1).to_pytimedelta(),
+ ],
+ ids=lambda x: type(x).__name__,
+ )
+ def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box):
+ arr = np.arange(2 * 10 ** 4).astype(np.int64)
+ obj = tm.box_expected(arr, box, transpose=False)
+
+ expected = arr.view("timedelta64[D]").astype("timedelta64[ns]")
+ expected = tm.box_expected(expected, box, transpose=False)
+
+ result = obj * scalar_td
+ tm.assert_equal(result, expected)
+
+ result = scalar_td * obj
+ tm.assert_equal(result, expected)
+
def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
index = numeric_idx[1:3]
| Backport PR #31529: BUG: Series multiplication with timedelta scalar numexpr path | https://api.github.com/repos/pandas-dev/pandas/pulls/31543 | 2020-02-01T15:24:54Z | 2020-02-01T19:38:45Z | 2020-02-01T19:38:45Z | 2020-02-01T19:38:45Z |
Backport PR #31513 on branch 1.0.x (REGR: Fixed truncation with na_rep) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 95fab6a18ffe1..16e463e3a5944 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -86,6 +86,7 @@ MultiIndex
I/O
^^^
+- Fixed regression in :meth:`~DataFrame.to_csv` where specifying an ``na_rep`` might truncate the values written (:issue:`31447`)
-
-
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 5fcd796eb41ed..6f2bcdb495d01 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -680,7 +680,10 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
itemsize = writers.word_len(na_rep)
if not self.is_object and not quoting and itemsize:
- values = values.astype(f"<U{itemsize}")
+ values = values.astype(str)
+ if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize:
+ # enlarge for the na_rep
+ values = values.astype(f"<U{itemsize}")
else:
values = np.array(values, dtype="object")
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index a211ac11cf725..b3ee8da52dece 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -583,3 +583,17 @@ def test_to_csv_timedelta_precision(self):
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
+
+ def test_na_rep_truncated(self):
+ # https://github.com/pandas-dev/pandas/issues/31447
+ result = pd.Series(range(8, 12)).to_csv(na_rep="-")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,8", "1,9", "2,10", "3,11"])
+ assert result == expected
+
+ result = pd.Series([True, False]).to_csv(na_rep="nan")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,True", "1,False"])
+ assert result == expected
+
+ result = pd.Series([1.1, 2.2]).to_csv(na_rep=".")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,1.1", "1,2.2"])
+ assert result == expected
| Backport PR #31513: REGR: Fixed truncation with na_rep | https://api.github.com/repos/pandas-dev/pandas/pulls/31542 | 2020-02-01T15:21:22Z | 2020-02-01T19:39:15Z | 2020-02-01T19:39:14Z | 2020-02-01T19:39:15Z |
Backport PR #31456 on branch 1.0.x (BUG: Groupby.apply wasn't allowing for functions which return lists) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index 95fab6a18ffe1..478e986a26231 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Bug fixes
~~~~~~~~~
-
+- Bug in :meth:`GroupBy.apply` was raising ``TypeError`` if called with function which returned a non-pandas non-scalar object (e.g. a list) (:issue:`31441`)
Categorical
^^^^^^^^^^^
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 8571761f77265..89164c527002a 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -501,9 +501,9 @@ def apply_frame_axis0(object frame, object f, object names,
if not is_scalar(piece):
# Need to copy data to avoid appending references
- if hasattr(piece, "copy"):
+ try:
piece = piece.copy(deep="all")
- else:
+ except (TypeError, AttributeError):
piece = copy(piece)
results.append(piece)
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9c3a832121c7f..4a879e50144e4 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -785,3 +785,27 @@ def test_apply_index_has_complex_internals(index):
df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
result = df.groupby("group").apply(lambda x: x)
tm.assert_frame_equal(result, df)
+
+
+@pytest.mark.parametrize(
+ "function, expected_values",
+ [
+ (lambda x: x.index.to_list(), [[0, 1], [2, 3]]),
+ (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),
+ (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),
+ (
+ lambda x: {n: i for (n, i) in enumerate(x.index.to_list())},
+ [{0: 0, 1: 1}, {0: 2, 1: 3}],
+ ),
+ (
+ lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())],
+ [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]],
+ ),
+ ],
+)
+def test_apply_function_returns_non_pandas_non_scalar(function, expected_values):
+ # GH 31441
+ df = pd.DataFrame(["A", "A", "B", "B"], columns=["groups"])
+ result = df.groupby("groups").apply(function)
+ expected = pd.Series(expected_values, index=pd.Index(["A", "B"], name="groups"))
+ tm.assert_series_equal(result, expected)
| Backport PR #31456: BUG: Groupby.apply wasn't allowing for functions which return lists | https://api.github.com/repos/pandas-dev/pandas/pulls/31541 | 2020-02-01T15:15:25Z | 2020-02-01T19:37:49Z | 2020-02-01T19:37:49Z | 2020-02-01T19:37:49Z |
Backport PR #31520 on branch 1.0.x (REGR: to_datetime, unique with OOB values) | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index ff8433c7cafd9..95fab6a18ffe1 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -25,8 +25,8 @@ Categorical
Datetimelike
^^^^^^^^^^^^
--
--
+- Fixed regression in :meth:`to_datetime` when parsing non-nanosecond resolution datetimes (:issue:`31491`)
+- Fixed bug in :meth:`to_datetime` raising when ``cache=True`` and out-of-bound values are present (:issue:`31491`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 431aa56eafe5f..c6080628bcfd6 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -29,6 +29,7 @@
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
+ is_datetime64_dtype,
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
@@ -191,6 +192,11 @@ def _reconstruct_data(values, dtype, original):
if isinstance(original, ABCIndexClass):
values = values.astype(object, copy=False)
elif dtype is not None:
+ if is_datetime64_dtype(dtype):
+ dtype = "datetime64[ns]"
+ elif is_timedelta64_dtype(dtype):
+ dtype = "timedelta64[ns]"
+
values = values.astype(dtype, copy=False)
return values
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 8c2be7092c37d..de52a1e46c33c 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -603,7 +603,9 @@ def to_datetime(
cache : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
- date strings, especially ones with timezone offsets.
+ date strings, especially ones with timezone offsets. The cache is only
+ used when there are at least 50 values. The presence of out-of-bounds
+ values will render the cache unusable and may slow down parsing.
.. versionadded:: 0.23.0
@@ -735,7 +737,17 @@ def to_datetime(
convert_listlike = partial(convert_listlike, name=arg.name)
result = convert_listlike(arg, format)
elif is_list_like(arg):
- cache_array = _maybe_cache(arg, format, cache, convert_listlike)
+ try:
+ cache_array = _maybe_cache(arg, format, cache, convert_listlike)
+ except tslibs.OutOfBoundsDatetime:
+ # caching attempts to create a DatetimeIndex, which may raise
+ # an OOB. If that's the desired behavior, then just reraise...
+ if errors == "raise":
+ raise
+ # ... otherwise, continue without the cache.
+ from pandas import Series
+
+ cache_array = Series([], dtype=object) # just an empty array
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array)
else:
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index fe65653ba6545..198ae1cbd4967 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -559,9 +559,14 @@ def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
assert pd.to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
- def test_to_datetime_array_of_dt64s(self, cache):
- dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
-
+ @pytest.mark.parametrize("unit", ["s", "D"])
+ def test_to_datetime_array_of_dt64s(self, cache, unit):
+ # https://github.com/pandas-dev/pandas/issues/31491
+ # Need at least 50 to ensure cache is used.
+ dts = [
+ np.datetime64("2000-01-01", unit),
+ np.datetime64("2000-01-02", unit),
+ ] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
@@ -579,11 +584,8 @@ def test_to_datetime_array_of_dt64s(self, cache):
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="coerce", cache=cache),
pd.DatetimeIndex(
- [
- Timestamp(dts_with_oob[0]).asm8,
- Timestamp(dts_with_oob[1]).asm8,
- pd.NaT,
- ]
+ [Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ + [pd.NaT],
),
)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 2b46f86d49c5e..57ee3bedd4d9f 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -420,6 +420,18 @@ def test_datetime64_dtype_array_returned(self):
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
+ def test_datetime_non_ns(self):
+ a = np.array(["2000", "2000", "2001"], dtype="datetime64[s]")
+ result = pd.unique(a)
+ expected = np.array(["2000", "2001"], dtype="datetime64[ns]")
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_timedelta_non_ns(self):
+ a = np.array(["2000", "2000", "2001"], dtype="timedelta64[s]")
+ result = pd.unique(a)
+ expected = np.array([2000000000000, 2001000000000], dtype="timedelta64[ns]")
+ tm.assert_numpy_array_equal(result, expected)
+
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype="m8[ns]")
| Backport PR #31520: REGR: to_datetime, unique with OOB values | https://api.github.com/repos/pandas-dev/pandas/pulls/31540 | 2020-02-01T14:30:13Z | 2020-02-01T15:07:26Z | 2020-02-01T15:07:26Z | 2020-02-01T15:07:26Z |
TST: Added regression test | diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index f7211ab5f9fd4..9a6ae76658949 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1,7 +1,7 @@
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
-from datetime import datetime, timedelta
+from datetime import datetime, time, timedelta
from itertools import product, starmap
import operator
import warnings
@@ -1032,6 +1032,8 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array):
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
pd.Period("2011-01-01", freq="D"),
+ # https://github.com/pandas-dev/pandas/issues/10329
+ time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
@@ -1069,6 +1071,60 @@ def test_dt64arr_add_sub_parr(
)
assert_invalid_addsub_type(dtarr, parr, msg)
+ def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
+ # https://github.com/pandas-dev/pandas/issues/10329
+
+ tz = tz_naive_fixture
+
+ obj1 = pd.date_range("2012-01-01", periods=3, tz=tz)
+ obj2 = [time(i, i, i) for i in range(3)]
+
+ obj1 = tm.box_expected(obj1, box_with_array)
+ obj2 = tm.box_expected(obj2, box_with_array)
+
+ with warnings.catch_warnings(record=True):
+ # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
+ # applied to Series or DatetimeIndex
+ # we aren't testing that here, so ignore.
+ warnings.simplefilter("ignore", PerformanceWarning)
+
+ # If `x + y` raises, then `y + x` should raise here as well
+
+ msg = (
+ r"unsupported operand type\(s\) for -: "
+ "'(Timestamp|DatetimeArray)' and 'datetime.time'"
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj1 - obj2
+
+ msg = "|".join(
+ [
+ "cannot subtract DatetimeArray from ndarray",
+ "ufunc (subtract|'subtract') cannot use operands with types "
+ r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj2 - obj1
+
+ msg = (
+ r"unsupported operand type\(s\) for \+: "
+ "'(Timestamp|DatetimeArray)' and 'datetime.time'"
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj1 + obj2
+
+ msg = "|".join(
+ [
+ r"unsupported operand type\(s\) for \+: "
+ "'(Timestamp|DatetimeArray)' and 'datetime.time'",
+ "ufunc (add|'add') cannot use operands with types "
+ r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj2 + obj1
+
class TestDatetime64DateOffsetArithmetic:
| - [x] closes #10329
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31538 | 2020-02-01T13:36:47Z | 2020-04-03T20:20:04Z | 2020-04-03T20:20:04Z | 2020-04-06T08:46:06Z |
TST: Added regression test case | diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 202e30287881f..792b8bb4f25ab 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -1305,3 +1305,35 @@ def test_dataframe_div_silenced():
)
with tm.assert_produces_warning(None):
pdf1.div(pdf2, fill_value=0)
+
+
+class TestNumericArraylikeArithmeticWithBool:
+ @pytest.mark.parametrize("num", [complex(1), np.int64(1), 1, 1.0])
+ def test_array_like_bool_and_num_op_coerce(
+ self, num, all_arithmetic_functions, box_with_array
+ ):
+ # https://github.com/pandas-dev/pandas/issues/18549
+ op = all_arithmetic_functions
+
+ if op.__name__ in [
+ "floordiv",
+ "mod",
+ "mul",
+ "pow",
+ "rfloordiv",
+ "rpow",
+ "rmod",
+ "rmul",
+ "rtruediv",
+ "truediv",
+ ]:
+ pytest.xfail("Arithmetic operation is not supported")
+
+ bool_box = [True]
+ expected = [op(num, num)]
+
+ bool_box = tm.box_expected(bool_box, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
+
+ tm.assert_equal(expected, op(bool_box, num))
+ tm.assert_equal(expected, op(num, bool_box))
| Resurrection of #28966
- [x] closes #18549
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31536 | 2020-02-01T13:11:59Z | 2020-04-06T09:48:29Z | null | 2020-04-06T09:49:03Z |
CLN: Replace isinstace(foo, Class) with isinstance(foo, ABCClass) | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index d9b53aa4a867c..8383b783d90e7 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -33,6 +33,7 @@
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
+ ABCPeriod,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
@@ -960,8 +961,8 @@ def _get_ordinal_range(start, end, periods, freq, mult=1):
if end is not None:
end = Period(end, freq)
- is_start_per = isinstance(start, Period)
- is_end_per = isinstance(end, Period)
+ is_start_per = isinstance(start, ABCPeriod)
+ is_end_per = isinstance(end, ABCPeriod)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 22ba317e78e63..95cfab4c96af3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3582,8 +3582,8 @@ def _join_multi(self, other, how, return_indexers=True):
if not overlap:
raise ValueError("cannot join with no overlapping index names")
- self_is_mi = isinstance(self, MultiIndex)
- other_is_mi = isinstance(other, MultiIndex)
+ self_is_mi = isinstance(self, ABCMultiIndex)
+ other_is_mi = isinstance(other, ABCMultiIndex)
if self_is_mi and other_is_mi:
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index b42497b507e1f..8829c242b1129 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -1,5 +1,5 @@
"""
-concat routines
+Concat routines.
"""
from typing import Hashable, Iterable, List, Mapping, Optional, Union, overload
@@ -8,6 +8,8 @@
from pandas._typing import FrameOrSeriesUnion
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.arrays.categorical import (
factorize_from_iterable,
@@ -394,11 +396,11 @@ def __init__(
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
- self._is_frame = isinstance(sample, DataFrame)
+ self._is_frame = isinstance(sample, ABCDataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
- self._is_series = isinstance(sample, Series)
+ self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError(
"axis must be between 0 and {ndim}, input was "
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index b0e8e4033edf2..14e79538541af 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -14,7 +14,7 @@
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import is_float, is_scalar
-from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex
+from pandas.core.dtypes.generic import ABCIndex, ABCMultiIndex, ABCPeriodIndex
from pandas import Index
import pandas.core.common as com
@@ -452,7 +452,7 @@ def _format_header_mi(self):
"index ('index'=False) is not yet implemented."
)
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
+ has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
if not (has_aliases or self.header):
return
@@ -500,7 +500,7 @@ def _format_header_mi(self):
self.rowcounter = lnum
def _format_header_regular(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
+ has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
if has_aliases or self.header:
coloffset = 0
@@ -550,7 +550,7 @@ def _format_body(self):
return self._format_regular_rows()
def _format_regular_rows(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
+ has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
if has_aliases or self.header:
self.rowcounter += 1
@@ -590,7 +590,7 @@ def _format_regular_rows(self):
yield cell
def _format_hierarchical_rows(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
+ has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
if has_aliases or self.header:
self.rowcounter += 1
| - [x] ref #27353
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31535 | 2020-02-01T11:37:51Z | 2020-02-01T15:08:17Z | 2020-02-01T15:08:17Z | 2020-02-01T15:24:23Z |
CI: Reverted changes related to 'jedi' warnings | diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py
index 9e164a250cdb1..f66c327e9967d 100644
--- a/pandas/tests/arrays/categorical/test_warnings.py
+++ b/pandas/tests/arrays/categorical/test_warnings.py
@@ -14,16 +14,6 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; c = Categorical([])"
await ip.run_code(code)
-
- # GH 31324 newer jedi version raises Deprecation warning
- import jedi
-
- if jedi.__version__ < "0.16.0":
- warning = tm.assert_produces_warning(None)
- else:
- warning = tm.assert_produces_warning(
- DeprecationWarning, check_stacklevel=False
- )
- with warning:
+ with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("c.", 1))
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 04af9b09bbf89..8d8bd9af73a90 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2410,17 +2410,7 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; idx = pd.Index([1, 2])"
await ip.run_code(code)
-
- # GH 31324 newer jedi version raises Deprecation warning
- import jedi
-
- if jedi.__version__ < "0.16.0":
- warning = tm.assert_produces_warning(None)
- else:
- warning = tm.assert_produces_warning(
- DeprecationWarning, check_stacklevel=False
- )
- with warning:
+ with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("idx.", 4))
| - [x] closes #31407
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31534 | 2020-02-01T11:04:09Z | 2020-02-20T03:17:09Z | null | 2020-02-20T03:17:09Z |
BUG: Series multiplication with timedelta scalar numexpr path | diff --git a/doc/source/whatsnew/v1.0.1.rst b/doc/source/whatsnew/v1.0.1.rst
index ff8433c7cafd9..fd09f5dc13557 100644
--- a/doc/source/whatsnew/v1.0.1.rst
+++ b/doc/source/whatsnew/v1.0.1.rst
@@ -44,6 +44,7 @@ Timezones
Numeric
^^^^^^^
- Bug in dtypes being lost in ``DataFrame.__invert__`` (``~`` operator) with mixed dtypes (:issue:`31183`)
+- Bug in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31467`)
-
Conversion
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index c393b8028113b..3302ed9c219e6 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -8,7 +8,7 @@
import numpy as np
-from pandas._libs import Timestamp, lib, ops as libops
+from pandas._libs import Timedelta, Timestamp, lib, ops as libops
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
@@ -186,11 +186,12 @@ def arithmetic_op(
rvalues = maybe_upcast_for_op(rvalues, lvalues.shape)
if should_extension_dispatch(left, rvalues) or isinstance(
- rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp)
+ rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp, Timedelta)
):
# TimedeltaArray, DatetimeArray, and Timestamp are included here
# because they have `freq` attribute which is handled correctly
# by dispatch_to_extension_op.
+ # Timedelta is included because numexpr will fail on it, see GH#31457
res_values = dispatch_to_extension_op(op, lvalues, rvalues)
else:
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 7c0f94001d306..51d09a92773b1 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -177,6 +177,28 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
commute = scalar_td * index
tm.assert_equal(commute, expected)
+ @pytest.mark.parametrize(
+ "scalar_td",
+ [
+ Timedelta(days=1),
+ Timedelta(days=1).to_timedelta64(),
+ Timedelta(days=1).to_pytimedelta(),
+ ],
+ ids=lambda x: type(x).__name__,
+ )
+ def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box):
+ arr = np.arange(2 * 10 ** 4).astype(np.int64)
+ obj = tm.box_expected(arr, box, transpose=False)
+
+ expected = arr.view("timedelta64[D]").astype("timedelta64[ns]")
+ expected = tm.box_expected(expected, box, transpose=False)
+
+ result = obj * scalar_td
+ tm.assert_equal(result, expected)
+
+ result = scalar_td * obj
+ tm.assert_equal(result, expected)
+
def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
index = numeric_idx[1:3]
| - [x] closes #31457
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31529 | 2020-02-01T03:58:14Z | 2020-02-01T15:24:45Z | 2020-02-01T15:24:45Z | 2020-02-01T16:10:25Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.