| title | diff | body | url | created_at | closed_at | merged_at | updated_at |
|---|---|---|---|---|---|---|---|
Move normalization funcs up to conversion | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 2fbbc81c4b5a1..e1312a40971f0 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# cython: profile=False
cimport numpy as cnp
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index bf22a3a528259..b5285d158b1ed 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -97,9 +97,8 @@ from tslibs.conversion cimport (tz_convert_single, _TSObject,
convert_to_tsobject,
convert_datetime_to_tsobject,
get_datetime64_nanos)
-from tslibs.conversion import (
- tz_localize_to_utc, tz_convert,
- tz_convert_single)
+from tslibs.conversion import (tz_localize_to_utc,
+ tz_convert_single, date_normalize)
from tslibs.nattype import NaT, nat_strings
from tslibs.nattype cimport _checknull_with_nat
@@ -1849,26 +1848,6 @@ cdef inline _to_i8(object val):
return val
-cpdef pydt_to_i8(object pydt):
- """
- Convert to int64 representation compatible with numpy datetime64; converts
- to UTC
- """
- cdef:
- _TSObject ts
-
- ts = convert_to_tsobject(pydt, None, None, 0, 0)
-
- return ts.value
-
-
-def i8_to_pydt(int64_t i8, object tzinfo=None):
- """
- Inverse of pydt_to_i8
- """
- return Timestamp(i8)
-
-
# ----------------------------------------------------------------------
# Accessors
@@ -1892,130 +1871,6 @@ def get_time_micros(ndarray[int64_t] dtindex):
return micros
-cdef int64_t DAY_NS = 86400000000000LL
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def date_normalize(ndarray[int64_t] stamps, tz=None):
- cdef:
- Py_ssize_t i, n = len(stamps)
- pandas_datetimestruct dts
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
-
- if tz is not None:
- tz = maybe_get_tz(tz)
- result = _normalize_local(stamps, tz)
- else:
- with nogil:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i], &dts)
- result[i] = _normalized_stamp(&dts)
-
- return result
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef _normalize_local(ndarray[int64_t] stamps, object tz):
- cdef:
- Py_ssize_t n = len(stamps)
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
- ndarray[int64_t] trans, deltas, pos
- pandas_datetimestruct dts
-
- if is_utc(tz):
- with nogil:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i], &dts)
- result[i] = _normalized_stamp(&dts)
- elif is_tzlocal(tz):
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i], &dts)
- dt = datetime(dts.year, dts.month, dts.day, dts.hour,
- dts.min, dts.sec, dts.us, tz)
- delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
- dt64_to_dtstruct(stamps[i] + delta, &dts)
- result[i] = _normalized_stamp(&dts)
- else:
- # Adjust datetime64 timestamp, recompute datetimestruct
- trans, deltas, typ = get_dst_info(tz)
-
- _pos = trans.searchsorted(stamps, side='right') - 1
- if _pos.dtype != np.int64:
- _pos = _pos.astype(np.int64)
- pos = _pos
-
- # statictzinfo
- if typ not in ['pytz', 'dateutil']:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i] + deltas[0], &dts)
- result[i] = _normalized_stamp(&dts)
- else:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts)
- result[i] = _normalized_stamp(&dts)
-
- return result
-
-cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil:
- dts.hour = 0
- dts.min = 0
- dts.sec = 0
- dts.us = 0
- dts.ps = 0
- return dtstruct_to_dt64(dts)
-
-
-def dates_normalized(ndarray[int64_t] stamps, tz=None):
- cdef:
- Py_ssize_t i, n = len(stamps)
- ndarray[int64_t] trans, deltas
- pandas_datetimestruct dts
-
- if tz is None or is_utc(tz):
- for i in range(n):
- dt64_to_dtstruct(stamps[i], &dts)
- if (dts.hour + dts.min + dts.sec + dts.us) > 0:
- return False
- elif is_tzlocal(tz):
- for i in range(n):
- dt64_to_dtstruct(stamps[i], &dts)
- dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min,
- dts.sec, dts.us, tz)
- dt = dt + tz.utcoffset(dt)
- if (dt.hour + dt.minute + dt.second + dt.microsecond) > 0:
- return False
- else:
- trans, deltas, typ = get_dst_info(tz)
-
- for i in range(n):
- # Adjust datetime64 timestamp, recompute datetimestruct
- pos = trans.searchsorted(stamps[i]) - 1
- inf = tz._transition_info[pos]
-
- dt64_to_dtstruct(stamps[i] + deltas[pos], &dts)
- if (dts.hour + dts.min + dts.sec + dts.us) > 0:
- return False
-
- return True
-
-
# ----------------------------------------------------------------------
# Some general helper functions
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd
index 843a688a2630c..ad817ce8852f2 100644
--- a/pandas/_libs/tslibs/conversion.pxd
+++ b/pandas/_libs/tslibs/conversion.pxd
@@ -26,3 +26,5 @@ cdef void _localize_tso(_TSObject obj, object tz)
cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2)
cdef int64_t get_datetime64_nanos(object val) except? -1
+
+cpdef int64_t pydt_to_i8(object pydt) except? -1
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 61efc865112a9..88372699911c4 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -53,7 +53,6 @@ UTC = pytz.UTC
# ----------------------------------------------------------------------
# Misc Helpers
-
# TODO: How to declare np.datetime64 as the input type?
cdef inline int64_t get_datetime64_nanos(object val) except? -1:
"""
@@ -90,6 +89,27 @@ cdef class _TSObject:
return self.value
+cpdef int64_t pydt_to_i8(object pydt) except? -1:
+ """
+ Convert to int64 representation compatible with numpy datetime64; converts
+ to UTC
+
+ Parameters
+ ----------
+ pydt : object
+
+ Returns
+ -------
+ i8value : np.int64
+ """
+ cdef:
+ _TSObject ts
+
+ ts = convert_to_tsobject(pydt, None, None, 0, 0)
+
+ return ts.value
+
+
cdef convert_to_tsobject(object ts, object tz, object unit,
bint dayfirst, bint yearfirst):
"""
@@ -334,18 +354,18 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
Py_ssize_t delta, posn
datetime dt
+ assert obj.tzinfo is None
+
if is_utc(tz):
- obj.tzinfo = tz
+ pass
+ elif obj.value == NPY_NAT:
+ pass
elif is_tzlocal(tz):
dt64_to_dtstruct(obj.value, &obj.dts)
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour,
obj.dts.min, obj.dts.sec, obj.dts.us, tz)
delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
- if obj.value != NPY_NAT:
- dt64_to_dtstruct(obj.value + delta, &obj.dts)
- else:
- dt64_to_dtstruct(obj.value, &obj.dts)
- obj.tzinfo = tz
+ dt64_to_dtstruct(obj.value + delta, &obj.dts)
else:
# Adjust datetime64 timestamp, recompute datetimestruct
trans, deltas, typ = get_dst_info(tz)
@@ -355,26 +375,17 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
# static/pytz/dateutil specific code
if is_fixed_offset(tz):
# statictzinfo
- if len(deltas) > 0 and obj.value != NPY_NAT:
- dt64_to_dtstruct(obj.value + deltas[0], &obj.dts)
- else:
- dt64_to_dtstruct(obj.value, &obj.dts)
- obj.tzinfo = tz
+ assert len(deltas) == 1, len(deltas)
+ dt64_to_dtstruct(obj.value + deltas[0], &obj.dts)
elif treat_tz_as_pytz(tz):
- inf = tz._transition_info[pos]
- if obj.value != NPY_NAT:
- dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts)
- else:
- dt64_to_dtstruct(obj.value, &obj.dts)
- obj.tzinfo = tz._tzinfos[inf]
+ tz = tz._tzinfos[tz._transition_info[pos]]
+ dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts)
elif treat_tz_as_dateutil(tz):
- if obj.value != NPY_NAT:
- dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts)
- else:
- dt64_to_dtstruct(obj.value, &obj.dts)
- obj.tzinfo = tz
+ dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts)
else:
- obj.tzinfo = tz
+ pass
+
+ obj.tzinfo = tz
cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz):
@@ -782,3 +793,183 @@ cdef inline str _render_tstamp(int64_t val):
""" Helper function to render exception messages"""
from pandas._libs.tslib import Timestamp
return str(Timestamp(val))
+
+
+# ----------------------------------------------------------------------
+# Normalization
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def date_normalize(ndarray[int64_t] stamps, tz=None):
+ """
+ Normalize each of the (nanosecond) timestamps in the given array by
+ rounding down to the beginning of the day (i.e. midnight). If `tz`
+ is not None, then this is midnight for this timezone.
+
+ Parameters
+ ----------
+ stamps : int64 ndarray
+ tz : tzinfo or None
+
+ Returns
+ -------
+ result : int64 ndarray of normalized nanosecond timestamps
+ """
+ cdef:
+ Py_ssize_t i, n = len(stamps)
+ pandas_datetimestruct dts
+ ndarray[int64_t] result = np.empty(n, dtype=np.int64)
+
+ if tz is not None:
+ tz = maybe_get_tz(tz)
+ result = _normalize_local(stamps, tz)
+ else:
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ dt64_to_dtstruct(stamps[i], &dts)
+ result[i] = _normalized_stamp(&dts)
+
+ return result
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz):
+ """
+ Normalize each of the (nanosecond) timestamps in the given array by
+ rounding down to the beginning of the day (i.e. midnight) for the
+ given timezone `tz`.
+
+ Parameters
+ ----------
+ stamps : int64 ndarray
+ tz : tzinfo or None
+
+ Returns
+ -------
+ result : int64 ndarray of normalized nanosecond timestamps
+ """
+ cdef:
+ Py_ssize_t n = len(stamps)
+ ndarray[int64_t] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] trans, deltas, pos
+ pandas_datetimestruct dts
+ datetime dt
+
+ if is_utc(tz):
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ dt64_to_dtstruct(stamps[i], &dts)
+ result[i] = _normalized_stamp(&dts)
+ elif is_tzlocal(tz):
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ dt64_to_dtstruct(stamps[i], &dts)
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, tz)
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ dt64_to_dtstruct(stamps[i] + delta, &dts)
+ result[i] = _normalized_stamp(&dts)
+ else:
+ # Adjust datetime64 timestamp, recompute datetimestruct
+ trans, deltas, typ = get_dst_info(tz)
+
+ _pos = trans.searchsorted(stamps, side='right') - 1
+ if _pos.dtype != np.int64:
+ _pos = _pos.astype(np.int64)
+ pos = _pos
+
+ # statictzinfo
+ if typ not in ['pytz', 'dateutil']:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ dt64_to_dtstruct(stamps[i] + deltas[0], &dts)
+ result[i] = _normalized_stamp(&dts)
+ else:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts)
+ result[i] = _normalized_stamp(&dts)
+
+ return result
+
+
+cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil:
+ """
+ Normalize the given datetimestruct to midnight, then convert to int64_t.
+
+ Parameters
+ ----------
+ *dts : pointer to pandas_datetimestruct
+
+ Returns
+ -------
+ stamp : int64
+ """
+ dts.hour = 0
+ dts.min = 0
+ dts.sec = 0
+ dts.us = 0
+ dts.ps = 0
+ return dtstruct_to_dt64(dts)
+
+
+def is_date_array_normalized(ndarray[int64_t] stamps, tz=None):
+ """
+ Check if all of the given (nanosecond) timestamps are normalized to
+ midnight, i.e. hour == minute == second == 0. If the optional timezone
+ `tz` is not None, then this is midnight for this timezone.
+
+ Parameters
+ ----------
+ stamps : int64 ndarray
+ tz : tzinfo or None
+
+ Returns
+ -------
+ is_normalized : bool True if all stamps are normalized
+ """
+ cdef:
+ Py_ssize_t i, n = len(stamps)
+ ndarray[int64_t] trans, deltas
+ pandas_datetimestruct dts
+ datetime dt
+
+ if tz is None or is_utc(tz):
+ for i in range(n):
+ dt64_to_dtstruct(stamps[i], &dts)
+ if (dts.hour + dts.min + dts.sec + dts.us) > 0:
+ return False
+ elif is_tzlocal(tz):
+ for i in range(n):
+ dt64_to_dtstruct(stamps[i], &dts)
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min,
+ dts.sec, dts.us, tz)
+ dt = dt + tz.utcoffset(dt)
+ if (dt.hour + dt.minute + dt.second + dt.microsecond) > 0:
+ return False
+ else:
+ trans, deltas, typ = get_dst_info(tz)
+
+ for i in range(n):
+ # Adjust datetime64 timestamp, recompute datetimestruct
+ pos = trans.searchsorted(stamps[i]) - 1
+ inf = tz._transition_info[pos]
+
+ dt64_to_dtstruct(stamps[i] + deltas[pos], &dts)
+ if (dts.hour + dts.min + dts.sec + dts.us) > 0:
+ return False
+
+ return True
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index c64b6568a0495..2d8ce4c59fedc 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -15,10 +15,10 @@ np.import_array()
from util cimport is_string_object, is_integer_object
-from pandas._libs.tslib import pydt_to_i8, monthrange
+from pandas._libs.tslib import monthrange
+from conversion cimport tz_convert_single, pydt_to_i8
from frequencies cimport get_freq_code
-from conversion cimport tz_convert_single
# ---------------------------------------------------------------------
# Constants
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index aa99e8920d9b5..2e022cb151008 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -55,8 +55,7 @@
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp, period as libperiod)
-from pandas._libs.tslibs import timezones
-
+from pandas._libs.tslibs import timezones, conversion
# -------- some conversion wrapper functions
@@ -384,8 +383,8 @@ def __new__(cls, data=None,
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
- subarr = libts.tz_localize_to_utc(ints, tz,
- ambiguous=ambiguous)
+ subarr = conversion.tz_localize_to_utc(ints, tz,
+ ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
@@ -531,8 +530,8 @@ def _generate(cls, start, end, periods, name, offset,
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
- index = libts.tz_localize_to_utc(_ensure_int64(index), tz,
- ambiguous=ambiguous)
+ index = conversion.tz_localize_to_utc(_ensure_int64(index), tz,
+ ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
@@ -561,11 +560,11 @@ def _convert_for_op(self, value):
def _local_timestamps(self):
if self.is_monotonic:
- return libts.tz_convert(self.asi8, utc, self.tz)
+ return conversion.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
- result = libts.tz_convert(values.take(indexer), utc, self.tz)
+ result = conversion.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
@@ -1644,7 +1643,7 @@ def normalize(self):
-------
normalized : DatetimeIndex
"""
- new_values = libts.date_normalize(self.asi8, self.tz)
+ new_values = conversion.date_normalize(self.asi8, self.tz)
return DatetimeIndex(new_values, freq='infer', name=self.name,
tz=self.tz)
@@ -1683,7 +1682,7 @@ def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
- return libts.dates_normalized(self.asi8, self.tz)
+ return conversion.is_date_array_normalized(self.asi8, self.tz)
@cache_readonly
def _resolution(self):
@@ -1724,7 +1723,7 @@ def insert(self, loc, item):
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
if self.tz is not None:
- new_dates = libts.tz_convert(new_dates, 'UTC', self.tz)
+ new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
@@ -1764,7 +1763,7 @@ def delete(self, loc):
freq = self.freq
if self.tz is not None:
- new_dates = libts.tz_convert(new_dates, 'UTC', self.tz)
+ new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
@@ -1844,16 +1843,16 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
if self.tz is not None:
if tz is None:
- new_dates = libts.tz_convert(self.asi8, 'UTC', self.tz)
+ new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
- new_dates = libts.tz_localize_to_utc(self.asi8, tz,
- ambiguous=ambiguous,
- errors=errors)
+ new_dates = conversion.tz_localize_to_utc(self.asi8, tz,
+ ambiguous=ambiguous,
+ errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
@@ -2194,7 +2193,7 @@ def _to_m8(key, tz=None):
# this also converts strings
key = Timestamp(key, tz=tz)
- return np.int64(libts.pydt_to_i8(key)).view(_NS_DTYPE)
+ return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
_CACHE_START = Timestamp(datetime(1950, 1, 1))
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 1d1eeb9da2364..a79fb554f9454 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -16,8 +16,9 @@
import pandas.util.testing as tm
from pandas.tseries import offsets, frequencies
-from pandas._libs import tslib, period
+from pandas._libs import period
from pandas._libs.tslibs.timezones import get_timezone
+from pandas._libs.tslibs import conversion
from pandas.compat import lrange, long, PY3
from pandas.util.testing import assert_series_equal
@@ -77,12 +78,12 @@ def test_constructor(self):
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
- assert tslib.pydt_to_i8(result) == expected
+ assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
- assert tslib.pydt_to_i8(result) == expected
+ assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
@@ -90,18 +91,18 @@ def test_constructor(self):
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
- assert tslib.pydt_to_i8(result) == expected_tz
+ assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
- assert tslib.pydt_to_i8(result) == expected_tz
+ assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
- assert tslib.pydt_to_i8(result) == expected_utc
+ assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
@@ -129,30 +130,30 @@ def test_constructor_with_stringoffset(self):
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
- assert tslib.pydt_to_i8(result) == expected
+ assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
- assert tslib.pydt_to_i8(result) == expected
+ assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
- assert tslib.pydt_to_i8(result) == expected_tz
+ assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
- assert tslib.pydt_to_i8(result) == expected_tz
+ assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
- assert tslib.pydt_to_i8(result) == expected_utc
+ assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 724628649796d..3dfad2d4af75e 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -17,7 +17,7 @@
from pandas.core.indexes.datetimes import bdate_range, date_range
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas._libs import tslib
-from pandas._libs.tslibs import timezones
+from pandas._libs.tslibs import timezones, conversion
from pandas import (Index, Series, DataFrame, isna, Timestamp, NaT,
DatetimeIndex, to_datetime)
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
@@ -1738,14 +1738,14 @@ class TestTslib(object):
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
- f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
- result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
+ f = lambda x: conversion.tz_convert_single(x, 'UTC', tz_didx.tz)
+ result = conversion.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
- f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
- result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
+ f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, 'UTC')
+ result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
@@ -1770,14 +1770,14 @@ def compare_local_to_utc(tz_didx, utc_didx):
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
- result = tslib.tz_convert(np.array([], dtype=np.int64),
- timezones.maybe_get_tz('US/Eastern'),
- timezones.maybe_get_tz('Asia/Tokyo'))
+ result = conversion.tz_convert(np.array([], dtype=np.int64),
+ timezones.maybe_get_tz('US/Eastern'),
+ timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
- result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
- timezones.maybe_get_tz('US/Eastern'),
- timezones.maybe_get_tz('Asia/Tokyo'))
+ result = conversion.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
+ timezones.maybe_get_tz('US/Eastern'),
+ timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array(
[tslib.iNaT], dtype=np.int64))
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index be25a439f9075..128dd51a2abea 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -21,6 +21,7 @@
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
+from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.frequencies import ( # noqa
get_freq_code, _base_and_stride, _period_str_to_code,
_INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase,
@@ -583,7 +584,8 @@ def __init__(self, index, warn=True):
# the timezone so they are in local time
if hasattr(index, 'tz'):
if index.tz is not None:
- self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
+ self.values = conversion.tz_convert(self.values,
+ 'UTC', index.tz)
self.warn = warn
| We're going to need `date_normalize` upstream of `tslib` before long, so this moves it to `tslibs.conversion`.
It also simplifies the repeated checking in `_localize_tso`; closes #17944 | https://api.github.com/repos/pandas-dev/pandas/pulls/18086 | 2017-11-03T03:57:15Z | 2017-11-12T21:04:47Z | 2017-11-12T21:04:47Z | 2017-12-08T19:38:48Z |
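The tz-naive path of the moved `date_normalize` is the easiest place to see what "normalizing" means here: each int64 nanosecond stamp is floored to midnight while NaT passes through. Below is a minimal pure-NumPy sketch of that path, assuming pandas' NaT sentinel (`iNaT`) equals the int64 minimum; `naive_date_normalize` and its use of a `DAY_NS` constant are illustrative only, not part of the pandas API.

```python
import numpy as np

DAY_NS = 86_400_000_000_000       # nanoseconds per day, as in the removed DAY_NS constant
NPY_NAT = np.iinfo(np.int64).min  # assumed value of pandas' NaT sentinel (iNaT)

def naive_date_normalize(stamps: np.ndarray) -> np.ndarray:
    """Floor tz-naive i8 nanosecond timestamps to midnight, preserving NaT."""
    stamps = np.asarray(stamps, dtype=np.int64)
    result = stamps - stamps % DAY_NS    # floor-mod gives midnight even for pre-epoch stamps
    result[stamps == NPY_NAT] = NPY_NAT  # NaT slots are restored after the vectorized floor
    return result

stamps = np.array(["2017-11-03T03:57:15", "NaT"], dtype="M8[ns]").view("i8")
print(naive_date_normalize(stamps).view("M8[ns]"))
# ['2017-11-03T00:00:00.000000000' 'NaT']
```

This mirrors `_normalized_stamp` zeroing the hour/minute/second/microsecond/picosecond fields; the Cython version additionally dispatches to `_normalize_local` when a timezone is given.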
move implementation of Timedelta to tslibs.timedeltas | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 0456033dbb731..40d970c7b20f2 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -30,6 +30,7 @@ from pandas._libs import tslib
from pandas._libs.tslib import Timestamp, iNaT, NaT
from tslibs.timezones cimport (
is_utc, is_tzlocal, get_utcoffset, get_dst_info, maybe_get_tz)
+from tslibs.timedeltas cimport delta_to_nanoseconds
from tslibs.parsing import parse_time_string, NAT_SENTINEL
from tslibs.frequencies cimport get_freq_code
@@ -716,8 +717,8 @@ cdef class _Period(object):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
- nanos = tslib._delta_to_nanoseconds(other)
- offset_nanos = tslib._delta_to_nanoseconds(offset)
+ nanos = delta_to_nanoseconds(other)
+ offset_nanos = delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
ordinal = self.ordinal + (nanos // offset_nanos)
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index ec060335c220e..f2edf48a6b829 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -3,7 +3,8 @@ from decimal import Decimal
cimport util
cimport cython
from tslibs.nattype import NaT
-from tslib cimport convert_to_tsobject, convert_to_timedelta64
+from tslib cimport convert_to_tsobject
+from tslibs.timedeltas cimport convert_to_timedelta64
from tslibs.timezones cimport get_timezone
from datetime import datetime, timedelta
iNaT = util.get_nat()
diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd
index 443b3867eb2b5..1c2c679904868 100644
--- a/pandas/_libs/tslib.pxd
+++ b/pandas/_libs/tslib.pxd
@@ -2,7 +2,6 @@ from numpy cimport ndarray, int64_t
from tslibs.conversion cimport convert_to_tsobject
-cpdef convert_to_timedelta64(object, object)
cdef bint _check_all_nulls(obj)
cdef _to_i8(object val)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 08a0ed713d936..6d793b6770113 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -71,8 +71,6 @@ from .tslibs.parsing import parse_datetime_string
cimport cython
-from pandas.compat import iteritems
-
import warnings
import pytz
@@ -85,7 +83,8 @@ import_array()
cdef int64_t NPY_NAT = util.get_nat()
iNaT = NPY_NAT
-from tslibs.timedeltas cimport parse_timedelta_string, cast_from_unit
+from tslibs.timedeltas cimport cast_from_unit, delta_to_nanoseconds
+from tslibs.timedeltas import Timedelta
from tslibs.timezones cimport (
is_utc, is_tzlocal, is_fixed_offset,
treat_tz_as_dateutil, treat_tz_as_pytz,
@@ -1069,7 +1068,7 @@ cdef class _Timestamp(datetime):
return Timestamp((self.freq * other).apply(self), freq=self.freq)
elif PyDelta_Check(other) or hasattr(other, 'delta'):
- nanos = _delta_to_nanoseconds(other)
+ nanos = delta_to_nanoseconds(other)
result = Timestamp(self.value + nanos,
tz=self.tzinfo, freq=self.freq)
if getattr(other, 'normalize', False):
@@ -1789,366 +1788,6 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise',
return oresult
-from tslibs.timedeltas cimport _Timedelta as __Timedelta
-
-# Similar to Timestamp/datetime, this is a construction requirement for
-# timedeltas that we need to do object instantiation in python. This will
-# serve as a C extension type that shadows the Python class, where we do any
-# heavy lifting.
-cdef class _Timedelta(__Timedelta):
-
- def __hash__(_Timedelta self):
- if self._has_ns():
- return hash(self.value)
- else:
- return timedelta.__hash__(self)
-
- def __richcmp__(_Timedelta self, object other, int op):
- cdef:
- _Timedelta ots
- int ndim
-
- if isinstance(other, _Timedelta):
- ots = other
- elif PyDelta_Check(other):
- ots = Timedelta(other)
- else:
- ndim = getattr(other, _NDIM_STRING, -1)
-
- if ndim != -1:
- if ndim == 0:
- if is_timedelta64_object(other):
- other = Timedelta(other)
- else:
- if op == Py_EQ:
- return False
- elif op == Py_NE:
- return True
-
- # only allow ==, != ops
- raise TypeError('Cannot compare type %r with type %r' %
- (type(self).__name__,
- type(other).__name__))
- if util.is_array(other):
- return PyObject_RichCompare(np.array([self]), other, op)
- return PyObject_RichCompare(other, self, reverse_ops[op])
- else:
- if op == Py_EQ:
- return False
- elif op == Py_NE:
- return True
- raise TypeError('Cannot compare type %r with type %r' %
- (type(self).__name__, type(other).__name__))
-
- return cmp_scalar(self.value, ots.value, op)
-
-
-def _binary_op_method_timedeltalike(op, name):
- # define a binary operation that only works if the other argument is
- # timedelta like or an array of timedeltalike
- def f(self, other):
- # an offset
- if hasattr(other, 'delta') and not isinstance(other, Timedelta):
- return op(self, other.delta)
-
- # a datetimelike
- if (isinstance(other, (datetime, np.datetime64))
- and not (isinstance(other, Timestamp) or other is NaT)):
- return op(self, Timestamp(other))
-
- # nd-array like
- if hasattr(other, 'dtype'):
- if other.dtype.kind not in ['m', 'M']:
- # raise rathering than letting numpy return wrong answer
- return NotImplemented
- return op(self.to_timedelta64(), other)
-
- if not _validate_ops_compat(other):
- return NotImplemented
-
- if other is NaT:
- return NaT
-
- try:
- other = Timedelta(other)
- except ValueError:
- # failed to parse as timedelta
- return NotImplemented
-
- return Timedelta(op(self.value, other.value), unit='ns')
-
- f.__name__ = name
- return f
-
-
-def _op_unary_method(func, name):
-
- def f(self):
- return Timedelta(func(self.value), unit='ns')
- f.__name__ = name
- return f
-
-
-cdef bint _validate_ops_compat(other):
- # return True if we are compat with operating
- if _checknull_with_nat(other):
- return True
- elif PyDelta_Check(other) or is_timedelta64_object(other):
- return True
- elif util.is_string_object(other):
- return True
- elif hasattr(other, 'delta'):
- return True
- return False
-
-
-# Python front end to C extension type _Timedelta
-# This serves as the box for timedelta64
-
-
-class Timedelta(_Timedelta):
- """
- Represents a duration, the difference between two dates or times.
-
- Timedelta is the pandas equivalent of python's ``datetime.timedelta``
- and is interchangable with it in most cases.
-
- Parameters
- ----------
- value : Timedelta, timedelta, np.timedelta64, string, or integer
- unit : string, [D,h,m,s,ms,us,ns]
- Denote the unit of the input, if input is an integer. Default 'ns'.
- days, seconds, microseconds,
- milliseconds, minutes, hours, weeks : numeric, optional
- Values for construction in compat with datetime.timedelta.
- np ints and floats will be coereced to python ints and floats.
-
- Notes
- -----
- The ``.value`` attribute is always in ns.
-
- """
-
- def __new__(cls, object value=_no_input, unit=None, **kwargs):
- cdef _Timedelta td_base
-
- if value is _no_input:
- if not len(kwargs):
- raise ValueError("cannot construct a Timedelta without a "
- "value/unit or descriptive keywords "
- "(days,seconds....)")
-
- def _to_py_int_float(v):
- if is_integer_object(v):
- return int(v)
- elif is_float_object(v):
- return float(v)
- raise TypeError("Invalid type {0}. Must be int or "
- "float.".format(type(v)))
-
- kwargs = dict([(k, _to_py_int_float(v))
- for k, v in iteritems(kwargs)])
-
- try:
- nano = kwargs.pop('nanoseconds', 0)
- value = convert_to_timedelta64(
- timedelta(**kwargs), 'ns') + nano
- except TypeError as e:
- raise ValueError("cannot construct a Timedelta from the "
- "passed arguments, allowed keywords are "
- "[weeks, days, hours, minutes, seconds, "
- "milliseconds, microseconds, nanoseconds]")
-
- if isinstance(value, Timedelta):
- value = value.value
- elif is_string_object(value):
- value = np.timedelta64(parse_timedelta_string(value))
- elif PyDelta_Check(value):
- value = convert_to_timedelta64(value, 'ns')
- elif is_timedelta64_object(value):
- if unit is not None:
- value = value.astype('timedelta64[{0}]'.format(unit))
- value = value.astype('timedelta64[ns]')
- elif hasattr(value, 'delta'):
- value = np.timedelta64(_delta_to_nanoseconds(value.delta), 'ns')
- elif is_integer_object(value) or is_float_object(value):
- # unit=None is de-facto 'ns'
- value = convert_to_timedelta64(value, unit)
- elif _checknull_with_nat(value):
- return NaT
- else:
- raise ValueError("Value must be Timedelta, string, integer, "
- "float, timedelta or convertible")
-
- if is_timedelta64_object(value):
- value = value.view('i8')
-
- # nat
- if value == NPY_NAT:
- return NaT
-
- # make timedelta happy
- td_base = _Timedelta.__new__(cls, microseconds=int(value) / 1000)
- td_base.value = value
- td_base.is_populated = 0
- return td_base
-
- def _round(self, freq, rounder):
-
- cdef int64_t result, unit
-
- from pandas.tseries.frequencies import to_offset
- unit = to_offset(freq).nanos
- result = unit * rounder(self.value / float(unit))
- return Timedelta(result, unit='ns')
-
- def round(self, freq):
- """
- Round the Timedelta to the specified resolution
-
- Returns
- -------
- a new Timedelta rounded to the given resolution of `freq`
-
- Parameters
- ----------
- freq : a freq string indicating the rounding resolution
-
- Raises
- ------
- ValueError if the freq cannot be converted
- """
- return self._round(freq, np.round)
-
- def floor(self, freq):
- """
- return a new Timedelta floored to this resolution
-
- Parameters
- ----------
- freq : a freq string indicating the flooring resolution
- """
- return self._round(freq, np.floor)
-
- def ceil(self, freq):
- """
- return a new Timedelta ceiled to this resolution
-
- Parameters
- ----------
- freq : a freq string indicating the ceiling resolution
- """
- return self._round(freq, np.ceil)
-
- def __setstate__(self, state):
- (value) = state
- self.value = value
-
- def __reduce__(self):
- object_state = self.value,
- return (Timedelta, object_state)
-
- __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__')
- __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__')
- __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__')
- __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__')
-
- def __mul__(self, other):
-
- # nd-array like
- if hasattr(other, 'dtype'):
- return other * self.to_timedelta64()
-
- if other is NaT:
- return NaT
-
- # only integers and floats allowed
- if not (is_integer_object(other) or is_float_object(other)):
- return NotImplemented
-
- return Timedelta(other * self.value, unit='ns')
-
- __rmul__ = __mul__
-
- def __truediv__(self, other):
-
- if hasattr(other, 'dtype'):
- return self.to_timedelta64() / other
-
- # integers or floats
- if is_integer_object(other) or is_float_object(other):
- return Timedelta(self.value /other, unit='ns')
-
- if not _validate_ops_compat(other):
- return NotImplemented
-
- other = Timedelta(other)
- if other is NaT:
- return np.nan
- return self.value /float(other.value)
-
- def __rtruediv__(self, other):
- if hasattr(other, 'dtype'):
- return other / self.to_timedelta64()
-
- if not _validate_ops_compat(other):
- return NotImplemented
-
- other = Timedelta(other)
- if other is NaT:
- return NaT
- return float(other.value) / self.value
-
- if not PY3:
- __div__ = __truediv__
- __rdiv__ = __rtruediv__
-
- def __floordiv__(self, other):
-
- if hasattr(other, 'dtype'):
-
- # work with i8
- other = other.astype('m8[ns]').astype('i8')
-
- return self.value // other
-
- # integers only
- if is_integer_object(other):
- return Timedelta(self.value // other, unit='ns')
-
- if not _validate_ops_compat(other):
- return NotImplemented
-
- other = Timedelta(other)
- if other is NaT:
- return np.nan
- return self.value // other.value
-
- def __rfloordiv__(self, other):
- if hasattr(other, 'dtype'):
-
- # work with i8
- other = other.astype('m8[ns]').astype('i8')
- return other // self.value
-
- if not _validate_ops_compat(other):
- return NotImplemented
-
- other = Timedelta(other)
- if other is NaT:
- return NaT
- return other.value // self.value
-
- __inv__ = _op_unary_method(lambda x: -x, '__inv__')
- __neg__ = _op_unary_method(lambda x: -x, '__neg__')
- __pos__ = _op_unary_method(lambda x: x, '__pos__')
- __abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
-
-
-# resolution in ns
-Timedelta.min = Timedelta(np.iinfo(np.int64).min +1)
-Timedelta.max = Timedelta(np.iinfo(np.int64).max)
-
cdef PyTypeObject* td_type = <PyTypeObject*> Timedelta
@@ -2156,122 +1795,9 @@ cdef inline bint is_timedelta(object o):
return Py_TYPE(o) == td_type # isinstance(o, Timedelta)
-cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
- """
- Convert an ndarray to an array of timedeltas. If errors == 'coerce',
- coerce non-convertible objects to NaT. Otherwise, raise.
- """
-
- cdef:
- Py_ssize_t i, n
- ndarray[int64_t] iresult
-
- if errors not in ('ignore', 'raise', 'coerce'):
- raise ValueError("errors must be one of 'ignore', "
- "'raise', or 'coerce'}")
-
- n = values.shape[0]
- result = np.empty(n, dtype='m8[ns]')
- iresult = result.view('i8')
-
- # Usually, we have all strings. If so, we hit the fast path.
- # If this path fails, we try conversion a different way, and
- # this is where all of the error handling will take place.
- try:
- for i in range(n):
- result[i] = parse_timedelta_string(values[i])
- except:
- for i in range(n):
- try:
- result[i] = convert_to_timedelta64(values[i], unit)
- except ValueError:
- if errors == 'coerce':
- result[i] = NPY_NAT
- else:
- raise
-
- return iresult
-
-
-cpdef convert_to_timedelta64(object ts, object unit):
- """
- Convert an incoming object to a timedelta64 if possible
-
- Handle these types of objects:
- - timedelta/Timedelta
- - timedelta64
- - an offset
- - np.int64 (with unit providing a possible modifier)
- - None/NaT
-
- Return an ns based int64
-
- # kludgy here until we have a timedelta scalar
- # handle the numpy < 1.7 case
- """
- if _checknull_with_nat(ts):
- return np.timedelta64(NPY_NAT)
- elif isinstance(ts, Timedelta):
- # already in the proper format
- ts = np.timedelta64(ts.value)
- elif is_datetime64_object(ts):
- # only accept a NaT here
- if ts.astype('int64') == NPY_NAT:
- return np.timedelta64(NPY_NAT)
- elif is_timedelta64_object(ts):
- ts = ts.astype("m8[{0}]".format(unit.lower()))
- elif is_integer_object(ts):
- if ts == NPY_NAT:
- return np.timedelta64(NPY_NAT)
- else:
- if util.is_array(ts):
- ts = ts.astype('int64').item()
- if unit in ['Y', 'M', 'W']:
- ts = np.timedelta64(ts, unit)
- else:
- ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
- elif is_float_object(ts):
- if util.is_array(ts):
- ts = ts.astype('int64').item()
- if unit in ['Y', 'M', 'W']:
- ts = np.timedelta64(int(ts), unit)
- else:
- ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
- elif is_string_object(ts):
- ts = np.timedelta64(parse_timedelta_string(ts))
- elif hasattr(ts, 'delta'):
- ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns')
-
- if PyDelta_Check(ts):
- ts = np.timedelta64(_delta_to_nanoseconds(ts), 'ns')
- elif not is_timedelta64_object(ts):
- raise ValueError("Invalid type for timedelta "
- "scalar: %s" % type(ts))
- return ts.astype('timedelta64[ns]')
-
-
# ----------------------------------------------------------------------
# Conversion routines
-cpdef int64_t _delta_to_nanoseconds(delta) except? -1:
- if util.is_array(delta):
- return delta.astype('m8[ns]').astype('int64')
- if hasattr(delta, 'nanos'):
- return delta.nanos
- if hasattr(delta, 'delta'):
- delta = delta.delta
- if is_timedelta64_object(delta):
- return delta.astype("timedelta64[ns]").item()
- if is_integer_object(delta):
- return delta
-
- return (delta.days * 24 * 60 * 60 * 1000000 +
- delta.seconds * 1000000 +
- delta.microseconds) * 1000
-
-
def cast_to_nanoseconds(ndarray arr):
cdef:
Py_ssize_t i, n = arr.size
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index 4dfd3f3e9eca5..3e7b88b208e89 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -3,19 +3,11 @@
from cpython.datetime cimport timedelta
-from numpy cimport int64_t
+from numpy cimport int64_t, ndarray
# Exposed for tslib, not intended for outside use.
cdef parse_timedelta_string(object ts)
cpdef int64_t cast_from_unit(object ts, object unit) except? -1
-
-
-cdef class _Timedelta(timedelta):
- cdef readonly:
- int64_t value # nanoseconds
- object freq # frequency reference
- bint is_populated # are my components populated
- int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns
-
- cpdef timedelta to_pytimedelta(_Timedelta self)
- cpdef bint _has_ns(self)
+cpdef int64_t delta_to_nanoseconds(delta) except? -1
+cpdef convert_to_timedelta64(object ts, object unit)
+cpdef array_to_timedelta64(ndarray[object] values, unit=*, errors=*)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 2f177868a6947..623babe5422a8 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -5,22 +5,31 @@ import collections
import sys
cdef bint PY3 = (sys.version_info[0] >= 3)
-from cpython cimport PyUnicode_Check
+from cython cimport Py_ssize_t
+
+from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare
import numpy as np
cimport numpy as np
-from numpy cimport int64_t
+from numpy cimport int64_t, ndarray
np.import_array()
from cpython.datetime cimport (datetime, timedelta,
- PyDelta_Check, PyDateTime_IMPORT)
+ PyDateTime_CheckExact,
+ PyDateTime_Check, PyDelta_Check,
+ PyDateTime_IMPORT)
PyDateTime_IMPORT
cimport util
-from util cimport is_timedelta64_object
+from util cimport (is_timedelta64_object, is_datetime64_object,
+ is_integer_object, is_float_object,
+ is_string_object)
+
+from np_datetime cimport cmp_scalar, reverse_ops
-from nattype import nat_strings
+from nattype import nat_strings, NaT
+from nattype cimport _checknull_with_nat
# ----------------------------------------------------------------------
# Constants
@@ -66,8 +75,122 @@ cdef dict timedelta_abbrevs = { 'D': 'd',
'nanos': 'ns',
'nanosecond': 'ns'}
+_no_input = object()
+
# ----------------------------------------------------------------------
+cpdef int64_t delta_to_nanoseconds(delta) except? -1:
+ if util.is_array(delta):
+ return delta.astype('m8[ns]').astype('int64')
+ if hasattr(delta, 'nanos'):
+ return delta.nanos
+ if hasattr(delta, 'delta'):
+ delta = delta.delta
+ if is_timedelta64_object(delta):
+ return delta.astype("timedelta64[ns]").item()
+ if is_integer_object(delta):
+ return delta
+
+ return (delta.days * 24 * 60 * 60 * 1000000 +
+ delta.seconds * 1000000 +
+ delta.microseconds) * 1000
+
+
+cpdef convert_to_timedelta64(object ts, object unit):
+ """
+ Convert an incoming object to a timedelta64 if possible
+
+ Handle these types of objects:
+ - timedelta/Timedelta
+ - timedelta64
+ - an offset
+ - np.int64 (with unit providing a possible modifier)
+ - None/NaT
+
+ Return an ns based int64
+
+ # kludgy here until we have a timedelta scalar
+ # handle the numpy < 1.7 case
+ """
+ if _checknull_with_nat(ts):
+ return np.timedelta64(NPY_NAT)
+ elif isinstance(ts, Timedelta):
+ # already in the proper format
+ ts = np.timedelta64(ts.value)
+ elif is_datetime64_object(ts):
+ # only accept a NaT here
+ if ts.astype('int64') == NPY_NAT:
+ return np.timedelta64(NPY_NAT)
+ elif is_timedelta64_object(ts):
+ ts = ts.astype("m8[{0}]".format(unit.lower()))
+ elif is_integer_object(ts):
+ if ts == NPY_NAT:
+ return np.timedelta64(NPY_NAT)
+ else:
+ if util.is_array(ts):
+ ts = ts.astype('int64').item()
+ if unit in ['Y', 'M', 'W']:
+ ts = np.timedelta64(ts, unit)
+ else:
+ ts = cast_from_unit(ts, unit)
+ ts = np.timedelta64(ts)
+ elif is_float_object(ts):
+ if util.is_array(ts):
+ ts = ts.astype('int64').item()
+ if unit in ['Y', 'M', 'W']:
+ ts = np.timedelta64(int(ts), unit)
+ else:
+ ts = cast_from_unit(ts, unit)
+ ts = np.timedelta64(ts)
+ elif is_string_object(ts):
+ ts = np.timedelta64(parse_timedelta_string(ts))
+ elif hasattr(ts, 'delta'):
+ ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns')
+
+ if PyDelta_Check(ts):
+ ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns')
+ elif not is_timedelta64_object(ts):
+ raise ValueError("Invalid type for timedelta "
+ "scalar: %s" % type(ts))
+ return ts.astype('timedelta64[ns]')
+
+
+cpdef array_to_timedelta64(ndarray[object] values, unit='ns', errors='raise'):
+ """
+ Convert an ndarray to an array of timedeltas. If errors == 'coerce',
+ coerce non-convertible objects to NaT. Otherwise, raise.
+ """
+
+ cdef:
+ Py_ssize_t i, n
+ ndarray[int64_t] iresult
+
+ if errors not in ('ignore', 'raise', 'coerce'):
+ raise ValueError("errors must be one of 'ignore', "
+ "'raise', or 'coerce'}")
+
+ n = values.shape[0]
+ result = np.empty(n, dtype='m8[ns]')
+ iresult = result.view('i8')
+
+ # Usually, we have all strings. If so, we hit the fast path.
+ # If this path fails, we try conversion a different way, and
+ # this is where all of the error handling will take place.
+ try:
+ for i in range(n):
+ result[i] = parse_timedelta_string(values[i])
+ except:
+ for i in range(n):
+ try:
+ result[i] = convert_to_timedelta64(values[i], unit)
+ except ValueError:
+ if errors == 'coerce':
+ result[i] = NPY_NAT
+ else:
+ raise
+
+ return iresult
+
cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
""" return a casting of the unit represented to nanoseconds
@@ -315,23 +438,145 @@ cdef inline timedelta_from_spec(object number, object frac, object unit):
n = ''.join(number) + '.' + ''.join(frac)
return cast_from_unit(float(n), unit)
+
+# ----------------------------------------------------------------------
+# Timedelta ops utilities
+
+cdef bint _validate_ops_compat(other):
+ # return True if we are compat with operating
+ if _checknull_with_nat(other):
+ return True
+ elif PyDelta_Check(other) or is_timedelta64_object(other):
+ return True
+ elif is_string_object(other):
+ return True
+ elif hasattr(other, 'delta'):
+ return True
+ return False
+
+
+def _op_unary_method(func, name):
+ def f(self):
+ return Timedelta(func(self.value), unit='ns')
+ f.__name__ = name
+ return f
+
+
+def _binary_op_method_timedeltalike(op, name):
+ # define a binary operation that only works if the other argument is
+ # timedelta like or an array of timedeltalike
+ def f(self, other):
+ if hasattr(other, 'delta') and not PyDelta_Check(other):
+ # offsets.Tick
+ return op(self, other.delta)
+
+ elif other is NaT:
+ return NaT
+
+ elif is_datetime64_object(other) or PyDateTime_CheckExact(other):
+ # the PyDateTime_CheckExact case is for a datetime object that
+ # is specifically *not* a Timestamp, as the Timestamp case will be
+ # handled after `_validate_ops_compat` returns False below
+ from ..tslib import Timestamp
+ return op(self, Timestamp(other))
+ # We are implicitly requiring the canonical behavior to be
+ # defined by Timestamp methods.
+
+ elif hasattr(other, 'dtype'):
+ # nd-array like
+ if other.dtype.kind not in ['m', 'M']:
+ # raise rather than letting numpy return a wrong answer
+ return NotImplemented
+ return op(self.to_timedelta64(), other)
+
+ elif not _validate_ops_compat(other):
+ return NotImplemented
+
+ try:
+ other = Timedelta(other)
+ except ValueError:
+ # failed to parse as timedelta
+ return NotImplemented
+
+ return Timedelta(op(self.value, other.value), unit='ns')
+
+ f.__name__ = name
+ return f
+
+
# ----------------------------------------------------------------------
# Timedelta Construction
+cdef _to_py_int_float(v):
+ # Note: This used to be defined inside Timedelta.__new__
+ # but cython will not allow `cdef` functions to be defined dynamically.
+ if is_integer_object(v):
+ return int(v)
+ elif is_float_object(v):
+ return float(v)
+ raise TypeError("Invalid type {0}. Must be int or "
+ "float.".format(type(v)))
+
+
# Similar to Timestamp/datetime, this is a construction requirement for
# timedeltas that we need to do object instantiation in python. This will
# serve as a C extension type that shadows the Python class, where we do any
# heavy lifting.
cdef class _Timedelta(timedelta):
- # cdef readonly:
- # int64_t value # nanoseconds
- # object freq # frequency reference
- # bint is_populated # are my components populated
- # int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns
+ cdef readonly:
+ int64_t value # nanoseconds
+ object freq # frequency reference
+ bint is_populated # are my components populated
+ int64_t _sign, _d, _h, _m, _s, _ms, _us, _ns
# higher than np.ndarray and np.matrix
__array_priority__ = 100
+ def __hash__(_Timedelta self):
+ if self._has_ns():
+ return hash(self.value)
+ else:
+ return timedelta.__hash__(self)
+
+ def __richcmp__(_Timedelta self, object other, int op):
+ cdef:
+ _Timedelta ots
+ int ndim
+
+ if isinstance(other, _Timedelta):
+ ots = other
+ elif PyDelta_Check(other):
+ ots = Timedelta(other)
+ else:
+ ndim = getattr(other, "ndim", -1)
+
+ if ndim != -1:
+ if ndim == 0:
+ if is_timedelta64_object(other):
+ other = Timedelta(other)
+ else:
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+
+ # only allow ==, != ops
+ raise TypeError('Cannot compare type %r with type %r' %
+ (type(self).__name__,
+ type(other).__name__))
+ if util.is_array(other):
+ return PyObject_RichCompare(np.array([self]), other, op)
+ return PyObject_RichCompare(other, self, reverse_ops[op])
+ else:
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+ raise TypeError('Cannot compare type %r with type %r' %
+ (type(self).__name__, type(other).__name__))
+
+ return cmp_scalar(self.value, ots.value, op)
+
cpdef bint _has_ns(self):
return self.value % 1000 != 0
@@ -621,3 +866,239 @@ cdef class _Timedelta(timedelta):
tpl = 'P{td.days}DT{td.hours}H{td.minutes}M{seconds}S'.format(
td=components, seconds=seconds)
return tpl
+
+
+# Python front end to C extension type _Timedelta
+# This serves as the box for timedelta64
+
+class Timedelta(_Timedelta):
+ """
+ Represents a duration, the difference between two dates or times.
+
+ Timedelta is the pandas equivalent of python's ``datetime.timedelta``
+ and is interchangeable with it in most cases.
+
+ Parameters
+ ----------
+ value : Timedelta, timedelta, np.timedelta64, string, or integer
+ unit : string, [D,h,m,s,ms,us,ns]
+ Denote the unit of the input, if input is an integer. Default 'ns'.
+ days, seconds, microseconds,
+ milliseconds, minutes, hours, weeks : numeric, optional
+ Values for construction in compat with datetime.timedelta.
+ np ints and floats will be coerced to python ints and floats.
+
+ Notes
+ -----
+ The ``.value`` attribute is always in ns.
+
+ """
+ def __new__(cls, object value=_no_input, unit=None, **kwargs):
+ cdef _Timedelta td_base
+
+ if value is _no_input:
+ if not len(kwargs):
+ raise ValueError("cannot construct a Timedelta without a "
+ "value/unit or descriptive keywords "
+ "(days,seconds....)")
+
+ kwargs = {key: _to_py_int_float(kwargs[key]) for key in kwargs}
+
+ nano = kwargs.pop('nanoseconds', 0)
+ try:
+ value = nano + convert_to_timedelta64(timedelta(**kwargs),
+ 'ns')
+ except TypeError as e:
+ raise ValueError("cannot construct a Timedelta from the "
+ "passed arguments, allowed keywords are "
+ "[weeks, days, hours, minutes, seconds, "
+ "milliseconds, microseconds, nanoseconds]")
+
+ if isinstance(value, Timedelta):
+ value = value.value
+ elif util.is_string_object(value):
+ value = np.timedelta64(parse_timedelta_string(value))
+ elif PyDelta_Check(value):
+ value = convert_to_timedelta64(value, 'ns')
+ elif is_timedelta64_object(value):
+ if unit is not None:
+ value = value.astype('timedelta64[{0}]'.format(unit))
+ value = value.astype('timedelta64[ns]')
+ elif hasattr(value, 'delta'):
+ value = np.timedelta64(delta_to_nanoseconds(value.delta), 'ns')
+ elif is_integer_object(value) or util.is_float_object(value):
+ # unit=None is de-facto 'ns'
+ value = convert_to_timedelta64(value, unit)
+ elif _checknull_with_nat(value):
+ return NaT
+ else:
+ raise ValueError(
+ "Value must be Timedelta, string, integer, "
+ "float, timedelta or convertible")
+
+ if is_timedelta64_object(value):
+ value = value.view('i8')
+
+ # nat
+ if value == NPY_NAT:
+ return NaT
+
+ # make timedelta happy
+ td_base = _Timedelta.__new__(cls, microseconds=int(value) / 1000)
+ td_base.value = value
+ td_base.is_populated = 0
+ return td_base
+
+ def __setstate__(self, state):
+ (value) = state
+ self.value = value
+
+ def __reduce__(self):
+ object_state = self.value,
+ return (Timedelta, object_state)
+
+ def _round(self, freq, rounder):
+ cdef:
+ int64_t result, unit
+
+ from pandas.tseries.frequencies import to_offset
+ unit = to_offset(freq).nanos
+ result = unit * rounder(self.value / float(unit))
+ return Timedelta(result, unit='ns')
+
+ def round(self, freq):
+ """
+ Round the Timedelta to the specified resolution
+
+ Returns
+ -------
+ a new Timedelta rounded to the given resolution of `freq`
+
+ Parameters
+ ----------
+ freq : a freq string indicating the rounding resolution
+
+ Raises
+ ------
+ ValueError if the freq cannot be converted
+ """
+ return self._round(freq, np.round)
+
+ def floor(self, freq):
+ """
+ return a new Timedelta floored to this resolution
+
+ Parameters
+ ----------
+ freq : a freq string indicating the flooring resolution
+ """
+ return self._round(freq, np.floor)
+
+ def ceil(self, freq):
+ """
+ return a new Timedelta ceiled to this resolution
+
+ Parameters
+ ----------
+ freq : a freq string indicating the ceiling resolution
+ """
+ return self._round(freq, np.ceil)
+
+ # ----------------------------------------------------------------
+ # Arithmetic Methods
+ # TODO: Can some of these be defined in the cython class?
+
+ __inv__ = _op_unary_method(lambda x: -x, '__inv__')
+ __neg__ = _op_unary_method(lambda x: -x, '__neg__')
+ __pos__ = _op_unary_method(lambda x: x, '__pos__')
+ __abs__ = _op_unary_method(lambda x: abs(x), '__abs__')
+
+ __add__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__add__')
+ __radd__ = _binary_op_method_timedeltalike(lambda x, y: x + y, '__radd__')
+ __sub__ = _binary_op_method_timedeltalike(lambda x, y: x - y, '__sub__')
+ __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__')
+
+ def __mul__(self, other):
+ if hasattr(other, 'dtype'):
+ # ndarray-like
+ return other * self.to_timedelta64()
+
+ elif other is NaT:
+ return NaT
+
+ elif not (is_integer_object(other) or is_float_object(other)):
+ # only integers and floats allowed
+ return NotImplemented
+
+ return Timedelta(other * self.value, unit='ns')
+
+ __rmul__ = __mul__
+
+ def __truediv__(self, other):
+ if hasattr(other, 'dtype'):
+ return self.to_timedelta64() / other
+
+ elif is_integer_object(other) or is_float_object(other):
+ # integers or floats
+ return Timedelta(self.value / other, unit='ns')
+
+ elif not _validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return np.nan
+ return self.value / float(other.value)
+
+ def __rtruediv__(self, other):
+ if hasattr(other, 'dtype'):
+ return other / self.to_timedelta64()
+
+ elif not _validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+ return float(other.value) / self.value
+
+ if not PY3:
+ __div__ = __truediv__
+ __rdiv__ = __rtruediv__
+
+ def __floordiv__(self, other):
+ if hasattr(other, 'dtype'):
+ # work with i8
+ other = other.astype('m8[ns]').astype('i8')
+ return self.value // other
+
+ elif is_integer_object(other):
+ # integers only
+ return Timedelta(self.value // other, unit='ns')
+
+ elif not _validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return np.nan
+ return self.value // other.value
+
+ def __rfloordiv__(self, other):
+ if hasattr(other, 'dtype'):
+ # work with i8
+ other = other.astype('m8[ns]').astype('i8')
+ return other // self.value
+
+ elif not _validate_ops_compat(other):
+ return NotImplemented
+
+ other = Timedelta(other)
+ if other is NaT:
+ return NaT
+ return other.value // self.value
+
+
+# resolution in ns
+Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1)
+Timedelta.max = Timedelta(np.iinfo(np.int64).max)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index ebc0d50d8ba05..4934ccb49b844 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -24,8 +24,9 @@
from pandas.core.common import AbstractMethodError
import pandas.io.formats.printing as printing
-from pandas._libs import (tslib as libts, lib, iNaT, NaT)
+from pandas._libs import lib, iNaT, NaT
from pandas._libs.period import Period
+from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
@@ -701,7 +702,7 @@ def _add_delta_td(self, other):
# add a delta of a timedeltalike
# return the i8 result view
- inc = libts._delta_to_nanoseconds(other)
+ inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
if self.hasnans:
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index c4938b556c8dd..bd069c1d22403 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -36,6 +36,7 @@
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas._libs.tslibs.fields import isleapyear_arr
+from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs, _ensure_index
@@ -652,10 +653,10 @@ def _maybe_convert_timedelta(self, other):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
if isinstance(other, np.ndarray):
- nanos = np.vectorize(tslib._delta_to_nanoseconds)(other)
+ nanos = np.vectorize(delta_to_nanoseconds)(other)
else:
- nanos = tslib._delta_to_nanoseconds(other)
- offset_nanos = tslib._delta_to_nanoseconds(offset)
+ nanos = delta_to_nanoseconds(other)
+ offset_nanos = delta_to_nanoseconds(offset)
check = np.all(nanos % offset_nanos == 0)
if check:
return nanos // offset_nanos
@@ -672,8 +673,8 @@ def _maybe_convert_timedelta(self, other):
elif is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
- nanos = tslib._delta_to_nanoseconds(other)
- offset_nanos = tslib._delta_to_nanoseconds(offset)
+ nanos = delta_to_nanoseconds(other)
+ offset_nanos = delta_to_nanoseconds(offset)
if (nanos % offset_nanos).all() == 0:
return nanos // offset_nanos
elif is_integer(other):
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 729edc81bb642..c9701d0d8dae8 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -34,6 +34,7 @@
from pandas.tseries.offsets import Tick, DateOffset
from pandas._libs import (lib, index as libindex, tslib as libts,
join as libjoin, Timedelta, NaT, iNaT)
+from pandas._libs.tslibs.timedeltas import array_to_timedelta64
def _td_index_cmp(opname, nat_result=False):
@@ -286,7 +287,7 @@ def _box_func(self):
def _simple_new(cls, values, name=None, freq=None, **kwargs):
values = np.array(values, copy=False)
if values.dtype == np.object_:
- values = libts.array_to_timedelta64(values)
+ values = array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = _ensure_int64(values).view(_TD_DTYPE)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 5a571f9077999..eeb6faf20ffce 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -13,7 +13,7 @@
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
-from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds
+from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds
from pandas.core.indexes.period import PeriodIndex
import pandas.core.common as com
import pandas.core.algorithms as algos
@@ -1186,7 +1186,7 @@ def _adjust_bin_edges(self, binner, ax_values):
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
- day_nanos = _delta_to_nanoseconds(timedelta(1))
+ day_nanos = delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
@@ -1312,7 +1312,7 @@ def _get_range_edges(first, last, offset, closed='left', base=0):
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
- day_nanos = _delta_to_nanoseconds(timedelta(1))
+ day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index f61d9f90d6ca2..94e2f2342bd51 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -5,6 +5,8 @@
import numpy as np
import pandas as pd
import pandas._libs.tslib as tslib
+from pandas._libs.tslibs.timedeltas import (convert_to_timedelta64,
+ array_to_timedelta64)
from pandas.core.dtypes.common import (
_ensure_object,
@@ -140,7 +142,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
"""Convert string 'r' to a timedelta object."""
try:
- result = tslib.convert_to_timedelta64(r, unit)
+ result = convert_to_timedelta64(r, unit)
except ValueError:
if errors == 'raise':
raise
@@ -169,8 +171,8 @@ def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
'timedelta64[ns]', copy=False)
else:
try:
- value = tslib.array_to_timedelta64(_ensure_object(arg),
- unit=unit, errors=errors)
+ value = array_to_timedelta64(_ensure_object(arg),
+ unit=unit, errors=errors)
value = value.astype('timedelta64[ns]', copy=False)
except ValueError:
if errors == 'ignore':
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 862f289d81954..5843aaa23be57 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -14,7 +14,7 @@
from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta
from pandas.util._decorators import cache_readonly
-from pandas._libs.tslib import _delta_to_nanoseconds
+from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
as_datetime, _is_normalized,
@@ -2569,7 +2569,7 @@ def delta(self):
@property
def nanos(self):
- return _delta_to_nanoseconds(self.delta)
+ return delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
@@ -2612,7 +2612,7 @@ def _delta_to_tick(delta):
else:
return Second(seconds)
else:
- nanos = _delta_to_nanoseconds(delta)
+ nanos = delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
diff --git a/setup.py b/setup.py
index f5c27eb3498c5..572c426f26ae3 100755
--- a/setup.py
+++ b/setup.py
@@ -525,6 +525,7 @@ def pxd(name):
'pyxfile': '_libs/period',
'pxdfiles': ['_libs/src/util',
'_libs/lib',
+ '_libs/tslibs/timedeltas',
'_libs/tslibs/timezones',
'_libs/tslibs/nattype'],
'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'],
@@ -587,7 +588,8 @@ def pxd(name):
'sources': np_datetime_sources},
'_libs.tslibs.timedeltas': {
'pyxfile': '_libs/tslibs/timedeltas',
- 'pxdfiles': ['_libs/src/util'],
+ 'pxdfiles': ['_libs/src/util',
+ '_libs/tslibs/nattype'],
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.timezones': {
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18085 | 2017-11-03T02:38:27Z | 2017-11-08T12:00:26Z | 2017-11-08T12:00:26Z | 2017-11-10T16:10:18Z |
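The `_libs/tslibs/timedeltas.pyx` hunk in this entry moves `Timedelta` arithmetic into Python-space methods. A minimal pure-Python sketch of that dispatch pattern — defer to ndarray-likes, propagate missing values, and return `NotImplemented` for unsupported operands so Python can try the reflected operation. `SimpleTimedelta` and the `None`-as-`NaT` stand-in are hypothetical, not the real pandas types:

```python
import numpy as np


class SimpleTimedelta:
    """Toy stand-in for pandas Timedelta; `value` is nanoseconds."""

    def __init__(self, value_ns):
        self.value = int(value_ns)

    def __mul__(self, other):
        if hasattr(other, 'dtype'):
            # ndarray-like: let numpy handle the broadcasting
            return other * np.timedelta64(self.value, 'ns')
        if other is None:
            # stand-in for NaT propagation
            return None
        if not isinstance(other, (int, float)):
            # only integers and floats allowed; NotImplemented lets
            # Python fall back to other.__rmul__
            return NotImplemented
        return SimpleTimedelta(other * self.value)

    __rmul__ = __mul__


td = SimpleTimedelta(10 * 86400 * 10 ** 9)   # ten days in nanoseconds
assert (2 * td).value == 2 * td.value        # reflected op via __rmul__
assert td.__mul__('x') is NotImplemented
```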
DOC: Fix various warnings | diff --git a/doc/source/api.rst b/doc/source/api.rst
index e8b8b3624740d..b5cf593ac0d1f 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1822,7 +1822,7 @@ Interval
Properties
~~~~~~~~~~
.. autosummary::
- :toctree generated/
+ :toctree: generated/
Interval.closed
Interval.closed_left
@@ -1843,7 +1843,7 @@ Timedelta
Properties
~~~~~~~~~~
.. autosummary::
- :toctree generated/
+ :toctree: generated/
Timedelta.asm8
Timedelta.components
@@ -1860,7 +1860,7 @@ Properties
Methods
~~~~~~~
.. autosummary::
- :toctree generated/
+ :toctree: generated/
Timedelta.ceil
Timedelta.floor
diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 0325e54d18e36..2a358900e340d 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -670,6 +670,7 @@ columns of a ``DataFrame``:
.. ipython:: python
:okexcept:
+ :okwarning:
r.agg({'A' : np.sum,
'B' : lambda x: np.std(x, ddof=1)})
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 6eb12324ee461..e006f1809da5a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -358,13 +358,12 @@
# latex_use_modindex = True
-# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'statsmodels': ('http://www.statsmodels.org/devel/', None),
'matplotlib': ('http://matplotlib.org/', None),
- 'python': ('http://docs.python.org/3', None),
- 'numpy': ('http://docs.scipy.org/doc/numpy', None),
- 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
+ 'python': ('https://docs.python.org/3/', None),
+ 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
@@ -573,6 +572,15 @@ def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
+
+suppress_warnings = [
+ # We "overwrite" autosummary with our PandasAutosummary, but
+ # still want the regular autosummary setup to run. So we just
+ # suppress this warning.
+ 'app.add_directive'
+]
+
+
def setup(app):
app.connect("autodoc-process-docstring", remove_flags_docstring)
app.add_autodocumenter(AccessorDocumenter)
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 2a1aa3d0cf17a..40189f0e45518 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -60,7 +60,7 @@ Bug reports must:
The issue will then show up to the *pandas* community and be open to comments/ideas from others.
-.. _contributing.github
+.. _contributing.github:
Working with the code
=====================
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5d6b00a4db72e..36f216601b491 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -115,7 +115,7 @@ header : int or list of ints, default ``'infer'``
names : array-like, default ``None``
List of column names to use. If file contains no header row, then you should
explicitly pass ``header=None``. Duplicates in this list will cause
- a ``UserWarning`` to be issued.
+ a ``UserWarning`` to be issued.
index_col : int or sequence or ``False``, default ``None``
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end of
diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt
index feba3d6224e65..b908b60334f4c 100644
--- a/doc/source/whatsnew/v0.15.2.txt
+++ b/doc/source/whatsnew/v0.15.2.txt
@@ -163,7 +163,7 @@ Other enhancements:
p.all()
- Added support for ``utcfromtimestamp()``, ``fromtimestamp()``, and ``combine()`` on `Timestamp` class (:issue:`5351`).
-- Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here<http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__.
+- Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here <http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__.
- ``Timedelta`` arithmetic returns ``NotImplemented`` in unknown cases, allowing extensions by custom classes (:issue:`8813`).
- ``Timedelta`` now supports arithmetic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`).
- Added ``Timedelta.to_timedelta64()`` method to the public API (:issue:`8884`).
diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index bc5e278df743f..6093e53029cb6 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -216,6 +216,7 @@ contained the values ``[0, 3]``.
**New behavior**:
.. ipython:: python
+ :okwarning:
pd.read_csv(StringIO(data), names=names)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 1a7b75266bfdf..fc869956c820e 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1375,6 +1375,7 @@ Convert to a MultiIndex DataFrame
Convert to an xarray DataArray
.. ipython:: python
+ :okwarning:
p.to_xarray()
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 40d970c7b20f2..72523a19b9595 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -967,7 +967,7 @@ cdef class _Period(object):
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
- on the selected :keyword:`format`. :keyword:`format` must be a string
+ on the selected ``fmt``. ``fmt`` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 6d793b6770113..bf22a3a528259 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -237,15 +237,13 @@ _no_input = object()
class Timestamp(_Timestamp):
- """TimeStamp is the pandas equivalent of python's Datetime
+ """Pandas replacement for datetime.datetime
+
+ TimeStamp is the pandas equivalent of python's Datetime
and is interchangable with it in most cases. It's the type used
for the entries that make up a DatetimeIndex, and other timeseries
oriented data structures in pandas.
- There are essentially three calling conventions for the constructor. The
- primary form accepts four parameters. They can be passed by position or
- keyword.
-
Parameters
----------
ts_input : datetime-like, str, int, float
@@ -259,22 +257,32 @@ class Timestamp(_Timestamp):
offset : str, DateOffset
Deprecated, use freq
+ year, month, day : int
+ .. versionadded:: 0.19.0
+ hour, minute, second, microsecond : int, optional, default 0
+ .. versionadded:: 0.19.0
+ tzinfo : datetime.tzinfo, optional, default None
+ .. versionadded:: 0.19.0
+
+ Notes
+ -----
+ There are essentially three calling conventions for the constructor. The
+ primary form accepts four parameters. They can be passed by position or
+ keyword.
+
The other two forms mimic the parameters from ``datetime.datetime``. They
can be passed by either position or keyword, but not both mixed together.
- :func:`datetime.datetime` Parameters
- ------------------------------------
+ Examples
+ --------
+ >>> pd.Timestamp('2017-01-01T12')
+ Timestamp('2017-01-01 12:00:00')
- .. versionadded:: 0.19.0
+ >>> pd.Timestamp(2017, 1, 1, 12)
+ Timestamp('2017-01-01 12:00:00')
- year : int
- month : int
- day : int
- hour : int, optional, default is 0
- minute : int, optional, default is 0
- second : int, optional, default is 0
- microsecond : int, optional, default is 0
- tzinfo : datetime.tzinfo, optional, default is None
+ >>> pd.Timestamp(year=2017, month=1, day=1, hour=12)
+ Timestamp('2017-01-01 12:00:00')
"""
@classmethod
@@ -592,11 +600,13 @@ class Timestamp(_Timestamp):
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time which Timestamp will be converted to.
None will remove timezone holding local time.
+
ambiguous : bool, 'NaT', default 'raise'
- bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
+ that this flag is only applicable for ambiguous fall dst dates)
- 'NaT' will return NaT for an ambiguous time
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
+
errors : 'raise', 'coerce', default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified timezone (e.g. due to a transition from
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index dedc115501cd0..a5861f5865a39 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -478,11 +478,13 @@ class NaTType(_NaT):
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time which Timestamp will be converted to.
None will remove timezone holding local time.
+
ambiguous : bool, 'NaT', default 'raise'
- bool contains flags to determine if time is dst or not (note
- that this flag is only applicable for ambiguous fall dst dates)
+ that this flag is only applicable for ambiguous fall dst dates)
- 'NaT' will return NaT for an ambiguous time
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
+
errors : 'raise', 'coerce', default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified timezone (e.g. due to a transition from
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 623babe5422a8..869ff5ee77bda 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -820,7 +820,7 @@ cdef class _Timedelta(timedelta):
def isoformat(self):
"""
Format Timedelta as ISO 8601 Duration like
- `P[n]Y[n]M[n]DT[n]H[n]M[n]S`, where the `[n]`s are replaced by the
+ ``P[n]Y[n]M[n]DT[n]H[n]M[n]S``, where the ``[n]`` s are replaced by the
values. See https://en.wikipedia.org/wiki/ISO_8601#Durations
.. versionadded:: 0.20.0
@@ -881,7 +881,7 @@ class Timedelta(_Timedelta):
Parameters
----------
value : Timedelta, timedelta, np.timedelta64, string, or integer
- unit : string, [D,h,m,s,ms,us,ns]
+ unit : string, {'ns', 'us', 'ms', 's', 'm', 'h', 'D'}, optional
Denote the unit of the input, if input is an integer. Default 'ns'.
days, seconds, microseconds,
milliseconds, minutes, hours, weeks : numeric, optional
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index a5df6aea055ab..196f4b2679576 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -202,7 +202,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True,
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~pandas.DataFrame.query` method to inject the
- :attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
+ ``DataFrame.index`` and ``DataFrame.columns``
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 33531e80449d8..59578b96807e1 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -392,8 +392,9 @@ def table_schema_cb(key):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
-use_inf_as_null had been deprecated and will be removed in a future version.
-Use `use_inf_as_na` instead.
+: boolean
+ use_inf_as_null had been deprecated and will be removed in a future
+ version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5f5f785111fb4..70f1ff0a5380d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1692,7 +1692,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True,
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
- Convert the characters <, >, and & to HTML-safe sequences.=
+ Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
@@ -1703,6 +1703,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True,
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
+
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 48e6f8d4d50d3..f1edfe276dfad 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6278,6 +6278,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True):
* 0 or 'index': apply truncation to rows
* 1 or 'columns': apply truncation to columns
+
Default is stat axis for given data type (0 for Series and
DataFrames, 1 for Panels)
copy : boolean, default is True,
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 1acc8c3ed0bbb..8db75accc84e5 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -742,8 +742,8 @@ def _cumcount_array(self, ascending=True):
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
- Note
- ----
+ Notes
+ -----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
@@ -1257,7 +1257,6 @@ def expanding(self, *args, **kwargs):
return ExpandingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
- @Appender(_doc_template)
def pad(self, limit=None):
"""
Forward fill the values
@@ -1269,6 +1268,8 @@ def pad(self, limit=None):
See Also
--------
+ Series.pad
+ DataFrame.pad
Series.fillna
DataFrame.fillna
"""
@@ -1276,7 +1277,6 @@ def pad(self, limit=None):
ffill = pad
@Substitution(name='groupby')
- @Appender(_doc_template)
def backfill(self, limit=None):
"""
Backward fill the values
@@ -1288,6 +1288,8 @@ def backfill(self, limit=None):
See Also
--------
+ Series.backfill
+ DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
@@ -1450,7 +1452,6 @@ def nth(self, n, dropna=None):
return result
@Substitution(name='groupby')
- @Appender(_doc_template)
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
@@ -1507,7 +1508,6 @@ def ngroup(self, ascending=True):
See also
--------
.cumcount : Number the rows in each group.
-
"""
self._set_group_selection()
@@ -1519,7 +1519,6 @@ def ngroup(self, ascending=True):
return result
@Substitution(name='groupby')
- @Appender(_doc_template)
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index bd069c1d22403..a6d5690767c10 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -624,9 +624,9 @@ def to_timestamp(self, freq=None, how='start'):
Parameters
----------
- freq : string or DateOffset, default 'D' for week or longer, 'S'
- otherwise
- Target frequency
+ freq : string or DateOffset, optional
+ Target frequency. The default is 'D' for week or longer,
+ 'S' otherwise
how : {'s', 'e', 'start', 'end'}
Returns
@@ -1039,8 +1039,8 @@ def tz_convert(self, tz):
-------
normalized : DatetimeIndex
- Note
- ----
+ Notes
+ -----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
@@ -1063,8 +1063,8 @@ def tz_localize(self, tz, infer_dst=False):
-------
localized : DatetimeIndex
- Note
- ----
+ Notes
+ -----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ab98b9c4e4f49..35a87fbe7b15b 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -84,12 +84,23 @@
"match-parent", "initial", "unset")
justify_docstring = """
- justify : {'left', 'right', 'center', 'justify',
- 'justify-all', 'start', 'end', 'inherit',
- 'match-parent', 'initial', 'unset'}, default None
+ justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
- of the box."""
+ of the box. Valid values are
+
+ * left
+ * right
+ * center
+ * justify
+ * justify-all
+ * start
+ * end
+ * inherit
+ * match-parent
+ * initial
+ * unset
+"""
return_docstring = """
diff --git a/setup.py b/setup.py
index 572c426f26ae3..dd24c5c14ee69 100755
--- a/setup.py
+++ b/setup.py
@@ -225,8 +225,8 @@ def build_extensions(self):
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
-Note
-----
+Notes
+-----
Windows binaries built against NumPy 1.8.1
"""
| I'm trying to get our doc build cleaned up. Just fixing warnings in this one
(mostly formatting, some references).
Later on I'll figure out why things are slow on sphinx 1.6 and get rid of our hacked numpydoc / ipython directive. | https://api.github.com/repos/pandas-dev/pandas/pulls/18083 | 2017-11-02T22:03:22Z | 2017-11-09T11:58:38Z | 2017-11-09T11:58:38Z | 2017-12-08T18:41:28Z |
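The `doc/source/conf.py` changes carry most of the weight in this entry. Condensed to the two settings involved (a sketch, not the full file):

```python
# Condensed from the doc/source/conf.py hunk above: HTTPS intersphinx
# targets plus a suppressed duplicate-directive warning.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}

suppress_warnings = [
    # autosummary is "overwritten" by PandasAutosummary, but the regular
    # autosummary setup still runs, so Sphinx's warning about the
    # re-registered directive is expected and safe to silence.
    'app.add_directive',
]
```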
Fix 18068: Updates merge_asof error, now outputs datatypes | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 4adafe7c06450..b7b8240a8d77e 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -102,7 +102,7 @@ Sparse
Reshaping
^^^^^^^^^
--
+- Error message in ``pd.merge_asof()`` for key datatype mismatch now includes the datatypes of the left and right keys (:issue:`18068`)
-
-
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e409090e76944..0234a5563326c 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1253,10 +1253,12 @@ def _get_merge_keys(self):
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
- for lk, rk in zip(left_join_keys, right_join_keys):
+ for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
if not is_dtype_equal(lk.dtype, rk.dtype):
- raise MergeError("incompatible merge keys, "
- "must be the same type")
+ raise MergeError("incompatible merge keys [{i}] {lkdtype} and "
+ "{rkdtype}, must be the same type"
+ .format(i=i, lkdtype=lk.dtype,
+ rkdtype=rk.dtype))
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py
index 78bfa2ff8597c..4b2680b9be592 100644
--- a/pandas/tests/reshape/test_merge_asof.py
+++ b/pandas/tests/reshape/test_merge_asof.py
@@ -973,3 +973,15 @@ def test_on_float_by_int(self):
columns=['symbol', 'exch', 'price', 'mpv'])
assert_frame_equal(result, expected)
+
+ def test_merge_datatype_error(self):
+ """ Tests merge datatype mismatch error """
+ msg = 'merge keys \[0\] object and int64, must be the same type'
+
+ left = pd.DataFrame({'left_val': [1, 5, 10],
+ 'a': ['a', 'b', 'c']})
+ right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7],
+ 'a': [1, 2, 3, 6, 7]})
+
+ with tm.assert_raises_regex(MergeError, msg):
+ merge_asof(left, right, on='a')
| - [ ] closes #18068
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18082 | 2017-11-02T16:32:22Z | 2017-11-03T23:03:51Z | 2017-11-03T23:03:51Z | 2017-11-03T23:03:58Z |
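A quick way to see the new error text from this patch (a sketch; the exact wording comes from the `_get_merge_keys` change above, and `pd.errors.MergeError` is assumed available as in pandas 0.20+):

```python
import pandas as pd

# object-dtype key on the left, int64 key on the right
left = pd.DataFrame({'a': ['a', 'b', 'c'], 'left_val': [1, 5, 10]})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7], 'right_val': [1, 2, 3, 6, 7]})

try:
    pd.merge_asof(left, right, on='a')
except pd.errors.MergeError as err:
    # after this patch: "incompatible merge keys [0] object and int64,
    # must be the same type"
    print(err)
```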
Move comparison utilities to np_datetime; | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index d2492064c900c..8a882a465f9f7 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -55,6 +55,8 @@ from datetime cimport (
from datetime import time as datetime_time
from tslibs.np_datetime cimport (check_dts_bounds,
+ reverse_ops,
+ cmp_scalar,
pandas_datetimestruct,
dt64_to_dtstruct, dtstruct_to_dt64,
pydatetime_to_dt64, pydate_to_dt64)
@@ -893,31 +895,6 @@ def unique_deltas(ndarray[int64_t] arr):
return result
-cdef inline bint _cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1:
- if op == Py_EQ:
- return lhs == rhs
- elif op == Py_NE:
- return lhs != rhs
- elif op == Py_LT:
- return lhs < rhs
- elif op == Py_LE:
- return lhs <= rhs
- elif op == Py_GT:
- return lhs > rhs
- elif op == Py_GE:
- return lhs >= rhs
-
-
-cdef int _reverse_ops[6]
-
-_reverse_ops[Py_LT] = Py_GT
-_reverse_ops[Py_LE] = Py_GE
-_reverse_ops[Py_EQ] = Py_EQ
-_reverse_ops[Py_NE] = Py_NE
-_reverse_ops[Py_GT] = Py_LT
-_reverse_ops[Py_GE] = Py_LE
-
-
cdef str _NDIM_STRING = "ndim"
# This is PITA. Because we inherit from datetime, which has very specific
@@ -970,7 +947,7 @@ cdef class _Timestamp(datetime):
raise TypeError('Cannot compare type %r with type %r' %
(type(self).__name__,
type(other).__name__))
- return PyObject_RichCompare(other, self, _reverse_ops[op])
+ return PyObject_RichCompare(other, self, reverse_ops[op])
else:
if op == Py_EQ:
return False
@@ -980,7 +957,7 @@ cdef class _Timestamp(datetime):
(type(self).__name__, type(other).__name__))
self._assert_tzawareness_compat(other)
- return _cmp_scalar(self.value, ots.value, op)
+ return cmp_scalar(self.value, ots.value, op)
def __reduce_ex__(self, protocol):
# python 3.6 compat
@@ -2066,7 +2043,7 @@ cdef class _Timedelta(timedelta):
type(other).__name__))
if util.is_array(other):
return PyObject_RichCompare(np.array([self]), other, op)
- return PyObject_RichCompare(other, self, _reverse_ops[op])
+ return PyObject_RichCompare(other, self, reverse_ops[op])
else:
if op == Py_EQ:
return False
@@ -2075,7 +2052,7 @@ cdef class _Timedelta(timedelta):
raise TypeError('Cannot compare type %r with type %r' %
(type(self).__name__, type(other).__name__))
- return _cmp_scalar(self.value, ots.value, op)
+ return cmp_scalar(self.value, ots.value, op)
def _ensure_components(_Timedelta self):
"""
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index 0e6eda0c88beb..ab77049a9ff5b 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -12,6 +12,10 @@ cdef extern from "../src/datetime/np_datetime.h":
int32_t month, day, hour, min, sec, us, ps, as
+cdef int reverse_ops[6]
+
+cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1
+
cdef check_dts_bounds(pandas_datetimestruct *dts)
cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 217cde2aad677..1c635e6cecc13 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# cython: profile=False
+from cpython cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
+
from cpython.datetime cimport (datetime, date,
PyDateTime_IMPORT,
PyDateTime_GET_YEAR, PyDateTime_GET_MONTH,
@@ -47,6 +49,35 @@ cdef extern from "../src/datetime/np_datetime.h":
pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS
# ----------------------------------------------------------------------
+# Comparison
+
+cdef int reverse_ops[6]
+
+reverse_ops[Py_LT] = Py_GT
+reverse_ops[Py_LE] = Py_GE
+reverse_ops[Py_EQ] = Py_EQ
+reverse_ops[Py_NE] = Py_NE
+reverse_ops[Py_GT] = Py_LT
+reverse_ops[Py_GE] = Py_LE
+
+
+cdef inline bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1:
+ """
+ cmp_scalar is a more performant version of PyObject_RichCompare
+ typed for int64_t arguments.
+ """
+ if op == Py_EQ:
+ return lhs == rhs
+ elif op == Py_NE:
+ return lhs != rhs
+ elif op == Py_LT:
+ return lhs < rhs
+ elif op == Py_LE:
+ return lhs <= rhs
+ elif op == Py_GT:
+ return lhs > rhs
+ elif op == Py_GE:
+ return lhs >= rhs
class OutOfBoundsDatetime(ValueError):
| We will need these comparison utilities available upstream of tslib.
This de-privatizes the appropriate names.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18080 | 2017-11-02T15:22:09Z | 2017-11-02T23:49:55Z | 2017-11-02T23:49:55Z | 2017-12-08T19:38:58Z |
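A pure-Python illustration of the two helpers being moved (the real versions are typed Cython; the opcode constants here mirror CPython's `Py_LT`..`Py_GE` ordering):

```python
import operator

# CPython's rich-comparison opcodes, in their documented order
Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE = range(6)

# reverse_ops maps an opcode to its reflection, so `other < self`
# can be evaluated as `self > other`
reverse_ops = {Py_LT: Py_GT, Py_LE: Py_GE, Py_EQ: Py_EQ,
               Py_NE: Py_NE, Py_GT: Py_LT, Py_GE: Py_LE}

_ops = {Py_LT: operator.lt, Py_LE: operator.le, Py_EQ: operator.eq,
        Py_NE: operator.ne, Py_GT: operator.gt, Py_GE: operator.ge}


def cmp_scalar(lhs, rhs, op):
    """Apply a rich-comparison opcode to two int64-like scalars."""
    return _ops[op](lhs, rhs)


assert cmp_scalar(1, 2, Py_LT)
assert cmp_scalar(2, 1, reverse_ops[Py_LT])  # reflection of 1 < 2
```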
CI: temp disable scipy on windows 3.6 build | diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run
index af7a90b126f22..5d6c074ec1f85 100644
--- a/ci/requirements-3.6_WIN.run
+++ b/ci/requirements-3.6_WIN.run
@@ -6,7 +6,7 @@ openpyxl
xlsxwriter
xlrd
xlwt
-scipy
+# scipy
feather-format
numexpr
pytables
| xref #18073 | https://api.github.com/repos/pandas-dev/pandas/pulls/18078 | 2017-11-02T10:16:11Z | 2017-11-02T10:48:36Z | 2017-11-02T10:48:36Z | 2017-11-02T11:07:51Z |
DOC: Remove duplicate 'in' from contributing.rst (#18040) | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index e345f79dad5c2..1eb3a52e1b050 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -330,7 +330,7 @@ The utility script ``scripts/api_rst_coverage.py`` can be used to compare
the list of methods documented in ``doc/source/api.rst`` (which is used to generate
the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page)
and the actual public methods.
-This will identify methods documented in in ``doc/source/api.rst`` that are not actually
+This will identify methods documented in ``doc/source/api.rst`` that are not actually
class methods, and existing methods that are not documented in ``doc/source/api.rst``.
| - [x] closes #18040 (already closed)
- [ ] Tests added / passed (N/A)
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry (N/A)
Removes a duplicate "in" from the second sentence of the last paragraph of the "About the _pandas_ documentation" section of the "Contributing to Pandas" docs. | https://api.github.com/repos/pandas-dev/pandas/pulls/18076 | 2017-11-02T07:04:36Z | 2017-11-02T11:25:19Z | 2017-11-02T11:25:19Z | 2017-12-08T18:32:29Z |
Move scalar arithmetic tests to tests.scalars | diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 2f3d567599fa6..bf0217e9bf22a 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -199,25 +199,6 @@ def test_ufunc_coercions(self):
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
- def test_overflow_offset(self):
- # xref https://github.com/statsmodels/statsmodels/issues/3374
- # ends up multiplying really large numbers which overflow
-
- t = Timestamp('2017-01-13 00:00:00', freq='D')
- offset = 20169940 * pd.offsets.Day(1)
-
- def f():
- t + offset
- pytest.raises(OverflowError, f)
-
- def f():
- offset + t
- pytest.raises(OverflowError, f)
-
- def f():
- t - offset
- pytest.raises(OverflowError, f)
-
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 9341cf2202f4c..bbc8dd6577b2c 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -51,44 +51,6 @@ def test_numeric_compat(self):
pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
- # FIXME: duplicate. This came from `test_timedelta`, whereas the
- # version above came from `test_astype`. Make sure there aren't more
- # duplicates.
- def test_numeric_compat__(self):
-
- idx = self._holder(np.arange(5, dtype='int64'))
- didx = self._holder(np.arange(5, dtype='int64') ** 2)
- result = idx * 1
- tm.assert_index_equal(result, idx)
-
- result = 1 * idx
- tm.assert_index_equal(result, idx)
-
- result = idx / 1
- tm.assert_index_equal(result, idx)
-
- result = idx // 1
- tm.assert_index_equal(result, idx)
-
- result = idx * np.array(5, dtype='int64')
- tm.assert_index_equal(result,
- self._holder(np.arange(5, dtype='int64') * 5))
-
- result = idx * np.arange(5, dtype='int64')
- tm.assert_index_equal(result, didx)
-
- result = idx * Series(np.arange(5, dtype='int64'))
- tm.assert_index_equal(result, didx)
-
- result = idx * Series(np.arange(5, dtype='float64') + 0.1)
- tm.assert_index_equal(result, self._holder(np.arange(
- 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
-
- # invalid
- pytest.raises(TypeError, lambda: idx * idx)
- pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
- pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
-
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
@@ -406,47 +368,6 @@ def test_addition_ops(self):
expected = Timestamp('20130102')
assert result == expected
- # TODO: Split by op, better name
- def test_ops(self):
- td = Timedelta(10, unit='d')
- assert -td == Timedelta(-10, unit='d')
- assert +td == Timedelta(10, unit='d')
- assert td - td == Timedelta(0, unit='ns')
- assert (td - pd.NaT) is pd.NaT
- assert td + td == Timedelta(20, unit='d')
- assert (td + pd.NaT) is pd.NaT
- assert td * 2 == Timedelta(20, unit='d')
- assert (td * pd.NaT) is pd.NaT
- assert td / 2 == Timedelta(5, unit='d')
- assert td // 2 == Timedelta(5, unit='d')
- assert abs(td) == td
- assert abs(-td) == td
- assert td / td == 1
- assert (td / pd.NaT) is np.nan
- assert (td // pd.NaT) is np.nan
-
- # invert
- assert -td == Timedelta('-10d')
- assert td * -1 == Timedelta('-10d')
- assert -1 * td == Timedelta('-10d')
- assert abs(-td) == Timedelta('10d')
-
- # invalid multiply with another timedelta
- pytest.raises(TypeError, lambda: td * td)
-
- # can't operate with integers
- pytest.raises(TypeError, lambda: td + 2)
- pytest.raises(TypeError, lambda: td - 2)
-
- def test_ops_offsets(self):
- td = Timedelta(10, unit='d')
- assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
- assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
- assert 240 == td / pd.offsets.Hour(1)
- assert 1 / 240.0 == pd.offsets.Hour(1) / td
- assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
- assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
-
def test_ops_ndarray(self):
td = Timedelta('1 day')
@@ -530,50 +451,6 @@ def test_ops_series_object(self):
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
- def test_ops_notimplemented(self):
- class Other:
- pass
-
- other = Other()
-
- td = Timedelta('1 day')
- assert td.__add__(other) is NotImplemented
- assert td.__sub__(other) is NotImplemented
- assert td.__truediv__(other) is NotImplemented
- assert td.__mul__(other) is NotImplemented
- assert td.__floordiv__(other) is NotImplemented
-
- def test_timedelta_ops_scalar(self):
- # GH 6808
- base = pd.to_datetime('20130101 09:01:12.123456')
- expected_add = pd.to_datetime('20130101 09:01:22.123456')
- expected_sub = pd.to_datetime('20130101 09:01:02.123456')
-
- for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
- np.timedelta64(10, 's'),
- np.timedelta64(10000000000, 'ns'),
- pd.offsets.Second(10)]:
- result = base + offset
- assert result == expected_add
-
- result = base - offset
- assert result == expected_sub
-
- base = pd.to_datetime('20130102 09:01:12.123456')
- expected_add = pd.to_datetime('20130103 09:01:22.123456')
- expected_sub = pd.to_datetime('20130101 09:01:02.123456')
-
- for offset in [pd.to_timedelta('1 day, 00:00:10'),
- pd.to_timedelta('1 days, 00:00:10'),
- timedelta(days=1, seconds=10),
- np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
- pd.offsets.Day() + pd.offsets.Second(10)]:
- result = base + offset
- assert result == expected_add
-
- result = base - offset
- assert result == expected_sub
-
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index d4434b3af385b..17c818779c76d 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -40,6 +40,91 @@ def test_to_timedelta_on_nanoseconds(self):
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
+ def test_ops_notimplemented(self):
+ class Other:
+ pass
+
+ other = Other()
+
+ td = Timedelta('1 day')
+ assert td.__add__(other) is NotImplemented
+ assert td.__sub__(other) is NotImplemented
+ assert td.__truediv__(other) is NotImplemented
+ assert td.__mul__(other) is NotImplemented
+ assert td.__floordiv__(other) is NotImplemented
+
+ def test_timedelta_ops_scalar(self):
+ # GH 6808
+ base = pd.to_datetime('20130101 09:01:12.123456')
+ expected_add = pd.to_datetime('20130101 09:01:22.123456')
+ expected_sub = pd.to_datetime('20130101 09:01:02.123456')
+
+ for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
+ np.timedelta64(10, 's'),
+ np.timedelta64(10000000000, 'ns'),
+ pd.offsets.Second(10)]:
+ result = base + offset
+ assert result == expected_add
+
+ result = base - offset
+ assert result == expected_sub
+
+ base = pd.to_datetime('20130102 09:01:12.123456')
+ expected_add = pd.to_datetime('20130103 09:01:22.123456')
+ expected_sub = pd.to_datetime('20130101 09:01:02.123456')
+
+ for offset in [pd.to_timedelta('1 day, 00:00:10'),
+ pd.to_timedelta('1 days, 00:00:10'),
+ timedelta(days=1, seconds=10),
+ np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
+ pd.offsets.Day() + pd.offsets.Second(10)]:
+ result = base + offset
+ assert result == expected_add
+
+ result = base - offset
+ assert result == expected_sub
+
+ def test_ops_offsets(self):
+ td = Timedelta(10, unit='d')
+ assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
+ assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
+ assert 240 == td / pd.offsets.Hour(1)
+ assert 1 / 240.0 == pd.offsets.Hour(1) / td
+ assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
+ assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
+
+ # TODO: Split by op, better name
+ def test_ops(self):
+ td = Timedelta(10, unit='d')
+ assert -td == Timedelta(-10, unit='d')
+ assert +td == Timedelta(10, unit='d')
+ assert td - td == Timedelta(0, unit='ns')
+ assert (td - pd.NaT) is pd.NaT
+ assert td + td == Timedelta(20, unit='d')
+ assert (td + pd.NaT) is pd.NaT
+ assert td * 2 == Timedelta(20, unit='d')
+ assert (td * pd.NaT) is pd.NaT
+ assert td / 2 == Timedelta(5, unit='d')
+ assert td // 2 == Timedelta(5, unit='d')
+ assert abs(td) == td
+ assert abs(-td) == td
+ assert td / td == 1
+ assert (td / pd.NaT) is np.nan
+ assert (td // pd.NaT) is np.nan
+
+ # invert
+ assert -td == Timedelta('-10d')
+ assert td * -1 == Timedelta('-10d')
+ assert -1 * td == Timedelta('-10d')
+ assert abs(-td) == Timedelta('10d')
+
+ # invalid multiply with another timedelta
+ pytest.raises(TypeError, lambda: td * td)
+
+ # can't operate with integers
+ pytest.raises(TypeError, lambda: td + 2)
+ pytest.raises(TypeError, lambda: td - 2)
+
class TestTimedeltas(object):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index efee096797510..4cd9a2fadeb32 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -28,6 +28,24 @@
RESO_MS, RESO_SEC)
+class TestTimestampArithmetic(object):
+ def test_overflow_offset(self):
+ # xref https://github.com/statsmodels/statsmodels/issues/3374
+ # ends up multiplying really large numbers which overflow
+
+ stamp = Timestamp('2017-01-13 00:00:00', freq='D')
+ offset = 20169940 * offsets.Day(1)
+
+ with pytest.raises(OverflowError):
+ stamp + offset
+
+ with pytest.raises(OverflowError):
+ offset + stamp
+
+ with pytest.raises(OverflowError):
+ stamp - offset
+
+
class TestTimestamp(object):
def test_constructor(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18075 | 2017-11-02T04:58:11Z | 2017-11-02T11:56:16Z | 2017-11-02T11:56:16Z | 2017-11-02T14:54:19Z |
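The relocated overflow test boils down to this sketch. On the pandas of this era the failure surfaces as `OverflowError`, since `Day(1).nanos * 20169940` exceeds the int64 range; newer versions may raise `OutOfBoundsDatetime` instead, and the `freq` keyword matches the constructor of this era:

```python
import pandas as pd

stamp = pd.Timestamp('2017-01-13 00:00:00', freq='D')
offset = 20169940 * pd.offsets.Day(1)   # roughly 55,000 years of days

try:
    stamp + offset
except OverflowError as err:
    # xref statsmodels/statsmodels#3374
    print('overflowed:', err)
```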
Index tests in the wrong places | diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 46be24b90faae..0197fc4c52617 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -117,6 +117,15 @@ def test_astype_datetime64(self):
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
+ def test_astype_object(self):
+ rng = date_range('1/1/2000', periods=20)
+
+ casted = rng.astype('O')
+ exp_values = list(rng)
+
+ tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
+ assert casted.tolist() == exp_values
+
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
@@ -287,12 +296,18 @@ def test_to_period_tz_dateutil(self):
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
- def test_astype_object(self):
- # NumPy 1.6.1 weak ns support
- rng = date_range('1/1/2000', periods=20)
-
- casted = rng.astype('O')
- exp_values = list(rng)
-
- tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
- assert casted.tolist() == exp_values
+ def test_to_period_nofreq(self):
+ idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
+ pytest.raises(ValueError, idx.to_period)
+
+ idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
+ freq='infer')
+ assert idx.freqstr == 'D'
+ expected = pd.PeriodIndex(['2000-01-01', '2000-01-02',
+ '2000-01-03'], freq='D')
+ tm.assert_index_equal(idx.to_period(), expected)
+
+ # GH 7606
+ idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
+ assert idx.freqstr is None
+ tm.assert_index_equal(idx.to_period(), expected)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 88bf8a4024112..cc6eeb44c99c9 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -8,10 +8,10 @@
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
-from pandas import (DatetimeIndex, Index, date_range, Series, DataFrame,
+from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, datetime, offsets)
-from pandas.util.testing import assert_series_equal, assert_almost_equal
+from pandas.util.testing import assert_almost_equal
randn = np.random.randn
@@ -223,22 +223,6 @@ def test_append_join_nondatetimeindex(self):
# it works
rng.join(idx, how='outer')
- def test_to_period_nofreq(self):
- idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
- pytest.raises(ValueError, idx.to_period)
-
- idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
- freq='infer')
- assert idx.freqstr == 'D'
- expected = pd.PeriodIndex(['2000-01-01', '2000-01-02',
- '2000-01-03'], freq='D')
- tm.assert_index_equal(idx.to_period(), expected)
-
- # GH 7606
- idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
- assert idx.freqstr is None
- tm.assert_index_equal(idx.to_period(), expected)
-
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
@@ -567,13 +551,6 @@ def test_does_not_convert_mixed_integer(self):
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
- def test_slice_keeps_name(self):
- # GH4226
- st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
- et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
- dr = pd.date_range(st, et, freq='H', name='timebucket')
- assert dr[1:].name == dr.name
-
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
@@ -687,59 +664,3 @@ def test_factorize_dst(self):
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
-
- def test_slice_with_negative_step(self):
- ts = Series(np.arange(20),
- date_range('2014-01-01', periods=20, freq='MS'))
- SLC = pd.IndexSlice
-
- def assert_slices_equivalent(l_slc, i_slc):
- assert_series_equal(ts[l_slc], ts.iloc[i_slc])
- assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
- assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
-
- assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])
- assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])
-
- assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])
- assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])
-
- assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],
- SLC[13:8:-1])
- assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(
- '2014-10-01'):-1], SLC[13:8:-1])
- assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],
- SLC[13:8:-1])
- assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],
- SLC[13:8:-1])
-
- assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])
-
- def test_slice_with_zero_step_raises(self):
- ts = Series(np.arange(20),
- date_range('2014-01-01', periods=20, freq='MS'))
- tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
- lambda: ts[::0])
- tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
- lambda: ts.loc[::0])
- tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
- lambda: ts.loc[::0])
-
- def test_slice_bounds_empty(self):
- # GH 14354
- empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
-
- right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
- exp = Timestamp('2015-01-02 23:59:59.999999999')
- assert right == exp
-
- left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
- exp = Timestamp('2015-01-02 00:00:00')
- assert left == exp
-
- def test_slice_duplicate_monotonic(self):
- # https://github.com/pandas-dev/pandas/issues/16515
- idx = pd.DatetimeIndex(['2017', '2017'])
- result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc')
- expected = Timestamp('2017-01-01')
- assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 14217ae291a4c..6e66e4a36f905 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -383,49 +383,6 @@ def test_resolution(self):
tz=tz)
assert idx.resolution == expected
- def test_union(self):
- for tz in self.tz:
- # union
- rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
- expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
-
- rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
- expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
-
- rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other3 = pd.DatetimeIndex([], tz=tz)
- expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- for rng, other, expected in [(rng1, other1, expected1),
- (rng2, other2, expected2),
- (rng3, other3, expected3)]:
-
- result_union = rng.union(other)
- tm.assert_index_equal(result_union, expected)
-
- def test_difference(self):
- for tz in self.tz:
- # diff
- rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
- expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
- expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
-
- rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
- other3 = pd.DatetimeIndex([], tz=tz)
- expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
-
- for rng, other, expected in [(rng1, other1, expected1),
- (rng2, other2, expected2),
- (rng3, other3, expected3)]:
- result_diff = rng.difference(other)
- tm.assert_index_equal(result_diff, expected)
-
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index e7d03aa193cbd..50ee88bd82f40 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -12,6 +12,68 @@
class TestSlicing(object):
+ def test_slice_keeps_name(self):
+ # GH4226
+ st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
+ et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
+ dr = pd.date_range(st, et, freq='H', name='timebucket')
+ assert dr[1:].name == dr.name
+
+ def test_slice_with_negative_step(self):
+ ts = Series(np.arange(20),
+ date_range('2014-01-01', periods=20, freq='MS'))
+ SLC = pd.IndexSlice
+
+ def assert_slices_equivalent(l_slc, i_slc):
+ tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
+ tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
+ tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
+
+ assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])
+ assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])
+
+ assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])
+ assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])
+
+ assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],
+ SLC[13:8:-1])
+ assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(
+ '2014-10-01'):-1], SLC[13:8:-1])
+ assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],
+ SLC[13:8:-1])
+ assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],
+ SLC[13:8:-1])
+
+ assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])
+
+ def test_slice_with_zero_step_raises(self):
+ ts = Series(np.arange(20),
+ date_range('2014-01-01', periods=20, freq='MS'))
+ tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
+ lambda: ts[::0])
+ tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
+ lambda: ts.loc[::0])
+ tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
+ lambda: ts.loc[::0])
+
+ def test_slice_bounds_empty(self):
+ # GH 14354
+ empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
+
+ right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
+ exp = Timestamp('2015-01-02 23:59:59.999999999')
+ assert right == exp
+
+ left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
+ exp = Timestamp('2015-01-02 00:00:00')
+ assert left == exp
+
+ def test_slice_duplicate_monotonic(self):
+ # https://github.com/pandas-dev/pandas/issues/16515
+ idx = pd.DatetimeIndex(['2017', '2017'])
+ result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc')
+ expected = Timestamp('2017-01-01')
+ assert result == expected
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index ff436e0501849..5df75338d01d7 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -1,5 +1,6 @@
from datetime import datetime
+import pytest
import numpy as np
import pandas as pd
@@ -11,14 +12,30 @@
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
-class TestDatetimeIndex(object):
+class TestDatetimeIndexSetOps(object):
+ tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
+ 'dateutil/US/Pacific']
- def test_union(self):
- i1 = Int64Index(np.arange(0, 20, 2))
- i2 = Int64Index(np.arange(10, 30, 2))
- result = i1.union(i2)
- expected = Int64Index(np.arange(0, 30, 2))
- tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize("tz", tz)
+ def test_union(self, tz):
+ rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
+ expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
+
+ rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
+ expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
+
+ rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other3 = pd.DatetimeIndex([], tz=tz)
+ expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ for rng, other, expected in [(rng1, other1, expected1),
+ (rng2, other2, expected2),
+ (rng3, other3, expected3)]:
+
+ result_union = rng.union(other)
+ tm.assert_index_equal(result_union, expected)
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
@@ -83,62 +100,62 @@ def test_union_with_DatetimeIndex(self):
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
- def test_intersection(self):
+ @pytest.mark.parametrize("tz", [None, 'Asia/Tokyo', 'US/Eastern',
+ 'dateutil/US/Pacific'])
+ def test_intersection(self, tz):
# GH 4690 (with tz)
- for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
- base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx')
-
- # if target has the same name, it is preserved
- rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx')
- expected2 = date_range('6/1/2000', '6/20/2000', freq='D',
- name='idx')
-
- # if target name is different, it will be reset
- rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other')
- expected3 = date_range('6/1/2000', '6/20/2000', freq='D',
- name=None)
-
- rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx')
- expected4 = DatetimeIndex([], name='idx')
-
- for (rng, expected) in [(rng2, expected2), (rng3, expected3),
- (rng4, expected4)]:
- result = base.intersection(rng)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.freq == expected.freq
- assert result.tz == expected.tz
-
- # non-monotonic
- base = DatetimeIndex(['2011-01-05', '2011-01-04',
- '2011-01-02', '2011-01-03'],
- tz=tz, name='idx')
-
- rng2 = DatetimeIndex(['2011-01-04', '2011-01-02',
- '2011-02-02', '2011-02-03'],
- tz=tz, name='idx')
- expected2 = DatetimeIndex(
- ['2011-01-04', '2011-01-02'], tz=tz, name='idx')
-
- rng3 = DatetimeIndex(['2011-01-04', '2011-01-02',
- '2011-02-02', '2011-02-03'],
- tz=tz, name='other')
- expected3 = DatetimeIndex(
- ['2011-01-04', '2011-01-02'], tz=tz, name=None)
-
- # GH 7880
- rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz,
- name='idx')
- expected4 = DatetimeIndex([], tz=tz, name='idx')
-
- for (rng, expected) in [(rng2, expected2), (rng3, expected3),
- (rng4, expected4)]:
- result = base.intersection(rng)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.freq is None
- assert result.tz == expected.tz
-
+ base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx')
+
+ # if target has the same name, it is preserved
+ rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx')
+ expected2 = date_range('6/1/2000', '6/20/2000', freq='D', name='idx')
+
+ # if target name is different, it will be reset
+ rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other')
+ expected3 = date_range('6/1/2000', '6/20/2000', freq='D', name=None)
+
+ rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx')
+ expected4 = DatetimeIndex([], name='idx')
+
+ for (rng, expected) in [(rng2, expected2), (rng3, expected3),
+ (rng4, expected4)]:
+ result = base.intersection(rng)
+ tm.assert_index_equal(result, expected)
+ assert result.name == expected.name
+ assert result.freq == expected.freq
+ assert result.tz == expected.tz
+
+ # non-monotonic
+ base = DatetimeIndex(['2011-01-05', '2011-01-04',
+ '2011-01-02', '2011-01-03'],
+ tz=tz, name='idx')
+
+ rng2 = DatetimeIndex(['2011-01-04', '2011-01-02',
+ '2011-02-02', '2011-02-03'],
+ tz=tz, name='idx')
+ expected2 = DatetimeIndex(['2011-01-04', '2011-01-02'],
+ tz=tz, name='idx')
+
+ rng3 = DatetimeIndex(['2011-01-04', '2011-01-02',
+ '2011-02-02', '2011-02-03'],
+ tz=tz, name='other')
+ expected3 = DatetimeIndex(['2011-01-04', '2011-01-02'],
+ tz=tz, name=None)
+
+ # GH 7880
+ rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz,
+ name='idx')
+ expected4 = DatetimeIndex([], tz=tz, name='idx')
+
+ for (rng, expected) in [(rng2, expected2), (rng3, expected3),
+ (rng4, expected4)]:
+ result = base.intersection(rng)
+ tm.assert_index_equal(result, expected)
+ assert result.name == expected.name
+ assert result.freq is None
+ assert result.tz == expected.tz
+
+ def test_intersection_empty(self):
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
@@ -155,6 +172,26 @@ def test_intersection_bug_1708(self):
result = index_1 & index_2
assert len(result) == 0
+ @pytest.mark.parametrize("tz", tz)
+ def test_difference(self, tz):
+ rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
+ expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
+ expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
+
+ rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+ other3 = pd.DatetimeIndex([], tz=tz)
+ expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
+
+ for rng, other, expected in [(rng1, other1, expected1),
+ (rng2, other2, expected2),
+ (rng3, other3, expected3)]:
+ result_diff = rng.difference(other)
+ tm.assert_index_equal(result_diff, expected)
+
def test_difference_freq(self):
# GH14323: difference of DatetimeIndex should not preserve frequency
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index 8e5eae2a7a3ef..7c5f82193da6d 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -9,6 +9,10 @@
class TestSlicing(object):
+ def test_slice_keeps_name(self):
+ # GH4226
+ dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
+ assert dr[1:].name == dr.name
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h', periods=500)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 2683110f2f02e..615c0d0ffa210 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -233,12 +233,6 @@ def test_join_self(self):
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
- def test_slice_keeps_name(self):
-
- # GH4226
- dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
- assert dr[1:].name == dr.name
-
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
| https://api.github.com/repos/pandas-dev/pandas/pulls/18074 | 2017-11-02T04:48:38Z | 2017-11-02T22:49:23Z | 2017-11-02T22:49:23Z | 2017-12-08T19:40:30Z | |
CI: silence codecov for unrelated lines | diff --git a/codecov.yml b/codecov.yml
index 1644bf315e0ac..6dd1e33a7a671 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -1,7 +1,7 @@
codecov:
branch: master
-comment: off
+comment: false
coverage:
status:
@@ -11,3 +11,6 @@ coverage:
patch:
default:
target: '50'
+
+github_checks:
+ annotations: false
| https://api.github.com/repos/pandas-dev/pandas/pulls/36600 | 2020-09-24T12:07:45Z | 2020-10-03T14:58:47Z | 2020-10-03T14:58:47Z | 2020-10-03T17:33:56Z | |
numpy version in py36_locale_slow_old_np on 1.1.x | diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml
index 6deb10a408ca4..456ae4e33d742 100644
--- a/ci/deps/azure-36-32bit.yaml
+++ b/ci/deps/azure-36-32bit.yaml
@@ -15,7 +15,7 @@ dependencies:
- attrs=19.1.0
- gcc_linux-32
- gxx_linux-32
- - numpy=1.14.*
+ - numpy=1.15.4
- python-dateutil
- pytz=2017.2
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 0a151944cdceb..4f5b962d48fcd 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -17,7 +17,7 @@ dependencies:
- bottleneck=1.2.*
- lxml
- matplotlib=2.2.2
- - numpy=1.14.*
+ - numpy=1.15.4
- openpyxl=2.5.7
- python-dateutil
- python-blosc
| PR against 1.1.x
Not sure if it is important, but the numpy version in https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=43144&view=logs&jobId=a69e7846-138e-5465-0656-921e8964615b&j=a69e7846-138e-5465-0656-921e8964615b&t=56da51de-fd5a-5466-5244-b5f65d252624 is 1.19.2.
xref #33729 | https://api.github.com/repos/pandas-dev/pandas/pulls/36599 | 2020-09-24T11:32:56Z | 2020-09-25T09:14:09Z | 2020-09-25T09:14:09Z | 2020-09-25T09:14:15Z |
Partial Revert "ENH: infer freq in timedelta_range (#32377)" | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index c63a78c76572f..6f834aa7e3836 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -31,6 +31,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.agg`, :meth:`DataFrame.apply`, :meth:`Series.agg`, and :meth:`Series.apply` where internal suffix is exposed to the users when no relabelling is applied (:issue:`36189`)
- Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`)
+- Fixed regression where adding a :func:`timedelta_range` to a :class:`Timestamp` raised a ``ValueError`` (:issue:`35897`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
@@ -60,7 +61,7 @@ Bug fixes
Other
~~~~~
--
+- Reverted an enhancement added in pandas 1.1.0 where :func:`timedelta_range` inferred a frequency when passed ``start``, ``end``, and ``periods`` (:issue:`32377`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 3eaf428bc64b2..f6f7e8290489e 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -264,10 +264,6 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
index = generate_regular_range(start, end, periods, freq)
else:
index = np.linspace(start.value, end.value, periods).astype("i8")
- if len(index) >= 2:
- # Infer a frequency
- td = Timedelta(index[1] - index[0])
- freq = to_offset(td)
if not left_closed:
index = index[1:]
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index df08fda78823d..20ebc80c7e0af 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -328,8 +328,8 @@ def timedelta_range(
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
- '5 days 00:00:00'],
- dtype='timedelta64[ns]', freq='32H')
+ '5 days 00:00:00'],
+ dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 64d3d5b6d684d..dd9b6269ce5bf 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -2136,3 +2136,20 @@ def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
+
+
+def test_add_timestamp_to_timedelta():
+ # GH: 35897
+ timestamp = pd.Timestamp.now()
+ result = timestamp + pd.timedelta_range("0s", "1s", periods=31)
+ expected = pd.DatetimeIndex(
+ [
+ timestamp
+ + (
+ pd.to_timedelta("0.033333333s") * i
+ + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0]
+ )
+ for i in range(31)
+ ]
+ )
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 7d78fbf9ff190..dc3df4427f351 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -38,7 +38,6 @@ def test_linspace_behavior(self, periods, freq):
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
- assert result.freq == freq
def test_errors(self):
# not enough params
@@ -79,3 +78,8 @@ def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_period
assert Timedelta(start) == res[0]
assert Timedelta(end) >= res[-1]
assert len(res) == expected_periods
+
+ def test_timedelta_range_infer_freq(self):
+ # https://github.com/pandas-dev/pandas/issues/35897
+ result = timedelta_range("0s", "1s", periods=31)
+ assert result.freq is None
| closes #35897
maybe an alternative to #36582, if no changes made since #32377 now fail (not tested locally)
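A minimal illustration of the reverted behavior, using only calls that appear in this diff's tests:

```
import pandas as pd

# After the revert, no frequency is inferred for start/end/periods input
idx = pd.timedelta_range("0s", "1s", periods=31)
assert idx.freq is None

# ...which also lets the range be added to a Timestamp again (GH 35897)
result = pd.Timestamp.now() + idx
```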
cc @phofl @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/36595 | 2020-09-24T09:37:11Z | 2020-09-26T01:31:39Z | 2020-09-26T01:31:38Z | 2020-09-26T08:36:51Z |
CI: Add rst backtick checker | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6319629d57512..d01956bb79e11 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,3 +30,38 @@ repos:
hooks:
- id: pyupgrade
args: [--py37-plus]
+- repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.6.0
+ hooks:
+ - id: rst-backticks
+ # these exclusions should be removed and the files fixed
+ exclude: (?x)(
+ text\.rst|
+ timeseries\.rst|
+ visualization\.rst|
+ missing_data\.rst|
+ options\.rst|
+ reshaping\.rst|
+ scale\.rst|
+ merging\.rst|
+ cookbook\.rst|
+ enhancingperf\.rst|
+ groupby\.rst|
+ io\.rst|
+ overview\.rst|
+ panel\.rst|
+ plotting\.rst|
+ 10min\.rst|
+ basics\.rst|
+ categorical\.rst|
+ contributing\.rst|
+ contributing_docstring\.rst|
+ extending\.rst|
+ ecosystem\.rst|
+ comparison_with_sql\.rst|
+ install\.rst|
+ calculate_statistics\.rst|
+ combine_dataframes\.rst|
+ v0\.|
+ v1\.0\.|
+ v1\.1\.[012])
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 782e7fe16a2dc..226ac87f39d3f 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -117,7 +117,7 @@ Other enhancements
- :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`)
- :class:`Index` with object dtype supports division and multiplication (:issue:`34160`)
- :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`)
-- `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
+- ``Styler`` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`)
@@ -223,12 +223,12 @@ Deprecations
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
-- Performance improvements when creating DataFrame or Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`)
+- Performance improvements when creating DataFrame or Series with dtype ``str`` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`)
- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`)
- Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`)
- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`)
- ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`)
-- Performance improvement in :meth:`pd.to_datetime` with non-`ns` time unit for `float` `dtype` columns (:issue:`20445`)
+- Performance improvement in :meth:`pd.to_datetime` with non-ns time unit for ``float`` ``dtype`` columns (:issue:`20445`)
.. ---------------------------------------------------------------------------
@@ -263,7 +263,7 @@ Timedelta
Timezones
^^^^^^^^^
-- Bug in :func:`date_range` was raising AmbiguousTimeError for valid input with `ambiguous=False` (:issue:`35297`)
+- Bug in :func:`date_range` was raising AmbiguousTimeError for valid input with ``ambiguous=False`` (:issue:`35297`)
-
@@ -304,13 +304,13 @@ Indexing
Missing
^^^^^^^
-- Bug in :meth:`SeriesGroupBy.transform` now correctly handles missing values for `dropna=False` (:issue:`35014`)
+- Bug in :meth:`SeriesGroupBy.transform` now correctly handles missing values for ``dropna=False`` (:issue:`35014`)
-
MultiIndex
^^^^^^^^^^
-- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message `Expected label or tuple of labels` (:issue:`35301`)
+- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message ``"Expected label or tuple of labels"`` (:issue:`35301`)
-
I/O
@@ -318,15 +318,15 @@ I/O
- :func:`read_sas` no longer leaks resources on failure (:issue:`35566`)
- Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`)
-- In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`)
+- In :meth:`read_csv` ``float_precision='round_trip'`` now handles ``decimal`` and ``thousands`` parameters (:issue:`35365`)
- :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`)
-- :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`)
+- :meth:`to_csv` passes compression arguments for ``'gzip'`` always to ``gzip.GzipFile`` (:issue:`28103`)
- :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue:`35058`)
-- :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`)
+- :meth:`to_csv` and :meth:`read_csv` did not honor ``compression`` and ``encoding`` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`)
- :meth:`to_pickle` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`)
- Bug in :func:`LongTableBuilder.middle_separator` was duplicating LaTeX longtable entries in the List of Tables of a LaTeX document (:issue:`34360`)
-- Bug in :meth:`read_csv` with `engine='python'` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`)
-- Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in `pandas-gbq` (:issue:`34654` :issue:`30200`)
+- Bug in :meth:`read_csv` with ``engine='python'`` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`)
+- Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in ``pandas-gbq`` (:issue:`34654`, :issue:`30200`)
Plotting
^^^^^^^^
diff --git a/doc/sphinxext/README.rst b/doc/sphinxext/README.rst
index 2be5372bc0216..8f0f4a8b2636d 100644
--- a/doc/sphinxext/README.rst
+++ b/doc/sphinxext/README.rst
@@ -7,7 +7,7 @@ pandas documentation. These copies originate from other projects:
- ``numpydoc`` - Numpy's Sphinx extensions: this can be found at its own
repository: https://github.com/numpy/numpydoc
- ``ipython_directive`` and ``ipython_console_highlighting`` in the folder
- `ipython_sphinxext` - Sphinx extensions from IPython: these are included
+ ``ipython_sphinxext`` - Sphinx extensions from IPython: these are included
in IPython: https://github.com/ipython/ipython/tree/master/IPython/sphinxext
.. note::
| Adding a pre-commit hook for detecting single backticks around code in RST files. Running it on the v1.2.0 whatsnew, for instance, shows these errors:
```
doc/source/whatsnew/v1.2.0.rst:120:- `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
doc/source/whatsnew/v1.2.0.rst:225:- Performance improvements when creating DataFrame or Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`)
doc/source/whatsnew/v1.2.0.rst:230:- Performance improvement in :meth:`pd.to_datetime` with non-`ns` time unit for `float` `dtype` columns (:issue:`20445`)
doc/source/whatsnew/v1.2.0.rst:265:- Bug in :func:`date_range` was raising AmbiguousTimeError for valid input with `ambiguous=False` (:issue:`35297`)
doc/source/whatsnew/v1.2.0.rst:306:- Bug in :meth:`SeriesGroupBy.transform` now correctly handles missing values for `dropna=False` (:issue:`35014`)
doc/source/whatsnew/v1.2.0.rst:312:- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message `Expected label or tuple of labels` (:issue:`35301`)
doc/source/whatsnew/v1.2.0.rst:320:- In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`)
doc/source/whatsnew/v1.2.0.rst:322:- :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`)
doc/source/whatsnew/v1.2.0.rst:324:- :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`)
doc/source/whatsnew/v1.2.0.rst:327:- Bug in :meth:`read_csv` with `engine='python'` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`)
doc/source/whatsnew/v1.2.0.rst:328:- Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in `pandas-gbq` (:issue:`34654` :issue:`30200`)
```
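For reference, a rough sketch of what the hook looks for (the regex here is a simplified approximation for illustration, not the actual pygrep-hooks pattern):

```
import re

# Flag `single-backtick` spans that are not ``literals`` or :role:`targets`
single_backticks = re.compile(r"(?<![`:])`[^`]+`(?!`)")

line = "- `Styler` now allows direct CSS class name addition"
print(bool(single_backticks.search(line)))  # True
```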
~This is going to fail for now, so we need to figure out how to make it pass.~ | https://api.github.com/repos/pandas-dev/pandas/pulls/36591 | 2020-09-24T02:08:34Z | 2020-09-25T00:48:27Z | 2020-09-25T00:48:27Z | 2020-09-25T00:48:30Z
CLN: clean up new detected trailing whitespace | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e6b021133dd90..3e1222b7be277 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -71,3 +71,5 @@ repos:
hooks:
- id: end-of-file-fixer
exclude: ^LICENSES/|\.(html|csv|txt|svg|py)$
+ - id: trailing-whitespace
+ exclude: \.(html|svg)$
diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
index 5343999c369f7..2af10a5b72d33 100644
--- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c
@@ -1134,7 +1134,7 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name,
}
break;
-
+
}
}
diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c
index f647098140528..8eb995dee645b 100644
--- a/pandas/_libs/tslibs/src/datetime/np_datetime.c
+++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c
@@ -312,7 +312,7 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a,
* object into a NumPy npy_datetimestruct. Uses tzinfo (if present)
* to convert to UTC time.
*
- * The following implementation just asks for attributes, and thus
+ * The following implementation just asks for attributes, and thus
* supports datetime duck typing. The tzinfo time zone conversion
* requires this style of access as well.
*
| - [N/A] closes #xxxx (xref [#36386 (comment)](https://github.com/pandas-dev/pandas/pull/36386#discussion_r493865987))
- [N/A] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [N/A] whatsnew entry
Fixed the remaining trailing whitespace problems that weren't being found by `ci/code_checks.sh` because of the file type filters.
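For illustration, a quick sweep that finds what the new hook catches in file types the shell-based checks skipped (the path below is just an example):

```
import pathlib
import re

for path in pathlib.Path("pandas/_libs").rglob("*.c"):
    for lineno, line in enumerate(path.read_text().splitlines(), start=1):
        if re.search(r"[ \t]+$", line):
            print(f"{path}:{lineno}: trailing whitespace")
```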
| https://api.github.com/repos/pandas-dev/pandas/pulls/36588 | 2020-09-23T21:47:47Z | 2020-10-18T10:39:49Z | 2020-10-18T10:39:48Z | 2020-10-18T10:40:18Z |
TST: 32bit dtype compat #36579 | diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index f42499147cdbb..85a01f1c5278c 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -450,7 +450,7 @@ def test_get_indexer_non_unique(self):
result = idx1.get_indexer_non_unique(idx2)
expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
- expected_missing = np.array([2, 3], dtype=np.int64)
+ expected_missing = np.array([2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(result[0], expected_indexer)
tm.assert_numpy_array_equal(result[1], expected_missing)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f811bd579aaaa..7cafdb61fcb31 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2607,7 +2607,7 @@ def construct(dtype):
ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)
ex2 = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result[0], ex1)
- tm.assert_numpy_array_equal(result[1], ex2.astype(np.int64))
+ tm.assert_numpy_array_equal(result[1], ex2)
else:
no_matches = np.array([-1] * 6, dtype=np.intp)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 6102f43f4db6a..28ceaa61c558f 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1545,7 +1545,7 @@ def test_lookup_nan(self, writable):
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.map_locations(xs)
- tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64))
+ tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_add_signed_zeros(self):
# GH 21866 inconsistent hash-function for float64
@@ -1578,7 +1578,7 @@ def test_lookup_overflow(self, writable):
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
- tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64))
+ tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_get_unique(self):
s = Series([1, 2, 2 ** 63, 2 ** 63], dtype=np.uint64)
| Part of #36579
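Editor's note on why the expected dtypes change: ``np.intp`` follows the platform pointer size, so hard-coding ``int64`` breaks 32-bit builds.

```
import numpy as np

# np.intp is int64 on 64-bit platforms but int32 on 32-bit ones,
# so indexer dtypes are compared against np.intp rather than np.int64
print(np.dtype(np.intp))  # "int64" on a 64-bit build, "int32" on 32-bit
```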
| https://api.github.com/repos/pandas-dev/pandas/pulls/36584 | 2020-09-23T20:14:59Z | 2020-09-24T22:22:57Z | 2020-09-24T22:22:57Z | 2020-10-03T05:42:18Z |
[BUG]: Fix regression when adding timedelta_range to a Timestamp | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index c1effad34ab93..bef38a2900901 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -37,6 +37,7 @@ Fixed regressions
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
+- Fixed regression where adding a :func:`timedelta_range` to a :class:`Timestamp` raised a ``ValueError`` (:issue:`35897`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 3eaf428bc64b2..4526fb9c8623c 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -450,7 +450,7 @@ def _add_datetimelike_scalar(self, other):
result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
- return DatetimeArray(result, dtype=dtype, freq=self.freq)
+ return DatetimeArray._simple_new(result, dtype=dtype, freq=self.freq)
def _addsub_object_array(self, other, op):
# Add or subtract Array-like of objects
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 64d3d5b6d684d..dd9b6269ce5bf 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -2136,3 +2136,20 @@ def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
+
+
+def test_add_timestamp_to_timedelta():
+ # GH: 35897
+ timestamp = pd.Timestamp.now()
+ result = timestamp + pd.timedelta_range("0s", "1s", periods=31)
+ expected = pd.DatetimeIndex(
+ [
+ timestamp
+ + (
+ pd.to_timedelta("0.033333333s") * i
+ + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0]
+ )
+ for i in range(31)
+ ]
+ )
+ tm.assert_index_equal(result, expected)
| - [x] closes #35897
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36582 | 2020-09-23T18:25:51Z | 2020-09-24T23:47:58Z | 2020-09-24T23:47:58Z | 2020-09-29T21:07:39Z |
REF: refactor/cleanup CSSResolver | diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 2e9ee192a1182..8abe13db370ca 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -3,7 +3,7 @@
"""
import re
-from typing import Optional
+from typing import Dict, Optional
import warnings
@@ -12,8 +12,6 @@ class CSSWarning(UserWarning):
This CSS syntax cannot currently be parsed.
"""
- pass
-
def _side_expander(prop_fmt: str):
def expand(self, prop, value: str):
@@ -34,7 +32,64 @@ class CSSResolver:
A callable for parsing and resolving CSS to atomic properties.
"""
- def __call__(self, declarations_str, inherited=None):
+ UNIT_RATIOS = {
+ "rem": ("pt", 12),
+ "ex": ("em", 0.5),
+ # 'ch':
+ "px": ("pt", 0.75),
+ "pc": ("pt", 12),
+ "in": ("pt", 72),
+ "cm": ("in", 1 / 2.54),
+ "mm": ("in", 1 / 25.4),
+ "q": ("mm", 0.25),
+ "!!default": ("em", 0),
+ }
+
+ FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
+ FONT_SIZE_RATIOS.update(
+ {
+ "%": ("em", 0.01),
+ "xx-small": ("rem", 0.5),
+ "x-small": ("rem", 0.625),
+ "small": ("rem", 0.8),
+ "medium": ("rem", 1),
+ "large": ("rem", 1.125),
+ "x-large": ("rem", 1.5),
+ "xx-large": ("rem", 2),
+ "smaller": ("em", 1 / 1.2),
+ "larger": ("em", 1.2),
+ "!!default": ("em", 1),
+ }
+ )
+
+ MARGIN_RATIOS = UNIT_RATIOS.copy()
+ MARGIN_RATIOS.update({"none": ("pt", 0)})
+
+ BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
+ BORDER_WIDTH_RATIOS.update(
+ {
+ "none": ("pt", 0),
+ "thick": ("px", 4),
+ "medium": ("px", 2),
+ "thin": ("px", 1),
+ # Default: medium only if solid
+ }
+ )
+
+ SIDE_SHORTHANDS = {
+ 1: [0, 0, 0, 0],
+ 2: [0, 1, 0, 1],
+ 3: [0, 1, 2, 1],
+ 4: [0, 1, 2, 3],
+ }
+
+ SIDES = ("top", "right", "bottom", "left")
+
+ def __call__(
+ self,
+ declarations_str: str,
+ inherited: Optional[Dict[str, str]] = None,
+ ) -> Dict[str, str]:
"""
The given declarations to atomic properties.
@@ -76,100 +131,78 @@ def __call__(self, declarations_str, inherited=None):
if inherited is None:
inherited = {}
+ props = self._update_initial(props, inherited)
+ props = self._update_font_size(props, inherited)
+ return self._update_other_units(props)
+
+ def _update_initial(
+ self,
+ props: Dict[str, str],
+ inherited: Dict[str, str],
+ ) -> Dict[str, str]:
# 1. resolve inherited, initial
for prop, val in inherited.items():
if prop not in props:
props[prop] = val
- for prop, val in list(props.items()):
+ new_props = props.copy()
+ for prop, val in props.items():
if val == "inherit":
val = inherited.get(prop, "initial")
- if val == "initial":
- val = None
- if val is None:
+ if val in ("initial", None):
# we do not define a complete initial stylesheet
- del props[prop]
+ del new_props[prop]
else:
- props[prop] = val
-
+ new_props[prop] = val
+ return new_props
+
+ def _update_font_size(
+ self,
+ props: Dict[str, str],
+ inherited: Dict[str, str],
+ ) -> Dict[str, str]:
# 2. resolve relative font size
- font_size: Optional[float]
if props.get("font-size"):
- if "font-size" in inherited:
- em_pt = inherited["font-size"]
- assert em_pt[-2:] == "pt"
- em_pt = float(em_pt[:-2])
- else:
- em_pt = None
props["font-size"] = self.size_to_pt(
- props["font-size"], em_pt, conversions=self.FONT_SIZE_RATIOS
+ props["font-size"],
+ self._get_font_size(inherited),
+ conversions=self.FONT_SIZE_RATIOS,
)
+ return props
- font_size = float(props["font-size"][:-2])
- else:
- font_size = None
+ def _get_font_size(self, props: Dict[str, str]) -> Optional[float]:
+ if props.get("font-size"):
+ font_size_string = props["font-size"]
+ return self._get_float_font_size_from_pt(font_size_string)
+ return None
+
+ def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
+ assert font_size_string.endswith("pt")
+ return float(font_size_string.rstrip("pt"))
+ def _update_other_units(self, props: Dict[str, str]) -> Dict[str, str]:
+ font_size = self._get_font_size(props)
# 3. TODO: resolve other font-relative units
for side in self.SIDES:
prop = f"border-{side}-width"
if prop in props:
props[prop] = self.size_to_pt(
- props[prop], em_pt=font_size, conversions=self.BORDER_WIDTH_RATIOS
+ props[prop],
+ em_pt=font_size,
+ conversions=self.BORDER_WIDTH_RATIOS,
)
+
for prop in [f"margin-{side}", f"padding-{side}"]:
if prop in props:
# TODO: support %
props[prop] = self.size_to_pt(
- props[prop], em_pt=font_size, conversions=self.MARGIN_RATIOS
+ props[prop],
+ em_pt=font_size,
+ conversions=self.MARGIN_RATIOS,
)
-
return props
- UNIT_RATIOS = {
- "rem": ("pt", 12),
- "ex": ("em", 0.5),
- # 'ch':
- "px": ("pt", 0.75),
- "pc": ("pt", 12),
- "in": ("pt", 72),
- "cm": ("in", 1 / 2.54),
- "mm": ("in", 1 / 25.4),
- "q": ("mm", 0.25),
- "!!default": ("em", 0),
- }
-
- FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
- FONT_SIZE_RATIOS.update(
- {
- "%": ("em", 0.01),
- "xx-small": ("rem", 0.5),
- "x-small": ("rem", 0.625),
- "small": ("rem", 0.8),
- "medium": ("rem", 1),
- "large": ("rem", 1.125),
- "x-large": ("rem", 1.5),
- "xx-large": ("rem", 2),
- "smaller": ("em", 1 / 1.2),
- "larger": ("em", 1.2),
- "!!default": ("em", 1),
- }
- )
-
- MARGIN_RATIOS = UNIT_RATIOS.copy()
- MARGIN_RATIOS.update({"none": ("pt", 0)})
-
- BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
- BORDER_WIDTH_RATIOS.update(
- {
- "none": ("pt", 0),
- "thick": ("px", 4),
- "medium": ("px", 2),
- "thin": ("px", 1),
- # Default: medium only if solid
- }
- )
-
def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS):
def _error():
warnings.warn(f"Unhandled size: {repr(in_val)}", CSSWarning)
@@ -222,14 +255,6 @@ def atomize(self, declarations):
for prop, value in expand(prop, value):
yield prop, value
- SIDE_SHORTHANDS = {
- 1: [0, 0, 0, 0],
- 2: [0, 1, 0, 1],
- 3: [0, 1, 2, 1],
- 4: [0, 1, 2, 3],
- }
- SIDES = ("top", "right", "bottom", "left")
-
expand_border_color = _side_expander("border-{:s}-color")
expand_border_style = _side_expander("border-{:s}-style")
expand_border_width = _side_expander("border-{:s}-width")
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 0140804e8c7b5..2fccb4f3e9258 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -62,12 +62,13 @@ class CSSToExcelConverter:
# and __call__ make use of instance attributes. We leave them as
# instancemethods so that users can easily experiment with extensions
# without monkey-patching.
+ inherited: Optional[Dict[str, str]]
def __init__(self, inherited: Optional[str] = None):
if inherited is not None:
- inherited = self.compute_css(inherited)
-
- self.inherited = inherited
+ self.inherited = self.compute_css(inherited)
+ else:
+ self.inherited = None
compute_css = CSSResolver()
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Refactor the ``__call__`` method of ``CSSResolver``; a short usage sketch follows below.
- Extract methods
- Reorder class attributes
- Add type annotations | https://api.github.com/repos/pandas-dev/pandas/pulls/36581 | 2020-09-23T18:17:15Z | 2020-09-24T23:50:32Z | 2020-09-24T23:50:32Z | 2020-10-04T13:24:32Z |
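For the refactor above, a short usage sketch of ``CSSResolver`` (the call signature matches the diff; the exact output strings are an assumption):

```
from pandas.io.formats.css import CSSResolver

resolve = CSSResolver()
# Relative sizes are resolved against the inherited font size
props = resolve("font-size: 2em; color: red", inherited={"font-size": "12pt"})
# expected, roughly: {"font-size": "24pt", "color": "red"}
```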
BUG: use cmath to test complex number equality in pandas._testing | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index f87dac0669e00..6c4bd35c8f183 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -276,6 +276,7 @@ Numeric
- Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`)
- Bug in :meth:`Series.equals` where a ``ValueError`` was raised when numpy arrays were compared to scalars (:issue:`35267`)
- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`)
+- Bug in :func:`pd._testing.assert_almost_equal` giving incorrect results for complex numeric types (:issue:`28235`)
-
Conversion
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 64fc8d615ea9c..b2f19fcf5f5da 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -1,3 +1,4 @@
+import cmath
import math
import numpy as np
@@ -7,6 +8,7 @@ from numpy cimport import_array
import_array()
from pandas._libs.util cimport is_array
+from pandas._libs.lib import is_complex
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.missing import array_equivalent, isna
@@ -210,4 +212,14 @@ cpdef assert_almost_equal(a, b,
f"with rtol={rtol}, atol={atol}")
return True
+ if is_complex(a) and is_complex(b):
+ if array_equivalent(a, b, strict_nan=True):
+ # inf comparison
+ return True
+
+ if not cmath.isclose(a, b, rel_tol=rtol, abs_tol=atol):
+ assert False, (f"expected {b:.5f} but got {a:.5f}, "
+ f"with rtol={rtol}, atol={atol}")
+ return True
+
raise AssertionError(f"{a} != {b}")
diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py
index c25668c33bfc4..c4bc3b7ee352d 100644
--- a/pandas/tests/util/test_assert_almost_equal.py
+++ b/pandas/tests/util/test_assert_almost_equal.py
@@ -146,6 +146,37 @@ def test_assert_not_almost_equal_numbers_rtol(a, b):
_assert_not_almost_equal_both(a, b, rtol=0.05)
+@pytest.mark.parametrize(
+ "a,b,rtol",
+ [
+ (1.00001, 1.00005, 0.001),
+ (-0.908356 + 0.2j, -0.908358 + 0.2j, 1e-3),
+ (0.1 + 1.009j, 0.1 + 1.006j, 0.1),
+ (0.1001 + 2.0j, 0.1 + 2.001j, 0.01),
+ ],
+)
+def test_assert_almost_equal_complex_numbers(a, b, rtol):
+ _assert_almost_equal_both(a, b, rtol=rtol)
+ _assert_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)
+ _assert_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)
+
+
+@pytest.mark.parametrize(
+ "a,b,rtol",
+ [
+ (0.58310768, 0.58330768, 1e-7),
+ (-0.908 + 0.2j, -0.978 + 0.2j, 0.001),
+ (0.1 + 1j, 0.1 + 2j, 0.01),
+ (-0.132 + 1.001j, -0.132 + 1.005j, 1e-5),
+ (0.58310768j, 0.58330768j, 1e-9),
+ ],
+)
+def test_assert_not_almost_equal_complex_numbers(a, b, rtol):
+ _assert_not_almost_equal_both(a, b, rtol=rtol)
+ _assert_not_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)
+ _assert_not_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)
+
+
@pytest.mark.parametrize("a,b", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.00000001, 0)])
def test_assert_almost_equal_numbers_with_zeros(a, b):
_assert_almost_equal_both(a, b)
| - [x] closes #28235
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Adds `cmath`-based equality testing for complex numeric types (`complex`, `np.complex64`, and `np.complex128`); a minimal illustration follows below. | https://api.github.com/repos/pandas-dev/pandas/pulls/36580 | 2020-09-23T17:39:39Z | 2020-10-01T17:52:14Z | 2020-10-01T17:52:13Z | 2020-10-01T18:05:12Z
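The illustration mentioned above, with values taken from the new tests in this PR:

```
import cmath

a = -0.908356 + 0.2j
b = -0.908358 + 0.2j
# cmath.isclose folds both the real and imaginary parts into one check
assert cmath.isclose(a, b, rel_tol=1e-3)

# a pair that should *not* compare almost-equal
assert not cmath.isclose(0.1 + 1j, 0.1 + 2j, rel_tol=0.01)
```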
REF: refactor/cleanup of CSSToExcelConverter | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 0140804e8c7b5..79f1b5d73f122 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -5,7 +5,7 @@
from functools import reduce
import itertools
import re
-from typing import Callable, Dict, Optional, Sequence, Union
+from typing import Callable, Dict, Mapping, Optional, Sequence, Union
import warnings
import numpy as np
@@ -58,6 +58,68 @@ class CSSToExcelConverter:
CSS processed by :meth:`__call__`.
"""
+ NAMED_COLORS = {
+ "maroon": "800000",
+ "brown": "A52A2A",
+ "red": "FF0000",
+ "pink": "FFC0CB",
+ "orange": "FFA500",
+ "yellow": "FFFF00",
+ "olive": "808000",
+ "green": "008000",
+ "purple": "800080",
+ "fuchsia": "FF00FF",
+ "lime": "00FF00",
+ "teal": "008080",
+ "aqua": "00FFFF",
+ "blue": "0000FF",
+ "navy": "000080",
+ "black": "000000",
+ "gray": "808080",
+ "grey": "808080",
+ "silver": "C0C0C0",
+ "white": "FFFFFF",
+ }
+
+ VERTICAL_MAP = {
+ "top": "top",
+ "text-top": "top",
+ "middle": "center",
+ "baseline": "bottom",
+ "bottom": "bottom",
+ "text-bottom": "bottom",
+ # OpenXML also has 'justify', 'distributed'
+ }
+
+ BOLD_MAP = {
+ "bold": True,
+ "bolder": True,
+ "600": True,
+ "700": True,
+ "800": True,
+ "900": True,
+ "normal": False,
+ "lighter": False,
+ "100": False,
+ "200": False,
+ "300": False,
+ "400": False,
+ "500": False,
+ }
+
+ ITALIC_MAP = {
+ "normal": False,
+ "italic": True,
+ "oblique": True,
+ }
+
+ FAMILY_MAP = {
+ "serif": 1, # roman
+ "sans-serif": 2, # swiss
+ "cursive": 4, # script
+ "fantasy": 5, # decorative
+ }
+
# NB: Most of the methods here could be classmethods, as only __init__
# and __call__ make use of instance attributes. We leave them as
# instancemethods so that users can easily experiment with extensions
@@ -91,7 +153,7 @@ def __call__(self, declarations_str: str) -> Dict[str, Dict[str, str]]:
properties = self.compute_css(declarations_str, self.inherited)
return self.build_xlstyle(properties)
- def build_xlstyle(self, props: Dict[str, str]) -> Dict[str, Dict[str, str]]:
+ def build_xlstyle(self, props: Mapping[str, str]) -> Dict[str, Dict[str, str]]:
out = {
"alignment": self.build_alignment(props),
"border": self.build_border(props),
@@ -115,29 +177,30 @@ def remove_none(d: Dict[str, str]) -> None:
remove_none(out)
return out
- VERTICAL_MAP = {
- "top": "top",
- "text-top": "top",
- "middle": "center",
- "baseline": "bottom",
- "bottom": "bottom",
- "text-bottom": "bottom",
- # OpenXML also has 'justify', 'distributed'
- }
-
- def build_alignment(self, props) -> Dict[str, Optional[Union[bool, str]]]:
+ def build_alignment(
+ self, props: Mapping[str, str]
+ ) -> Dict[str, Optional[Union[bool, str]]]:
# TODO: text-indent, padding-left -> alignment.indent
return {
"horizontal": props.get("text-align"),
- "vertical": self.VERTICAL_MAP.get(props.get("vertical-align")),
- "wrap_text": (
- None
- if props.get("white-space") is None
- else props["white-space"] not in ("nowrap", "pre", "pre-line")
- ),
+ "vertical": self._get_vertical_alignment(props),
+ "wrap_text": self._get_is_wrap_text(props),
}
- def build_border(self, props: Dict) -> Dict[str, Dict[str, str]]:
+ def _get_vertical_alignment(self, props: Mapping[str, str]) -> Optional[str]:
+ vertical_align = props.get("vertical-align")
+ if vertical_align:
+ return self.VERTICAL_MAP.get(vertical_align)
+ return None
+
+ def _get_is_wrap_text(self, props: Mapping[str, str]) -> Optional[bool]:
+ if props.get("white-space") is None:
+ return None
+ return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
+
+ def build_border(
+ self, props: Mapping[str, str]
+ ) -> Dict[str, Dict[str, Optional[str]]]:
return {
side: {
"style": self._border_style(
@@ -149,7 +212,7 @@ def build_border(self, props: Dict) -> Dict[str, Dict[str, str]]:
for side in ["top", "right", "bottom", "left"]
}
- def _border_style(self, style: Optional[str], width):
+ def _border_style(self, style: Optional[str], width: Optional[str]):
# convert styles and widths to openxml, one of:
# 'dashDot'
# 'dashDotDot'
@@ -169,26 +232,16 @@ def _border_style(self, style: Optional[str], width):
if style == "none" or style == "hidden":
return None
- if width is None:
- width = "2pt"
- width = float(width[:-2])
- if width < 1e-5:
+ width_name = self._get_width_name(width)
+ if width_name is None:
return None
- elif width < 1.3:
- width_name = "thin"
- elif width < 2.8:
- width_name = "medium"
- else:
- width_name = "thick"
- if style in (None, "groove", "ridge", "inset", "outset"):
+ if style in (None, "groove", "ridge", "inset", "outset", "solid"):
# not handled
- style = "solid"
+ return width_name
if style == "double":
return "double"
- if style == "solid":
- return width_name
if style == "dotted":
if width_name in ("hair", "thin"):
return "dotted"
@@ -198,36 +251,89 @@ def _border_style(self, style: Optional[str], width):
return "dashed"
return "mediumDashed"
- def build_fill(self, props: Dict[str, str]):
+ def _get_width_name(self, width_input: Optional[str]) -> Optional[str]:
+ width = self._width_to_float(width_input)
+ if width < 1e-5:
+ return None
+ elif width < 1.3:
+ return "thin"
+ elif width < 2.8:
+ return "medium"
+ return "thick"
+
+ def _width_to_float(self, width: Optional[str]) -> float:
+ if width is None:
+ width = "2pt"
+ return self._pt_to_float(width)
+
+ def _pt_to_float(self, pt_string: str) -> float:
+ assert pt_string.endswith("pt")
+ return float(pt_string.rstrip("pt"))
+
+ def build_fill(self, props: Mapping[str, str]):
# TODO: perhaps allow for special properties
# -excel-pattern-bgcolor and -excel-pattern-type
fill_color = props.get("background-color")
if fill_color not in (None, "transparent", "none"):
return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
- BOLD_MAP = {
- "bold": True,
- "bolder": True,
- "600": True,
- "700": True,
- "800": True,
- "900": True,
- "normal": False,
- "lighter": False,
- "100": False,
- "200": False,
- "300": False,
- "400": False,
- "500": False,
- }
- ITALIC_MAP = {"normal": False, "italic": True, "oblique": True}
+ def build_number_format(self, props: Mapping[str, str]) -> Dict[str, Optional[str]]:
+ return {"format_code": props.get("number-format")}
- def build_font(self, props) -> Dict[str, Optional[Union[bool, int, str]]]:
- size = props.get("font-size")
- if size is not None:
- assert size.endswith("pt")
- size = float(size[:-2])
+ def build_font(
+ self, props: Mapping[str, str]
+ ) -> Dict[str, Optional[Union[bool, int, float, str]]]:
+ font_names = self._get_font_names(props)
+ decoration = self._get_decoration(props)
+ return {
+ "name": font_names[0] if font_names else None,
+ "family": self._select_font_family(font_names),
+ "size": self._get_font_size(props),
+ "bold": self._get_is_bold(props),
+ "italic": self._get_is_italic(props),
+ "underline": ("single" if "underline" in decoration else None),
+ "strike": ("line-through" in decoration) or None,
+ "color": self.color_to_excel(props.get("color")),
+ # shadow if nonzero digit before shadow color
+ "shadow": self._get_shadow(props),
+ # FIXME: dont leave commented-out
+ # 'vertAlign':,
+ # 'charset': ,
+ # 'scheme': ,
+ # 'outline': ,
+ # 'condense': ,
+ }
+
+ def _get_is_bold(self, props: Mapping[str, str]) -> Optional[bool]:
+ weight = props.get("font-weight")
+ if weight:
+ return self.BOLD_MAP.get(weight)
+ return None
+
+ def _get_is_italic(self, props: Mapping[str, str]) -> Optional[bool]:
+ font_style = props.get("font-style")
+ if font_style:
+ return self.ITALIC_MAP.get(font_style)
+ return None
+
+ def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
+ decoration = props.get("text-decoration")
+ if decoration is not None:
+ return decoration.split()
+ else:
+ return ()
+
+ def _get_underline(self, decoration: Sequence[str]) -> Optional[str]:
+ if "underline" in decoration:
+ return "single"
+ return None
+
+ def _get_shadow(self, props: Mapping[str, str]) -> Optional[bool]:
+ if "text-shadow" in props:
+ return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
+ return None
+ def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
font_names_tmp = re.findall(
r"""(?x)
(
@@ -240,6 +346,7 @@ def build_font(self, props) -> Dict[str, Optional[Union[bool, int, str]]]:
""",
props.get("font-family", ""),
)
+
font_names = []
for name in font_names_tmp:
if name[:1] == '"':
@@ -250,88 +357,58 @@ def build_font(self, props) -> Dict[str, Optional[Union[bool, int, str]]]:
name = name.strip()
if name:
font_names.append(name)
+ return font_names
+
+ def _get_font_size(self, props: Mapping[str, str]) -> Optional[float]:
+ size = props.get("font-size")
+ if size is None:
+ return size
+ return self._pt_to_float(size)
+ def _select_font_family(self, font_names) -> Optional[int]:
family = None
for name in font_names:
- if name == "serif":
- family = 1 # roman
- break
- elif name == "sans-serif":
- family = 2 # swiss
- break
- elif name == "cursive":
- family = 4 # script
- break
- elif name == "fantasy":
- family = 5 # decorative
+ family = self.FAMILY_MAP.get(name)
+ if family:
break
- decoration = props.get("text-decoration")
- if decoration is not None:
- decoration = decoration.split()
- else:
- decoration = ()
-
- return {
- "name": font_names[0] if font_names else None,
- "family": family,
- "size": size,
- "bold": self.BOLD_MAP.get(props.get("font-weight")),
- "italic": self.ITALIC_MAP.get(props.get("font-style")),
- "underline": ("single" if "underline" in decoration else None),
- "strike": ("line-through" in decoration) or None,
- "color": self.color_to_excel(props.get("color")),
- # shadow if nonzero digit before shadow color
- "shadow": (
- bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
- if "text-shadow" in props
- else None
- ),
- # FIXME: dont leave commented-out
- # 'vertAlign':,
- # 'charset': ,
- # 'scheme': ,
- # 'outline': ,
- # 'condense': ,
- }
-
- NAMED_COLORS = {
- "maroon": "800000",
- "brown": "A52A2A",
- "red": "FF0000",
- "pink": "FFC0CB",
- "orange": "FFA500",
- "yellow": "FFFF00",
- "olive": "808000",
- "green": "008000",
- "purple": "800080",
- "fuchsia": "FF00FF",
- "lime": "00FF00",
- "teal": "008080",
- "aqua": "00FFFF",
- "blue": "0000FF",
- "navy": "000080",
- "black": "000000",
- "gray": "808080",
- "grey": "808080",
- "silver": "C0C0C0",
- "white": "FFFFFF",
- }
+ return family
- def color_to_excel(self, val: Optional[str]):
+ def color_to_excel(self, val: Optional[str]) -> Optional[str]:
if val is None:
return None
- if val.startswith("#") and len(val) == 7:
- return val[1:].upper()
- if val.startswith("#") and len(val) == 4:
- return (val[1] * 2 + val[2] * 2 + val[3] * 2).upper()
+
+ if self._is_hex_color(val):
+ return self._convert_hex_to_excel(val)
+
try:
return self.NAMED_COLORS[val]
except KeyError:
warnings.warn(f"Unhandled color format: {repr(val)}", CSSWarning)
+ return None
- def build_number_format(self, props: Dict) -> Dict[str, Optional[str]]:
- return {"format_code": props.get("number-format")}
+ def _is_hex_color(self, color_string: str) -> bool:
+ return bool(color_string.startswith("#"))
+
+ def _convert_hex_to_excel(self, color_string: str) -> str:
+ code = color_string.lstrip("#")
+ if self._is_shorthand_color(color_string):
+ return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
+ else:
+ return code.upper()
+
+ def _is_shorthand_color(self, color_string: str) -> bool:
+ """Check if color code is shorthand.
+
+ #FFF is a shorthand as opposed to full #FFFFFF.
+ """
+ code = color_string.lstrip("#")
+ if len(code) == 3:
+ return True
+ elif len(code) == 6:
+ return False
+ else:
+ raise ValueError(f"Unexpected color {color_string}")
class ExcelFormatter:
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Refactor/clean up of ``CSSToExcelConverter`` in module ``pandas.io.formats.excel``.
- Move class variables to the top of the class
- Add font family mapping
- Extract methods
- Add missing typing
- Make color parsing cleaner (see the sketch below) | https://api.github.com/repos/pandas-dev/pandas/pulls/36576 | 2020-09-23T15:23:03Z | 2020-09-24T23:57:04Z | 2020-09-24T23:57:04Z | 2020-10-04T13:24:27Z
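For the color-parsing item above, a sketch of the shorthand hex expansion from the diff (simplified from ``_convert_hex_to_excel`` for illustration):

```
def hex_to_excel(color: str) -> str:
    code = color.lstrip("#")
    if len(code) == 3:  # shorthand, e.g. "#f0a" -> "ff00aa"
        code = "".join(ch * 2 for ch in code)
    return code.upper()

assert hex_to_excel("#f0a") == "FF00AA"
assert hex_to_excel("#FFFFFF") == "FFFFFF"
```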
Backport PR #36523: DOC: a few sphinx fixes in release notes | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index e3b0f59c3edcc..c1effad34ab93 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -33,8 +33,8 @@ Fixed regressions
- Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
-- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
-- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
+- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
+- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
| Backport PR #36523 | https://api.github.com/repos/pandas-dev/pandas/pulls/36573 | 2020-09-23T11:36:25Z | 2020-09-23T12:16:10Z | 2020-09-23T12:16:10Z | 2020-09-23T14:58:24Z |
Backport PR #36535 on branch 1.1.x (Regr/period range large value/issue 36430) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index e3a96c69918db..e3b0f59c3edcc 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -36,6 +36,7 @@ Fixed regressions
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
+- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 86b6533f5caf5..27402c8d255b6 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -861,6 +861,7 @@ cdef int64_t get_time_nanos(int freq, int64_t unix_date, int64_t ordinal) nogil:
"""
cdef:
int64_t sub, factor
+ int64_t nanos_in_day = 24 * 3600 * 10**9
freq = get_freq_group(freq)
@@ -886,7 +887,7 @@ cdef int64_t get_time_nanos(int freq, int64_t unix_date, int64_t ordinal) nogil:
# We must have freq == FR_HR
factor = 10**9 * 3600
- sub = ordinal - unix_date * 24 * 3600 * 10**9 / factor
+ sub = ordinal - unix_date * (nanos_in_day / factor)
return sub * factor
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index dcef0615121c1..795021a260028 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -486,6 +486,13 @@ def test_period_cons_combined(self):
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
+ @pytest.mark.parametrize("hour", range(24))
+ def test_period_large_ordinal(self, hour):
+ # Issue #36430
+ # Integer overflow for Period over the maximum timestamp
+ p = pd.Period(ordinal=2562048 + hour, freq="1H")
+ assert p.hour == hour
+
class TestPeriodMethods:
def test_round_trip(self):
| Backport PR #36535: Regr/period range large value/issue 36430 | https://api.github.com/repos/pandas-dev/pandas/pulls/36572 | 2020-09-23T10:35:46Z | 2020-09-23T11:31:48Z | 2020-09-23T11:31:48Z | 2020-09-23T11:31:48Z |
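Editor's gloss on the fix above: the old expression multiplied before dividing, which overflows ``int64`` for day counts near the maximum timestamp; dividing first keeps the intermediate in range. The day value below is derived from the ordinal in the PR's test (2562048 hours / 24):

```
import numpy as np

unix_date = np.int64(106752)                  # day count near pd.Timestamp.max
nanos_in_day = np.int64(24 * 3600 * 10**9)
factor = np.int64(10**9 * 3600)               # the FR_HR case from the diff

wrapped = unix_date * nanos_in_day            # RuntimeWarning: overflow, wraps
safe = unix_date * (nanos_in_day // factor)   # fine: 2562048
```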
CLN: clean-ups in code | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 8b6f49cc7d589..2a8cf502de53b 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1192,7 +1192,7 @@ def _add_timedelta_arraylike(self, other):
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
- mask = (self._isnan) | (other._isnan)
+ mask = self._isnan | other._isnan
new_values[mask] = iNaT
return type(self)(new_values, dtype=self.dtype)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index db73c84b39cf9..1e879e32bed5f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2010,6 +2010,7 @@ def objects_to_datetime64ns(
utc : bool, default False
Whether to convert timezone-aware timestamps to UTC.
errors : {'raise', 'ignore', 'coerce'}
+ require_iso8601 : bool, default False
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 15f2842e39875..ed45b4da7279e 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -648,7 +648,7 @@ def _sub_period_array(self, other):
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
- mask = (self._isnan) | (other._isnan)
+ mask = self._isnan | other._isnan
new_values[mask] = NaT
return new_values
| Started with whitespace before colons, and ended up doing some other clean-ups as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/36570 | 2020-09-23T08:59:14Z | 2020-10-07T01:05:30Z | 2020-10-07T01:05:30Z | 2020-10-07T01:06:22Z
CLN: clean up pandas core arrays | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 528d78a5414ea..7dbb6e7e47b23 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -452,7 +452,7 @@ def from_spmatrix(cls, data):
return cls._simple_new(arr, index, dtype)
- def __array__(self, dtype=None, copy=True) -> np.ndarray:
+ def __array__(self, dtype=None) -> np.ndarray:
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
@@ -1515,7 +1515,7 @@ def _formatter(self, boxed=False):
SparseArray._add_unary_ops()
-def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy=False):
+def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None):
"""
Convert ndarray to sparse format
| Some clean up in `pandas/core/arrays/sparse/array.py`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36569 | 2020-09-23T08:46:06Z | 2020-09-24T06:44:53Z | 2020-09-24T06:44:53Z | 2020-09-24T08:07:12Z |
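The `copy` arguments dropped above were dead parameters: `SparseArray.__array__` never consulted `copy`, and NumPy's conversion protocol at the time only passed `dtype` through. A quick sketch of the call path that remains, as an illustration rather than a spec:
```python
import numpy as np
import pandas as pd

sparse = pd.arrays.SparseArray([0, 0, 1, 2], fill_value=0)

# np.asarray routes through SparseArray.__array__(dtype=None); the
# removed ``copy`` keyword was never used by the implementation.
print(np.asarray(sparse))                   # [0 0 1 2]
print(np.asarray(sparse, dtype="float64"))  # [0. 0. 1. 2.]
```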
REF: Remove rolling window fixed algorithms | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0ab95dd260a9c..57e3c9dd66afb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -310,6 +310,7 @@ Performance improvements
- The internal index method :meth:`~Index._shallow_copy` now makes the new index and original index share cached attributes,
avoiding creating these again, if created on either. This can speed up operations that depend on creating copies of existing indexes (:issue:`36840`)
- Performance improvement in :meth:`RollingGroupby.count` (:issue:`35625`)
+- Small performance decrease to :meth:`Rolling.min` and :meth:`Rolling.max` for fixed windows (:issue:`36567`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index c6fd569247b90..937f7d8df7728 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -137,8 +137,8 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x,
sum_x[0] = t
-def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+def roll_sum(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
cdef:
float64_t sum_x = 0, compensation_add = 0, compensation_remove = 0
int64_t s, e
@@ -181,36 +181,6 @@ def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start,
return output
-def roll_sum_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win):
- cdef:
- float64_t val, prev_x, sum_x = 0, compensation_add = 0, compensation_remove = 0
- int64_t range_endpoint
- int64_t nobs = 0, i, N = len(values)
- ndarray[float64_t] output
-
- output = np.empty(N, dtype=float)
-
- range_endpoint = int_max(minp, 1) - 1
-
- with nogil:
-
- for i in range(0, range_endpoint):
- add_sum(values[i], &nobs, &sum_x, &compensation_add)
- output[i] = NaN
-
- for i in range(range_endpoint, N):
- val = values[i]
- add_sum(val, &nobs, &sum_x, &compensation_add)
-
- if i > win - 1:
- prev_x = values[i - win]
- remove_sum(prev_x, &nobs, &sum_x, &compensation_remove)
-
- output[i] = calc_sum(minp, nobs, sum_x)
-
- return output
-
# ----------------------------------------------------------------------
# Rolling mean
@@ -268,36 +238,8 @@ cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x,
neg_ct[0] = neg_ct[0] - 1
-def roll_mean_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win):
- cdef:
- float64_t val, prev_x, sum_x = 0, compensation_add = 0, compensation_remove = 0
- Py_ssize_t nobs = 0, i, neg_ct = 0, N = len(values)
- ndarray[float64_t] output
-
- output = np.empty(N, dtype=float)
-
- with nogil:
- for i in range(minp - 1):
- val = values[i]
- add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add)
- output[i] = NaN
-
- for i in range(minp - 1, N):
- val = values[i]
- add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add)
-
- if i > win - 1:
- prev_x = values[i - win]
- remove_mean(prev_x, &nobs, &sum_x, &neg_ct, &compensation_remove)
-
- output[i] = calc_mean(minp, nobs, neg_ct, sum_x)
-
- return output
-
-
-def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+def roll_mean(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
cdef:
float64_t val, compensation_add = 0, compensation_remove = 0, sum_x = 0
int64_t s, e
@@ -358,7 +300,9 @@ cdef inline float64_t calc_var(int64_t minp, int ddof, float64_t nobs,
result = 0
else:
result = ssqdm_x / (nobs - <float64_t>ddof)
- if result < 0:
+ # Fix for numerical imprecision.
+ # Can be result < 0 once Kahan Summation is implemented
+ if result < 1e-15:
result = 0
else:
result = NaN
@@ -403,64 +347,8 @@ cdef inline void remove_var(float64_t val, float64_t *nobs, float64_t *mean_x,
ssqdm_x[0] = 0
-def roll_var_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win, int ddof=1):
- """
- Numerically stable implementation using Welford's method.
- """
- cdef:
- float64_t mean_x = 0, ssqdm_x = 0, nobs = 0,
- float64_t val, prev, delta, mean_x_old
- int64_t s, e
- Py_ssize_t i, j, N = len(values)
- ndarray[float64_t] output
-
- output = np.empty(N, dtype=float)
-
- # Check for windows larger than array, addresses #7297
- win = min(win, N)
-
- with nogil:
-
- # Over the first window, observations can only be added, never
- # removed
- for i in range(win):
- add_var(values[i], &nobs, &mean_x, &ssqdm_x)
- output[i] = calc_var(minp, ddof, nobs, ssqdm_x)
-
- # a part of Welford's method for the online variance-calculation
- # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
-
- # After the first window, observations can both be added and
- # removed
- for i in range(win, N):
- val = values[i]
- prev = values[i - win]
-
- if notnan(val):
- if prev == prev:
-
- # Adding one observation and removing another one
- delta = val - prev
- mean_x_old = mean_x
-
- mean_x += delta / nobs
- ssqdm_x += ((nobs - 1) * val
- + (nobs + 1) * prev
- - 2 * nobs * mean_x_old) * delta / nobs
-
- else:
- add_var(val, &nobs, &mean_x, &ssqdm_x)
- elif prev == prev:
- remove_var(prev, &nobs, &mean_x, &ssqdm_x)
-
- output[i] = calc_var(minp, ddof, nobs, ssqdm_x)
-
- return output
-
-
-def roll_var_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int ddof=1):
+def roll_var(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp, int ddof=1):
"""
Numerically stable implementation using Welford's method.
"""
@@ -578,38 +466,8 @@ cdef inline void remove_skew(float64_t val, int64_t *nobs,
xxx[0] = xxx[0] - val * val * val
-def roll_skew_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win):
- cdef:
- float64_t val, prev
- float64_t x = 0, xx = 0, xxx = 0
- int64_t nobs = 0, i, j, N = len(values)
- int64_t s, e
- ndarray[float64_t] output
-
- output = np.empty(N, dtype=float)
-
- with nogil:
- for i in range(minp - 1):
- val = values[i]
- add_skew(val, &nobs, &x, &xx, &xxx)
- output[i] = NaN
-
- for i in range(minp - 1, N):
- val = values[i]
- add_skew(val, &nobs, &x, &xx, &xxx)
-
- if i > win - 1:
- prev = values[i - win]
- remove_skew(prev, &nobs, &x, &xx, &xxx)
-
- output[i] = calc_skew(minp, nobs, x, xx, xxx)
-
- return output
-
-
-def roll_skew_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
cdef:
float64_t val, prev
float64_t x = 0, xx = 0, xxx = 0
@@ -733,37 +591,8 @@ cdef inline void remove_kurt(float64_t val, int64_t *nobs,
xxxx[0] = xxxx[0] - val * val * val * val
-def roll_kurt_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win):
- cdef:
- float64_t val, prev
- float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
- int64_t nobs = 0, i, j, N = len(values)
- int64_t s, e
- ndarray[float64_t] output
-
- output = np.empty(N, dtype=float)
-
- with nogil:
-
- for i in range(minp - 1):
- add_kurt(values[i], &nobs, &x, &xx, &xxx, &xxxx)
- output[i] = NaN
-
- for i in range(minp - 1, N):
- add_kurt(values[i], &nobs, &x, &xx, &xxx, &xxxx)
-
- if i > win - 1:
- prev = values[i - win]
- remove_kurt(prev, &nobs, &x, &xx, &xxx, &xxxx)
-
- output[i] = calc_kurt(minp, nobs, x, xx, xxx, xxxx)
-
- return output
-
-
-def roll_kurt_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
cdef:
float64_t val, prev
float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
@@ -943,28 +772,8 @@ cdef inline numeric calc_mm(int64_t minp, Py_ssize_t nobs,
return result
-def roll_max_fixed(float64_t[:] values, int64_t[:] start,
- int64_t[:] end, int64_t minp, int64_t win):
- """
- Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
-
- Parameters
- ----------
- values : np.ndarray[np.float64]
- window : int, size of rolling window
- minp : if number of observations in window
- is below this, output a NaN
- index : ndarray, optional
- index for window computation
- closed : 'right', 'left', 'both', 'neither'
- make the interval closed on the right, left,
- both or neither endpoints
- """
- return _roll_min_max_fixed(values, minp, win, is_max=1)
-
-
-def roll_max_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+def roll_max(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
"""
Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -980,11 +789,11 @@ def roll_max_variable(ndarray[float64_t] values, ndarray[int64_t] start,
make the interval closed on the right, left,
both or neither endpoints
"""
- return _roll_min_max_variable(values, start, end, minp, is_max=1)
+ return _roll_min_max(values, start, end, minp, is_max=1)
-def roll_min_fixed(float64_t[:] values, int64_t[:] start,
- int64_t[:] end, int64_t minp, int64_t win):
+def roll_min(ndarray[float64_t] values, ndarray[int64_t] start,
+ ndarray[int64_t] end, int64_t minp):
"""
Moving min of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -997,31 +806,14 @@ def roll_min_fixed(float64_t[:] values, int64_t[:] start,
index : ndarray, optional
index for window computation
"""
- return _roll_min_max_fixed(values, minp, win, is_max=0)
-
+ return _roll_min_max(values, start, end, minp, is_max=0)
-def roll_min_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
- """
- Moving min of 1d array of any numeric type along axis=0 ignoring NaNs.
- Parameters
- ----------
- values : np.ndarray[np.float64]
- window : int, size of rolling window
- minp : if number of observations in window
- is below this, output a NaN
- index : ndarray, optional
- index for window computation
- """
- return _roll_min_max_variable(values, start, end, minp, is_max=0)
-
-
-cdef _roll_min_max_variable(ndarray[numeric] values,
- ndarray[int64_t] starti,
- ndarray[int64_t] endi,
- int64_t minp,
- bint is_max):
+cdef _roll_min_max(ndarray[numeric] values,
+ ndarray[int64_t] starti,
+ ndarray[int64_t] endi,
+ int64_t minp,
+ bint is_max):
cdef:
numeric ai
int64_t i, k, curr_win_size, start
@@ -1084,93 +876,6 @@ cdef _roll_min_max_variable(ndarray[numeric] values,
return output
-cdef _roll_min_max_fixed(numeric[:] values,
- int64_t minp,
- int64_t win,
- bint is_max):
- cdef:
- numeric ai
- bint should_replace
- int64_t i, removed, window_i,
- Py_ssize_t nobs = 0, N = len(values)
- int64_t* death
- numeric* ring
- numeric* minvalue
- numeric* end
- numeric* last
- ndarray[float64_t, ndim=1] output
-
- output = np.empty(N, dtype=float)
- # setup the rings of death!
- ring = <numeric *>malloc(win * sizeof(numeric))
- death = <int64_t *>malloc(win * sizeof(int64_t))
-
- end = ring + win
- last = ring
- minvalue = ring
- ai = values[0]
- minvalue[0] = init_mm(values[0], &nobs, is_max)
- death[0] = win
- nobs = 0
-
- with nogil:
-
- for i in range(N):
- ai = init_mm(values[i], &nobs, is_max)
-
- if i >= win:
- remove_mm(values[i - win], &nobs)
-
- if death[minvalue - ring] == i:
- minvalue = minvalue + 1
- if minvalue >= end:
- minvalue = ring
-
- if is_max:
- should_replace = ai >= minvalue[0]
- else:
- should_replace = ai <= minvalue[0]
- if should_replace:
-
- minvalue[0] = ai
- death[minvalue - ring] = i + win
- last = minvalue
-
- else:
-
- if is_max:
- should_replace = last[0] <= ai
- else:
- should_replace = last[0] >= ai
- while should_replace:
- if last == ring:
- last = end
- last -= 1
- if is_max:
- should_replace = last[0] <= ai
- else:
- should_replace = last[0] >= ai
-
- last += 1
- if last == end:
- last = ring
- last[0] = ai
- death[last - ring] = i + win
-
- output[i] = calc_mm(minp, nobs, minvalue[0])
-
- for i in range(minp - 1):
- if numeric in cython.floating:
- output[i] = NaN
- else:
- output[i] = 0
-
- free(ring)
- free(death)
-
- return output
-
-
cdef enum InterpolationType:
LINEAR,
LOWER,
@@ -1300,19 +1005,16 @@ def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start,
return output
-def roll_generic_fixed(object obj,
- ndarray[int64_t] start, ndarray[int64_t] end,
- int64_t minp, int64_t win,
- int offset, object func, bint raw,
- object args, object kwargs):
+def roll_apply(object obj,
+ ndarray[int64_t] start, ndarray[int64_t] end,
+ int64_t minp,
+ object func, bint raw,
+ tuple args, dict kwargs):
cdef:
- ndarray[float64_t] output, counts, bufarr
+ ndarray[float64_t] output, counts
ndarray[float64_t, cast=True] arr
- float64_t *buf
- float64_t *oldbuf
- int64_t nobs = 0, i, j, s, e, N = len(start)
+ Py_ssize_t i, s, e, N = len(start), n = len(obj)
- n = len(obj)
if n == 0:
return obj
@@ -1323,83 +1025,12 @@ def roll_generic_fixed(object obj,
if not arr.flags.c_contiguous:
arr = arr.copy('C')
- counts = roll_sum_fixed(np.concatenate([np.isfinite(arr).astype(float),
- np.array([0.] * offset)]),
- start, end, minp, win)[offset:]
+ counts = roll_sum(np.isfinite(arr).astype(float), start, end, minp)
output = np.empty(N, dtype=float)
- if not raw:
- # series
- for i in range(N):
- if counts[i] >= minp:
- sl = slice(int_max(i + offset - win + 1, 0),
- int_min(i + offset + 1, N))
- output[i] = func(obj.iloc[sl], *args, **kwargs)
- else:
- output[i] = NaN
-
- else:
-
- # truncated windows at the beginning, through first full-length window
- for i in range((int_min(win, N) - offset)):
- if counts[i] >= minp:
- output[i] = func(arr[0: (i + offset + 1)], *args, **kwargs)
- else:
- output[i] = NaN
-
- # remaining full-length windows
- for j, i in enumerate(range((win - offset), (N - offset)), 1):
- if counts[i] >= minp:
- output[i] = func(arr[j:j + win], *args, **kwargs)
- else:
- output[i] = NaN
-
- # truncated windows at the end
- for i in range(int_max(N - offset, 0), N):
- if counts[i] >= minp:
- output[i] = func(arr[int_max(i + offset - win + 1, 0): N],
- *args,
- **kwargs)
- else:
- output[i] = NaN
-
- return output
-
-
-def roll_generic_variable(object obj,
- ndarray[int64_t] start, ndarray[int64_t] end,
- int64_t minp,
- int offset, object func, bint raw,
- object args, object kwargs):
- cdef:
- ndarray[float64_t] output, counts, bufarr
- ndarray[float64_t, cast=True] arr
- float64_t *buf
- float64_t *oldbuf
- int64_t nobs = 0, i, j, s, e, N = len(start)
-
- n = len(obj)
- if n == 0:
- return obj
-
- arr = np.asarray(obj)
-
- # ndarray input
- if raw:
- if not arr.flags.c_contiguous:
- arr = arr.copy('C')
-
- counts = roll_sum_variable(np.concatenate([np.isfinite(arr).astype(float),
- np.array([0.] * offset)]),
- start, end, minp)[offset:]
-
- output = np.empty(N, dtype=float)
-
- if offset != 0:
- raise ValueError("unable to roll_generic with a non-zero offset")
+ for i in range(N):
- for i in range(0, N):
s = start[i]
e = end[i]
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 2e7e7cd47c336..aa71c44f75ead 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -64,7 +64,6 @@ def __init__(self, obj, *args, **kwargs):
def _apply(
self,
func: Callable,
- center: bool,
require_min_periods: int = 0,
floor: int = 1,
is_weighted: bool = False,
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index a21521f4ce8bb..023f598f606f3 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -89,6 +89,20 @@ def get_window_bounds(
end_s = np.arange(self.window_size, dtype="int64") + 1
end_e = start_e + self.window_size
end = np.concatenate([end_s, end_e])[:num_values]
+
+ if center and self.window_size > 2:
+ offset = min((self.window_size - 1) // 2, num_values - 1)
+ start_s_buffer = np.roll(start, -offset)[: num_values - offset]
+ end_s_buffer = np.roll(end, -offset)[: num_values - offset]
+
+ start_e_buffer = np.arange(
+ start[-1] + 1, start[-1] + 1 + offset, dtype="int64"
+ )
+ end_e_buffer = np.array([end[-1]] * offset, dtype="int64")
+
+ start = np.concatenate([start_s_buffer, start_e_buffer])
+ end = np.concatenate([end_s_buffer, end_e_buffer])
+
return start, end
@@ -327,10 +341,4 @@ def get_window_bounds(
end_arrays.append(window_indicies.take(ensure_platform_int(end)))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
- # GH 35552: Need to adjust start and end based on the nans appended to values
- # when center=True
- if num_values > len(start):
- offset = num_values - len(start)
- start = np.concatenate([start, np.array([end[-1]] * offset)])
- end = np.concatenate([end, np.array([end[-1]] * offset)])
return start, end
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 466b320f1771f..9e829ef774d42 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -74,22 +74,21 @@
from pandas.core.internals import Block # noqa:F401
-def calculate_center_offset(window) -> int:
+def calculate_center_offset(window: np.ndarray) -> int:
"""
- Calculate an offset necessary to have the window label to be centered.
+ Calculate an offset necessary to have the window label to be centered
+ for weighted windows.
Parameters
----------
- window: ndarray or int
- window weights or window
+ window: ndarray
+ window weights
Returns
-------
int
"""
- if not is_integer(window):
- window = len(window)
- return int((window - 1) / 2.0)
+ return (len(window) - 1) // 2
def calculate_min_periods(
@@ -417,9 +416,9 @@ def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"):
# insert at the end
result[name] = extra_col
- def _center_window(self, result: np.ndarray, window) -> np.ndarray:
+ def _center_window(self, result: np.ndarray, window: np.ndarray) -> np.ndarray:
"""
- Center the result in the window.
+ Center the result in the window for weighted rolling aggregations.
"""
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument dimensions")
@@ -451,16 +450,6 @@ def _get_roll_func(self, func_name: str) -> Callable:
)
return window_func
- def _get_cython_func_type(self, func: str) -> Callable:
- """
- Return a variable or fixed cython function type.
-
- Variable algorithms do not use window while fixed do.
- """
- if self.is_freq_type or isinstance(self.window, BaseIndexer):
- return self._get_roll_func(f"{func}_variable")
- return partial(self._get_roll_func(f"{func}_fixed"), win=self._get_window())
-
def _get_window_indexer(self, window: int) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
@@ -526,7 +515,6 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike:
def _apply(
self,
func: Callable,
- center: bool,
require_min_periods: int = 0,
floor: int = 1,
is_weighted: bool = False,
@@ -542,7 +530,6 @@ def _apply(
Parameters
----------
func : callable function to apply
- center : bool
require_min_periods : int
floor : int
is_weighted : bool
@@ -568,13 +555,9 @@ def homogeneous_func(values: np.ndarray):
if values.size == 0:
return values.copy()
- offset = calculate_center_offset(window) if center else 0
- additional_nans = np.array([np.nan] * offset)
-
if not is_weighted:
def calc(x):
- x = np.concatenate((x, additional_nans))
if not isinstance(self.window, BaseIndexer):
min_periods = calculate_min_periods(
window, self.min_periods, len(x), require_min_periods, floor
@@ -598,6 +581,8 @@ def calc(x):
else:
def calc(x):
+ offset = calculate_center_offset(window) if self.center else 0
+ additional_nans = np.array([np.nan] * offset)
x = np.concatenate((x, additional_nans))
return func(x, window, self.min_periods)
@@ -611,7 +596,7 @@ def calc(x):
if use_numba_cache:
NUMBA_FUNC_CACHE[(kwargs["original_func"], "rolling_apply")] = func
- if center:
+ if self.center and is_weighted:
result = self._center_window(result, window)
return result
@@ -1200,9 +1185,7 @@ def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
window_func = self._get_roll_func("roll_weighted_sum")
window_func = get_weighted_roll_func(window_func)
- return self._apply(
- window_func, center=self.center, is_weighted=True, name="sum", **kwargs
- )
+ return self._apply(window_func, is_weighted=True, name="sum", **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
@@ -1210,9 +1193,7 @@ def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
window_func = self._get_roll_func("roll_weighted_mean")
window_func = get_weighted_roll_func(window_func)
- return self._apply(
- window_func, center=self.center, is_weighted=True, name="mean", **kwargs
- )
+ return self._apply(window_func, is_weighted=True, name="mean", **kwargs)
@Substitution(name="window", versionadded="\n.. versionadded:: 1.0.0\n")
@Appender(_shared_docs["var"])
@@ -1221,9 +1202,7 @@ def var(self, ddof=1, *args, **kwargs):
window_func = partial(self._get_roll_func("roll_weighted_var"), ddof=ddof)
window_func = get_weighted_roll_func(window_func)
kwargs.pop("name", None)
- return self._apply(
- window_func, center=self.center, is_weighted=True, name="var", **kwargs
- )
+ return self._apply(window_func, is_weighted=True, name="var", **kwargs)
@Substitution(name="window", versionadded="\n.. versionadded:: 1.0.0\n")
@Appender(_shared_docs["std"])
@@ -1275,8 +1254,8 @@ class RollingAndExpandingMixin(BaseWindow):
)
def count(self):
- window_func = self._get_cython_func_type("roll_sum")
- return self._apply(window_func, center=self.center, name="count")
+ window_func = self._get_roll_func("roll_sum")
+ return self._apply(window_func, name="count")
_shared_docs["apply"] = dedent(
r"""
@@ -1362,25 +1341,16 @@ def apply(
if raw is False:
raise ValueError("raw must be `True` when using the numba engine")
apply_func = generate_numba_apply_func(args, kwargs, func, engine_kwargs)
- center = self.center
elif engine in ("cython", None):
if engine_kwargs is not None:
raise ValueError("cython engine does not accept engine_kwargs")
- # Cython apply functions handle center, so don't need to use
- # _apply's center handling
- window = self._get_window()
- offset = calculate_center_offset(window) if self.center else 0
- apply_func = self._generate_cython_apply_func(
- args, kwargs, raw, offset, func
- )
- center = False
+ apply_func = self._generate_cython_apply_func(args, kwargs, raw, func)
else:
raise ValueError("engine must be either 'numba' or 'cython'")
# name=func & raw=raw for WindowGroupByMixin._apply
return self._apply(
apply_func,
- center=center,
floor=0,
name=func,
use_numba_cache=maybe_use_numba(engine),
@@ -1390,15 +1360,14 @@ def apply(
kwargs=kwargs,
)
- def _generate_cython_apply_func(self, args, kwargs, raw, offset, func):
+ def _generate_cython_apply_func(self, args, kwargs, raw, func):
from pandas import Series
window_func = partial(
- self._get_cython_func_type("roll_generic"),
+ self._get_roll_func("roll_apply"),
args=args,
kwargs=kwargs,
raw=raw,
- offset=offset,
func=func,
)
@@ -1411,11 +1380,9 @@ def apply_func(values, begin, end, min_periods, raw=raw):
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
- window_func = self._get_cython_func_type("roll_sum")
+ window_func = self._get_roll_func("roll_sum")
kwargs.pop("floor", None)
- return self._apply(
- window_func, center=self.center, floor=0, name="sum", **kwargs
- )
+ return self._apply(window_func, floor=0, name="sum", **kwargs)
_shared_docs["max"] = dedent(
"""
@@ -1430,8 +1397,8 @@ def sum(self, *args, **kwargs):
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
- window_func = self._get_cython_func_type("roll_max")
- return self._apply(window_func, center=self.center, name="max", **kwargs)
+ window_func = self._get_roll_func("roll_max")
+ return self._apply(window_func, name="max", **kwargs)
_shared_docs["min"] = dedent(
"""
@@ -1472,13 +1439,13 @@ def max(self, *args, **kwargs):
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
- window_func = self._get_cython_func_type("roll_min")
- return self._apply(window_func, center=self.center, name="min", **kwargs)
+ window_func = self._get_roll_func("roll_min")
+ return self._apply(window_func, name="min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
- window_func = self._get_cython_func_type("roll_mean")
- return self._apply(window_func, center=self.center, name="mean", **kwargs)
+ window_func = self._get_roll_func("roll_mean")
+ return self._apply(window_func, name="mean", **kwargs)
_shared_docs["median"] = dedent(
"""
@@ -1521,12 +1488,12 @@ def median(self, **kwargs):
window_func = self._get_roll_func("roll_median_c")
# GH 32865. Move max window size calculation to
# the median function implementation
- return self._apply(window_func, center=self.center, name="median", **kwargs)
+ return self._apply(window_func, name="median", **kwargs)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
kwargs.pop("require_min_periods", None)
- window_func = self._get_cython_func_type("roll_var")
+ window_func = self._get_roll_func("roll_var")
def zsqrt_func(values, begin, end, min_periods):
return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))
@@ -1534,7 +1501,6 @@ def zsqrt_func(values, begin, end, min_periods):
# ddof passed again for compat with groupby.rolling
return self._apply(
zsqrt_func,
- center=self.center,
require_min_periods=1,
name="std",
ddof=ddof,
@@ -1544,11 +1510,10 @@ def zsqrt_func(values, begin, end, min_periods):
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
kwargs.pop("require_min_periods", None)
- window_func = partial(self._get_cython_func_type("roll_var"), ddof=ddof)
+ window_func = partial(self._get_roll_func("roll_var"), ddof=ddof)
# ddof passed again for compat with groupby.rolling
return self._apply(
window_func,
- center=self.center,
require_min_periods=1,
name="var",
ddof=ddof,
@@ -1567,11 +1532,10 @@ def var(self, ddof=1, *args, **kwargs):
"""
def skew(self, **kwargs):
- window_func = self._get_cython_func_type("roll_skew")
+ window_func = self._get_roll_func("roll_skew")
kwargs.pop("require_min_periods", None)
return self._apply(
window_func,
- center=self.center,
require_min_periods=3,
name="skew",
**kwargs,
@@ -1610,11 +1574,10 @@ def skew(self, **kwargs):
)
def kurt(self, **kwargs):
- window_func = self._get_cython_func_type("roll_kurt")
+ window_func = self._get_roll_func("roll_kurt")
kwargs.pop("require_min_periods", None)
return self._apply(
window_func,
- center=self.center,
require_min_periods=4,
name="kurt",
**kwargs,
@@ -1676,9 +1639,9 @@ def kurt(self, **kwargs):
def quantile(self, quantile, interpolation="linear", **kwargs):
if quantile == 1.0:
- window_func = self._get_cython_func_type("roll_max")
+ window_func = self._get_roll_func("roll_max")
elif quantile == 0.0:
- window_func = self._get_cython_func_type("roll_min")
+ window_func = self._get_roll_func("roll_min")
else:
window_func = partial(
self._get_roll_func("roll_quantile"),
@@ -1690,7 +1653,7 @@ def quantile(self, quantile, interpolation="linear", **kwargs):
# Pass through for groupby.rolling
kwargs["quantile"] = quantile
kwargs["interpolation"] = interpolation
- return self._apply(window_func, center=self.center, name="quantile", **kwargs)
+ return self._apply(window_func, name="quantile", **kwargs)
_shared_docs[
"cov"
@@ -2179,7 +2142,6 @@ class RollingGroupby(WindowGroupByMixin, Rolling):
def _apply(
self,
func: Callable,
- center: bool,
require_min_periods: int = 0,
floor: int = 1,
is_weighted: bool = False,
@@ -2190,7 +2152,6 @@ def _apply(
result = Rolling._apply(
self,
func,
- center,
require_min_periods,
floor,
is_weighted,
@@ -2239,16 +2200,6 @@ def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries:
obj = obj.take(groupby_order)
return super()._create_data(obj)
- def _get_cython_func_type(self, func: str) -> Callable:
- """
- Return the cython function type.
-
- RollingGroupby needs to always use "variable" algorithms since processing
- the data in group order may not be monotonic with the data which
- "fixed" algorithms assume
- """
- return self._get_roll_func(f"{func}_variable")
-
def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer:
"""
Return an indexer class that will compute the window start and end bounds
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index eaee276c7a388..73831d518032d 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -774,7 +774,7 @@ def test_rolling_numerical_too_large_numbers():
ds[2] = -9e33
result = ds.rolling(5).mean()
expected = pd.Series(
- [np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 0.0, 6.0, 7.0],
+ [np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 5.0, 6.0, 7.0],
index=dates,
)
tm.assert_series_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Pros:
* Less code to maintain + makes the internals simpler
* Fixed a numerical-precision bug in rolling mean with very large values (see the sketch after this row): https://github.com/pandas-dev/pandas/pull/36567/files#diff-7656adc204bcddba0534588cabdab62eR770
Cons:
* A performance hit (benchmarks below)
```
before after ratio
[d9722efe] [198ab9e8]
<clean/rolling_aggregations^2> <clean/rolling_aggregations>
+ 1.27±0.02ms 4.05±1ms 3.19 rolling.Quantile.time_quantile('Series', 10, 'float', 1, 'linear')
+ 1.27±0.01ms 3.67±0.7ms 2.88 rolling.Quantile.time_quantile('Series', 10, 'float', 1, 'nearest')
+ 1.27±0.01ms 3.22±0.3ms 2.54 rolling.Quantile.time_quantile('Series', 10, 'float', 1, 'higher')
+ 1.27±0.01ms 2.99±0.08ms 2.36 rolling.Quantile.time_quantile('Series', 10, 'float', 1, 'midpoint')
+ 1.29±0ms 2.92±0.01ms 2.26 rolling.Quantile.time_quantile('Series', 10, 'float', 0, 'nearest')
+ 1.29±0ms 2.91±0.01ms 2.26 rolling.Quantile.time_quantile('Series', 10, 'float', 0, 'midpoint')
+ 1.29±0.08ms 2.91±0.01ms 2.25 rolling.Methods.time_rolling('Series', 10, 'float', 'max')
+ 1.29±0.01ms 2.91±0ms 2.25 rolling.Quantile.time_quantile('Series', 10, 'float', 0, 'higher')
+ 1.29±0.01ms 2.90±0.02ms 2.24 rolling.Quantile.time_quantile('Series', 10, 'float', 0, 'linear')
+ 1.30±0.02ms 2.91±0.04ms 2.24 rolling.Quantile.time_quantile('Series', 10, 'float', 0, 'lower')
+ 1.30±0.03ms 2.90±0.01ms 2.23 rolling.Quantile.time_quantile('Series', 10, 'float', 1, 'lower')
+ 1.32±0.08ms 2.93±0.02ms 2.22 rolling.Methods.time_rolling('Series', 10, 'float', 'min')
+ 1.33±0.03ms 2.95±0ms 2.21 rolling.Methods.time_rolling('Series', 10, 'int', 'max')
+ 1.44±0.01ms 3.11±0.03ms 2.16 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 1, 'lower')
+ 1.37±0.05ms 2.96±0.01ms 2.16 rolling.Methods.time_rolling('Series', 10, 'int', 'min')
+ 1.44±0.01ms 3.10±0.02ms 2.15 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 1, 'midpoint')
+ 1.44±0.01ms 3.07±0.01ms 2.14 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 1, 'higher')
+ 1.44±0ms 3.08±0.01ms 2.14 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 1, 'nearest')
+ 1.45±0.01ms 3.07±0.01ms 2.13 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'max')
+ 1.46±0.01ms 3.07±0.01ms 2.10 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 1, 'linear')
+ 1.47±0ms 3.09±0.02ms 2.10 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 0, 'linear')
+ 1.48±0.02ms 3.10±0.02ms 2.10 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 0, 'higher')
+ 1.47±0ms 3.08±0ms 2.09 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'min')
+ 1.47±0.01ms 3.07±0.01ms 2.09 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 0, 'midpoint')
+ 1.48±0.2ms 3.10±0.02ms 2.09 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 0, 'nearest')
+ 1.47±0.04ms 3.05±0.02ms 2.08 rolling.Quantile.time_quantile('Series', 1000, 'float', 1, 'linear')
+ 1.52±0.01ms 3.15±0.02ms 2.07 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'max')
+ 1.54±0.01ms 3.17±0.03ms 2.06 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'min')
+ 1.49±0ms 3.05±0.06ms 2.04 rolling.Quantile.time_quantile('Series', 1000, 'float', 1, 'nearest')
+ 1.49±0ms 3.03±0.03ms 2.03 rolling.Quantile.time_quantile('Series', 1000, 'float', 1, 'lower')
+ 1.49±0ms 2.98±0.03ms 1.99 rolling.Quantile.time_quantile('Series', 1000, 'float', 1, 'midpoint')
+ 1.40±0.01ms 2.75±0.01ms 1.97 rolling.ExpandingMethods.time_expanding('Series', 'float', 'max')
+ 1.49±0ms 2.93±0.02ms 1.96 rolling.Quantile.time_quantile('Series', 1000, 'float', 1, 'higher')
+ 1.44±0.01ms 2.81±0.02ms 1.96 rolling.ExpandingMethods.time_expanding('Series', 'int', 'max')
+ 1.59±0.2ms 3.11±0.03ms 1.95 rolling.Quantile.time_quantile('DataFrame', 10, 'float', 0, 'lower')
+ 930±3μs 1.80±0.01ms 1.94 rolling.Quantile.time_quantile('Series', 10, 'int', 0, 'higher')
+ 1.52±0ms 2.94±0.02ms 1.93 rolling.Quantile.time_quantile('Series', 1000, 'float', 0, 'linear')
+ 1.51±0.2ms 2.92±0.01ms 1.93 rolling.Methods.time_rolling('Series', 1000, 'float', 'max')
+ 933±4μs 1.80±0.01ms 1.93 rolling.Quantile.time_quantile('Series', 10, 'int', 0, 'nearest')
+ 931±4μs 1.79±0.01ms 1.93 rolling.Quantile.time_quantile('Series', 10, 'int', 1, 'nearest')
+ 1.69±0.01ms 3.26±0.01ms 1.93 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 1, 'linear')
+ 933±4μs 1.80±0.01ms 1.93 rolling.Quantile.time_quantile('Series', 10, 'int', 0, 'midpoint')
+ 1.52±0.2ms 2.93±0.01ms 1.93 rolling.Methods.time_rolling('Series', 1000, 'float', 'min')
+ 937±10μs 1.80±0.01ms 1.92 rolling.Quantile.time_quantile('Series', 10, 'int', 0, 'lower')
+ 1.56±0ms 2.99±0.05ms 1.92 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'max')
+ 940±30μs 1.80±0.01ms 1.91 rolling.Quantile.time_quantile('Series', 10, 'int', 1, 'midpoint')
+ 1.56±0.02ms 2.98±0.01ms 1.91 rolling.Methods.time_rolling('Series', 1000, 'int', 'max')
+ 935±3μs 1.79±0.01ms 1.91 rolling.Quantile.time_quantile('Series', 10, 'int', 1, 'higher')
+ 934±7μs 1.78±0.01ms 1.91 rolling.Quantile.time_quantile('Series', 10, 'int', 0, 'linear')
+ 931±3μs 1.78±0.01ms 1.91 rolling.Quantile.time_quantile('Series', 10, 'int', 1, 'lower')
+ 1.45±0.01ms 2.76±0ms 1.91 rolling.ExpandingMethods.time_expanding('Series', 'float', 'min')
+ 1.63±0ms 3.11±0.01ms 1.90 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 0, 'nearest')
+ 937±4μs 1.78±0.01ms 1.90 rolling.Quantile.time_quantile('Series', 10, 'int', 1, 'linear')
+ 961±2μs 1.82±0.04ms 1.90 rolling.Quantile.time_quantile('Series', 1000, 'int', 0, 'midpoint')
+ 1.49±0.02ms 2.82±0.01ms 1.89 rolling.ExpandingMethods.time_expanding('Series', 'int', 'min')
+ 964±9μs 1.82±0.02ms 1.88 rolling.Quantile.time_quantile('Series', 1000, 'int', 0, 'nearest')
+ 963±7μs 1.81±0.05ms 1.88 rolling.Quantile.time_quantile('Series', 1000, 'int', 0, 'linear')
+ 962±8μs 1.81±0.01ms 1.88 rolling.Quantile.time_quantile('Series', 1000, 'int', 0, 'higher')
+ 967±3μs 1.81±0.01ms 1.88 rolling.Quantile.time_quantile('Series', 1000, 'int', 0, 'lower')
+ 966±3μs 1.81±0.02ms 1.87 rolling.Quantile.time_quantile('Series', 1000, 'int', 1, 'linear')
+ 964±1μs 1.80±0.02ms 1.87 rolling.Quantile.time_quantile('Series', 1000, 'int', 1, 'nearest')
+ 1.66±0.03ms 3.10±0.01ms 1.87 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 1, 'nearest')
+ 964±4μs 1.80±0ms 1.87 rolling.Quantile.time_quantile('Series', 1000, 'int', 1, 'higher')
+ 1.12±0ms 2.08±0.01ms 1.87 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 1, 'nearest')
+ 1.67±0.01ms 3.10±0.01ms 1.86 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 1, 'midpoint')
+ 1.63±0.02ms 3.02±0.03ms 1.86 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'max')
+ 1.68±0.01ms 3.11±0.01ms 1.86 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 1, 'lower')
+ 969±3μs 1.80±0.02ms 1.85 rolling.Quantile.time_quantile('Series', 1000, 'int', 1, 'midpoint')
+ 1.68±0.01ms 3.11±0.02ms 1.85 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 1, 'higher')
+ 1.61±0.09ms 2.97±0.01ms 1.85 rolling.Methods.time_rolling('Series', 1000, 'int', 'min')
+ 970±7μs 1.79±0.01ms 1.84 rolling.Quantile.time_quantile('Series', 1000, 'int', 1, 'lower')
+ 1.72±0.01ms 3.16±0.1ms 1.84 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 0, 'linear')
+ 1.71±0.02ms 3.16±0.07ms 1.84 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'max')
+ 1.70±0.08ms 3.13±0.02ms 1.84 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'min')
+ 1.70±0.07ms 3.11±0.01ms 1.83 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 0, 'lower')
+ 1.71±0.01ms 3.13±0.02ms 1.83 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 0, 'higher')
+ 1.65±0.01ms 3.00±0.02ms 1.82 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'min')
+ 1.15±0.01ms 2.09±0.01ms 1.82 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 1, 'linear')
+ 1.71±0.02ms 3.11±0.01ms 1.82 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'max')
+ 1.71±0ms 3.11±0.01ms 1.82 rolling.Quantile.time_quantile('DataFrame', 1000, 'float', 0, 'midpoint')
+ 1.62±0.01ms 2.94±0.03ms 1.82 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'min')
+ 1.12±0ms 2.02±0.05ms 1.80 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 1, 'higher')
+ 1.76±0.08ms 3.15±0.01ms 1.80 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'min')
+ 1.12±0.01ms 1.99±0.03ms 1.78 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 0, 'nearest')
+ 1.12±0.01ms 1.99±0.02ms 1.78 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 1, 'lower')
+ 1.14±0ms 2.03±0.03ms 1.78 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 0, 'higher')
+ 1.12±0ms 1.99±0.01ms 1.78 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 0, 'higher')
+ 1.15±0ms 2.03±0.05ms 1.77 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 0, 'linear')
+ 1.13±0.01ms 2.00±0.05ms 1.77 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 0, 'nearest')
+ 1.12±0ms 1.98±0.04ms 1.77 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 0, 'lower')
+ 1.12±0ms 1.97±0.01ms 1.76 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 1, 'midpoint')
+ 1.13±0.01ms 1.99±0.01ms 1.76 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 0, 'midpoint')
+ 1.12±0ms 1.97±0.01ms 1.76 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 1, 'linear')
+ 1.14±0.01ms 1.98±0.01ms 1.74 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 1, 'midpoint')
+ 1.14±0.02ms 1.97±0ms 1.74 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 0, 'lower')
+ 1.14±0.01ms 1.97±0ms 1.73 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 1, 'higher')
+ 1.14±0.01ms 1.97±0.01ms 1.73 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 0, 'midpoint')
+ 1.14±0ms 1.97±0.01ms 1.72 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 1, 'nearest')
+ 1.14±0.01ms 1.97±0.01ms 1.72 rolling.Quantile.time_quantile('DataFrame', 1000, 'int', 1, 'lower')
+ 1.46±0.07ms 2.48±0.01ms 1.70 rolling.Methods.time_rolling('Series', 10, 'float', 'std')
+ 1.21±0.09ms 2.00±0.02ms 1.66 rolling.Quantile.time_quantile('DataFrame', 10, 'int', 0, 'linear')
+ 1.54±0.03ms 2.54±0.01ms 1.65 rolling.Methods.time_rolling('Series', 1000, 'int', 'std')
+ 1.62±0.04ms 2.66±0.01ms 1.65 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'std')
+ 1.68±0.01ms 2.75±0.02ms 1.64 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'std')
+ 1.56±0.1ms 2.53±0.01ms 1.63 rolling.Methods.time_rolling('Series', 10, 'int', 'std')
+ 1.66±0.03ms 2.67±0.01ms 1.61 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'std')
+ 1.71±0.03ms 2.73±0.01ms 1.59 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'std')
+ 913±40μs 1.42±0ms 1.56 rolling.Methods.time_rolling('Series', 10, 'float', 'mean')
+ 1.62±0.2ms 2.49±0.01ms 1.54 rolling.Methods.time_rolling('Series', 1000, 'float', 'std')
+ 933±100μs 1.43±0.01ms 1.53 rolling.Methods.time_rolling('Series', 1000, 'float', 'mean')
+ 980±30μs 1.48±0.01ms 1.51 rolling.Methods.time_rolling('Series', 10, 'int', 'mean')
+ 1.07±0.01ms 1.60±0.01ms 1.50 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'mean')
+ 985±60μs 1.47±0.01ms 1.50 rolling.Methods.time_rolling('Series', 1000, 'int', 'mean')
+ 1.21±0ms 1.81±0.01ms 1.49 rolling.ExpandingMethods.time_expanding('Series', 'float', 'kurt')
+ 1.43±0ms 2.11±0.01ms 1.47 rolling.Methods.time_rolling('Series', 1000, 'int', 'kurt')
+ 1.14±0ms 1.68±0.01ms 1.47 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'mean')
+ 1.26±0ms 1.86±0ms 1.47 rolling.ExpandingMethods.time_expanding('Series', 'int', 'kurt')
+ 1.40±0.08ms 2.06±0.01ms 1.47 rolling.Methods.time_rolling('Series', 10, 'float', 'kurt')
+ 1.14±0.01ms 1.67±0ms 1.46 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'mean')
+ 1.12±0.01ms 1.63±0.01ms 1.45 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'mean')
+ 1.55±0.01ms 2.24±0.01ms 1.44 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'kurt')
+ 846±4μs 1.22±0ms 1.44 rolling.ExpandingMethods.time_expanding('Series', 'float', 'mean')
+ 1.38±0.01ms 1.98±0.01ms 1.43 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'kurt')
+ 1.62±0ms 2.30±0.01ms 1.42 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'kurt')
+ 1.27±0.02ms 1.80±0.02ms 1.41 rolling.Methods.time_rolling('Series', 10, 'float', 'skew')
+ 1.57±0.02ms 2.22±0.06ms 1.41 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'kurt')
+ 1.32±0.08ms 1.86±0.01ms 1.41 rolling.Methods.time_rolling('Series', 10, 'int', 'skew')
+ 1.44±0.01ms 2.03±0.01ms 1.41 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'kurt')
+ 895±3μs 1.26±0ms 1.40 rolling.ExpandingMethods.time_expanding('Series', 'int', 'mean')
+ 1.32±0.06ms 1.84±0ms 1.40 rolling.Methods.time_rolling('Series', 1000, 'int', 'skew')
+ 1.64±0.04ms 2.29±0.01ms 1.40 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'kurt')
+ 1.42±0.01ms 1.97±0ms 1.39 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'skew')
+ 900±50μs 1.25±0.02ms 1.38 rolling.Methods.time_rolling('Series', 10, 'float', 'sum')
+ 2.12±0.07ms 2.92±0.01ms 1.38 rolling.Quantile.time_quantile('Series', 1000, 'float', 0, 'lower')
+ 1.49±0.01ms 2.04±0ms 1.37 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'skew')
+ 1.45±0.04ms 1.98±0.03ms 1.37 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'skew')
+ 1.51±0.02ms 2.05±0.01ms 1.36 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'skew')
+ 1.51±0.2ms 2.05±0.01ms 1.36 rolling.Methods.time_rolling('Series', 1000, 'float', 'kurt')
+ 1.02±0.01ms 1.38±0.01ms 1.36 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'mean')
+ 1.08±0.01ms 1.46±0.02ms 1.36 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'mean')
+ 962±60μs 1.28±0ms 1.34 rolling.Methods.time_rolling('Series', 10, 'int', 'sum')
+ 841±5μs 1.12±0.01ms 1.33 rolling.ExpandingMethods.time_expanding('Series', 'float', 'sum')
+ 1.26±0.01ms 1.66±0.02ms 1.33 rolling.ExpandingMethods.time_expanding('Series', 'int', 'skew')
+ 969±80μs 1.28±0.01ms 1.32 rolling.Methods.time_rolling('Series', 1000, 'int', 'sum')
+ 1.20±0.02ms 1.58±0.01ms 1.31 rolling.ExpandingMethods.time_expanding('Series', 'float', 'skew')
+ 1.14±0.01ms 1.48±0.01ms 1.30 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'sum')
+ 899±5μs 1.17±0ms 1.30 rolling.ExpandingMethods.time_expanding('Series', 'int', 'sum')
+ 1.08±0.04ms 1.40±0.01ms 1.30 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'sum')
+ 1.09±0.04ms 1.41±0.01ms 1.29 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'sum')
+ 1.36±0ms 1.75±0.01ms 1.29 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'skew')
+ 1.15±0.06ms 1.47±0.01ms 1.28 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'sum')
+ 1.01±0ms 1.28±0ms 1.27 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'sum')
+ 1.43±0ms 1.81±0.01ms 1.27 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'skew')
+ 1.22±0.03ms 1.54±0.01ms 1.26 rolling.Methods.time_rolling('Series', 10, 'float', 'count')
+ 1.10±0ms 1.38±0.01ms 1.26 rolling.ExpandingMethods.time_expanding('Series', 'int', 'count')
+ 1.07±0.01ms 1.35±0ms 1.26 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'sum')
+ 1.36±0ms 1.71±0.02ms 1.26 rolling.Methods.time_rolling('DataFrame', 10, 'int', 'count')
+ 1.40±0.01ms 1.74±0.01ms 1.24 rolling.Methods.time_rolling('DataFrame', 10, 'float', 'count')
+ 1.14±0.01ms 1.40±0ms 1.23 rolling.ExpandingMethods.time_expanding('Series', 'float', 'count')
+ 1.22±0.1ms 1.49±0ms 1.22 rolling.Methods.time_rolling('Series', 10, 'int', 'count')
+ 1.40±0.04ms 1.70±0.01ms 1.21 rolling.Methods.time_rolling('DataFrame', 1000, 'int', 'count')
+ 1.43±0.09ms 1.73±0.01ms 1.21 rolling.Methods.time_rolling('DataFrame', 1000, 'float', 'count')
+ 1.29±0ms 1.56±0.01ms 1.21 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'count')
+ 1.33±0.01ms 1.60±0.01ms 1.20 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'count')
+ 1.38±0ms 1.66±0.01ms 1.20 rolling.ExpandingMethods.time_expanding('Series', 'float', 'std')
+ 1.44±0.01ms 1.72±0.01ms 1.19 rolling.ExpandingMethods.time_expanding('Series', 'int', 'std')
+ 1.56±0.01ms 1.84±0ms 1.18 rolling.ExpandingMethods.time_expanding('DataFrame', 'float', 'std')
+ 1.62±0.01ms 1.89±0.01ms 1.17 rolling.ExpandingMethods.time_expanding('DataFrame', 'int', 'std')
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE DECREASED.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36567 | 2020-09-23T06:33:13Z | 2020-10-09T23:16:36Z | 2020-10-09T23:16:36Z | 2020-10-09T23:34:10Z |
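The bug noted under "Pros" is visible in the updated test above: with the retired fixed-window kernels, a huge transient value (`-9e33`) wiped out the accumulated sum's precision, so `ds.rolling(5).mean()` returned `0.0` after the spike left the window; the surviving variable-window kernels use Kahan summation and recover `5.0`. A sketch reconstructing the test's series from its expected values — the `np.arange(10)` construction is an assumption, since only `ds[2] = -9e33` appears in the diff:
```python
import numpy as np
import pandas as pd

dates = pd.date_range("2020-01-01", periods=10)
ds = pd.Series(np.arange(10, dtype="float64"), index=dates)
ds[2] = -9e33  # huge transient value, as in the test

result = ds.rolling(5).mean()
# Positions 7..9 average windows that no longer contain the spike:
print(result.iloc[7:].tolist())  # [5.0, 6.0, 7.0] -- the fixed kernel gave 0.0 at i=7
```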
ENH: return RangeIndex from difference, symmetric_difference | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6d1196b783f74..9881a71f7dcdf 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -169,6 +169,7 @@ Other enhancements
- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`)
- Added methods :meth:`IntegerArray.prod`, :meth:`IntegerArray.min`, and :meth:`IntegerArray.max` (:issue:`33790`)
+- Where possible :meth:`RangeIndex.difference` and :meth:`RangeIndex.symmetric_difference` will return :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`36564`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 4dffda2605ef7..79cc411347502 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -468,6 +468,9 @@ def equals(self, other: object) -> bool:
return self._range == other._range
return super().equals(other)
+ # --------------------------------------------------------------------
+ # Set Operations
+
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
@@ -634,6 +637,57 @@ def _union(self, other, sort):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
+ def difference(self, other, sort=None):
+ # optimized set operation if we have another RangeIndex
+ self._validate_sort_keyword(sort)
+
+ if not isinstance(other, RangeIndex):
+ return super().difference(other, sort=sort)
+
+ res_name = ops.get_op_result_name(self, other)
+
+ first = self._range[::-1] if self.step < 0 else self._range
+ overlap = self.intersection(other)
+ if overlap.step < 0:
+ overlap = overlap[::-1]
+
+ if len(overlap) == 0:
+ return self._shallow_copy(name=res_name)
+ if len(overlap) == len(self):
+ return self[:0].rename(res_name)
+ if not isinstance(overlap, RangeIndex):
+ # We wont end up with RangeIndex, so fall back
+ return super().difference(other, sort=sort)
+
+ if overlap[0] == first.start:
+ # The difference is everything after the intersection
+ new_rng = range(overlap[-1] + first.step, first.stop, first.step)
+ elif overlap[-1] == first.stop:
+ # The difference is everything before the intersection
+ new_rng = range(first.start, overlap[0] - first.step, first.step)
+ else:
+ # The difference is not range-like
+ return super().difference(other, sort=sort)
+
+ new_index = type(self)._simple_new(new_rng, name=res_name)
+ if first is not self._range:
+ new_index = new_index[::-1]
+ return new_index
+
+ def symmetric_difference(self, other, result_name=None, sort=None):
+ if not isinstance(other, RangeIndex) or sort is not None:
+ return super().symmetric_difference(other, result_name, sort)
+
+ left = self.difference(other)
+ right = other.difference(self)
+ result = left.union(right)
+
+ if result_name is not None:
+ result = result.rename(result_name)
+ return result
+
+ # --------------------------------------------------------------------
+
@doc(Int64Index.join)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
if how == "outer" and self is not other:
@@ -746,12 +800,17 @@ def __floordiv__(self, other):
return self._simple_new(new_range, name=self.name)
return self._int64index // other
+ # --------------------------------------------------------------------
+ # Reductions
+
def all(self) -> bool:
return 0 not in self._range
def any(self) -> bool:
return any(self._range)
+ # --------------------------------------------------------------------
+
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py
index 5b565310cfb9c..9c9f5dbdf7e7f 100644
--- a/pandas/tests/indexes/ranges/test_setops.py
+++ b/pandas/tests/indexes/ranges/test_setops.py
@@ -239,3 +239,51 @@ def test_union_sorted(self, unions):
res3 = idx1._int64index.union(idx2, sort=None)
tm.assert_index_equal(res2, expected_sorted, exact=True)
tm.assert_index_equal(res3, expected_sorted)
+
+ def test_difference(self):
+ # GH#12034 Cases where we operate against another RangeIndex and may
+ # get back another RangeIndex
+ obj = RangeIndex.from_range(range(1, 10), name="foo")
+
+ result = obj.difference(obj)
+ expected = RangeIndex.from_range(range(0), name="foo")
+ tm.assert_index_equal(result, expected)
+
+ result = obj.difference(expected.rename("bar"))
+ tm.assert_index_equal(result, obj.rename(None))
+
+ result = obj.difference(obj[:3])
+ tm.assert_index_equal(result, obj[3:])
+
+ result = obj.difference(obj[-3:])
+ tm.assert_index_equal(result, obj[:-3])
+
+ result = obj.difference(obj[2:6])
+ expected = Int64Index([1, 2, 7, 8, 9], name="foo")
+ tm.assert_index_equal(result, expected)
+
+ def test_symmetric_difference(self):
+ # GH#12034 Cases where we operate against another RangeIndex and may
+ # get back another RangeIndex
+ left = RangeIndex.from_range(range(1, 10), name="foo")
+
+ result = left.symmetric_difference(left)
+ expected = RangeIndex.from_range(range(0), name="foo")
+ tm.assert_index_equal(result, expected)
+
+ result = left.symmetric_difference(expected.rename("bar"))
+ tm.assert_index_equal(result, left.rename(None))
+
+ result = left[:-2].symmetric_difference(left[2:])
+ expected = Int64Index([1, 2, 8, 9], name="foo")
+ tm.assert_index_equal(result, expected)
+
+ right = RangeIndex.from_range(range(10, 15))
+
+ result = left.symmetric_difference(right)
+ expected = RangeIndex.from_range(range(1, 15))
+ tm.assert_index_equal(result, expected)
+
+ result = left.symmetric_difference(right[1:])
+ expected = Int64Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14])
+ tm.assert_index_equal(result, expected)
| - [x] closes #12034
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36564 | 2020-09-23T02:03:47Z | 2020-10-07T03:24:42Z | 2020-10-07T03:24:42Z | 2020-10-07T03:32:08Z |
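A short usage sketch of the new fast paths, matching the tests above (`Int64Index` is the fallback type on the pandas version this PR targets):
```python
import pandas as pd

idx = pd.RangeIndex(1, 10, name="foo")

# Removing a prefix or suffix keeps the result range-like:
print(idx.difference(idx[:3]))   # RangeIndex(start=4, stop=10, step=1, name='foo')

# Two disjoint, adjacent ranges also combine back into a RangeIndex:
print(idx.symmetric_difference(pd.RangeIndex(10, 15)))  # RangeIndex(start=1, stop=15, step=1)

# A hole in the middle cannot be expressed as a range, so it falls back:
print(idx.difference(idx[2:6]))  # Int64Index([1, 2, 7, 8, 9], dtype='int64', name='foo')
```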
Closes #36541 (BUG: ValueError: cannot convert float NaN to integer when resetting MultiIndex with NaT values) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 5c2d099ed3119..febfa59b7e962 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -370,6 +370,7 @@ MultiIndex
^^^^^^^^^^
- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message ``"Expected label or tuple of labels"`` (:issue:`35301`)
+- Bug in :meth:`DataFrame.reset_index` with ``NaT`` values in index raises ``ValueError`` with message ``"cannot convert float NaN to integer"`` (:issue:`36541`)
-
I/O
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 3aa1317f6db6d..48391ab7d9373 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -73,7 +73,7 @@
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
-from pandas.core.dtypes.missing import isna, notna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
if TYPE_CHECKING:
from pandas import Series
@@ -1559,6 +1559,11 @@ def construct_1d_arraylike_from_scalar(
dtype = np.dtype("object")
if not isna(value):
value = ensure_str(value)
+ elif dtype.kind in ["M", "m"] and is_valid_nat_for_dtype(value, dtype):
+ # GH36541: can't fill array directly with pd.NaT
+ # > np.empty(10, dtype="datetime64[64]").fill(pd.NaT)
+ # ValueError: cannot convert float NaN to integer
+ value = np.datetime64("NaT")
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
diff --git a/pandas/tests/series/indexing/test_multiindex.py b/pandas/tests/series/indexing/test_multiindex.py
index e98a32d62b767..0420d76b5e8a8 100644
--- a/pandas/tests/series/indexing/test_multiindex.py
+++ b/pandas/tests/series/indexing/test_multiindex.py
@@ -1,8 +1,10 @@
""" test get/set & misc """
+import pytest
import pandas as pd
from pandas import MultiIndex, Series
+import pandas._testing as tm
def test_access_none_value_in_multiindex():
@@ -20,3 +22,30 @@ def test_access_none_value_in_multiindex():
s = Series([1] * len(midx), dtype=object, index=midx)
result = s.loc[("Level1", "Level2_a")]
assert result == 1
+
+
+@pytest.mark.parametrize(
+ "ix_data, exp_data",
+ [
+ (
+ [(pd.NaT, 1), (pd.NaT, 2)],
+ {"a": [pd.NaT, pd.NaT], "b": [1, 2], "x": [11, 12]},
+ ),
+ (
+ [(pd.NaT, 1), (pd.Timestamp("2020-01-01"), 2)],
+ {"a": [pd.NaT, pd.Timestamp("2020-01-01")], "b": [1, 2], "x": [11, 12]},
+ ),
+ (
+ [(pd.NaT, 1), (pd.Timedelta(123, "d"), 2)],
+ {"a": [pd.NaT, pd.Timedelta(123, "d")], "b": [1, 2], "x": [11, 12]},
+ ),
+ ],
+)
+def test_nat_multi_index(ix_data, exp_data):
+ # GH36541: that reset_index() does not raise ValueError
+ ix = pd.MultiIndex.from_tuples(ix_data, names=["a", "b"])
+ result = pd.DataFrame({"x": [11, 12]}, index=ix)
+ result = result.reset_index()
+
+ expected = pd.DataFrame(exp_data)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #36541
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36563 | 2020-09-23T01:33:27Z | 2020-10-07T02:43:42Z | 2020-10-07T02:43:42Z | 2020-10-07T02:43:48Z |
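A minimal reproduction of the bug this row fixes, lifted from the new test: before the change, materializing a fresh `datetime64` column went through `np.empty(...).fill(pd.NaT)` and raised the `ValueError`; afterwards the scalar is converted to `np.datetime64("NaT")` first.
```python
import pandas as pd

ix = pd.MultiIndex.from_tuples([(pd.NaT, 1), (pd.NaT, 2)], names=["a", "b"])
df = pd.DataFrame({"x": [11, 12]}, index=ix)

# Raised "ValueError: cannot convert float NaN to integer" before the fix:
print(df.reset_index())
#      a  b   x
# 0  NaT  1  11
# 1  NaT  2  12
```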
REF: share _reduce | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 808d598558c83..2bf530eb2bad4 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -227,3 +227,11 @@ def fillna(self: _T, value=None, method=None, limit=None) -> _T:
else:
new_values = self.copy()
return new_values
+
+ def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ meth = getattr(self, name, None)
+ if meth:
+ return meth(skipna=skipna, **kwargs)
+ else:
+ msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
+ raise TypeError(msg)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index ef69d6565cfeb..1efe6ac6ae70b 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1986,12 +1986,6 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
# ------------------------------------------------------------------
# Reductions
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
- func = getattr(self, name, None)
- if func is None:
- raise TypeError(f"Categorical cannot perform the operation {name}")
- return func(skipna=skipna, **kwargs)
-
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def min(self, skipna=True, **kwargs):
"""
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 7051507f9a90e..6752a98345b6a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1453,13 +1453,6 @@ def __isub__(self, other):
# --------------------------------------------------------------
# Reductions
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
- op = getattr(self, name, None)
- if op:
- return op(skipna=skipna, **kwargs)
- else:
- return super()._reduce(name, skipna, **kwargs)
-
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Array or minimum along
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 61076132b24cd..f65b130b396da 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -272,14 +272,6 @@ def _values_for_factorize(self) -> Tuple[np.ndarray, int]:
# ------------------------------------------------------------------------
# Reductions
- def _reduce(self, name, skipna=True, **kwargs):
- meth = getattr(self, name, None)
- if meth:
- return meth(skipna=skipna, **kwargs)
- else:
- msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
- raise TypeError(msg)
-
def any(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 9d118f1ed8753..34194738bf4ab 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -353,7 +353,7 @@ def test_numeric_like_ops(self):
# min/max)
s = df["value_group"]
for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
- msg = f"Categorical cannot perform the operation {op}"
+ msg = f"'Categorical' does not implement reduction '{op}'"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
@@ -362,7 +362,7 @@ def test_numeric_like_ops(self):
# numpy ops
s = Series(Categorical([1, 2, 3, 4]))
with pytest.raises(
- TypeError, match="Categorical cannot perform the operation sum"
+ TypeError, match="'Categorical' does not implement reduction 'sum'"
):
np.sum(s)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index f512b168d2795..3f5ab5baa7d69 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -205,7 +205,8 @@ def test_reduce_invalid(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
- with pytest.raises(TypeError, match="cannot perform"):
+ msg = f"'{type(arr).__name__}' does not implement reduction 'not a method'"
+ with pytest.raises(TypeError, match=msg):
arr._reduce("not a method")
@pytest.mark.parametrize("method", ["pad", "backfill"])
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index bbf2d9f1f0784..db7cd54d23a2b 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -351,6 +351,7 @@ def test_invalid_td64_reductions(self, opname):
[
f"reduction operation '{opname}' not allowed for this dtype",
rf"cannot perform {opname} with type timedelta64\[ns\]",
+ f"'TimedeltaArray' does not implement reduction '{opname}'",
]
)
@@ -695,6 +696,7 @@ def test_ops_consistency_on_empty(self, method):
[
"operation 'var' not allowed",
r"cannot perform var with type timedelta64\[ns\]",
+ "'TimedeltaArray' does not implement reduction 'var'",
]
)
with pytest.raises(TypeError, match=msg):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
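
For reference, a quick illustration of the unified error message after this refactor, taken from the updated tests:

```
import numpy as np
import pandas as pd

s = pd.Series(pd.Categorical([1, 2, 3, 4]))
np.sum(s)  # TypeError: 'Categorical' does not implement reduction 'sum'
```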
| https://api.github.com/repos/pandas-dev/pandas/pulls/36561 | 2020-09-22T22:35:00Z | 2020-09-23T13:14:20Z | 2020-09-23T13:14:20Z | 2020-09-23T15:34:50Z |
[BUG]: Fix regression in read_table with delim_whitespace=True | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 7c7e40e633acc..8ff30f8fe5b4e 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -39,6 +39,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.apply` with ``raw=True`` and user-function returning string (:issue:`35940`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
+- Fixed regression in :func:`read_table` raised ``ValueError`` when ``delim_whitespace`` was set to ``True`` (:issue:`35958`)
- Fixed regression in :meth:`Series.dt.normalize` when normalizing pre-epoch dates the result was shifted one day (:issue:`36294`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index c839129b91e12..e5b7aea895f86 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -757,6 +757,16 @@ def read_table(
memory_map=False,
float_precision=None,
):
+ # TODO: validation duplicated in read_csv
+ if delim_whitespace and (delimiter is not None or sep != "\t"):
+ raise ValueError(
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+ if delim_whitespace:
+ # In this case sep is not used so we set it to the read_csv
+ # default to avoid a ValueError
+ sep = ","
return read_csv(**locals())
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 08eab69900400..78c2f2bce5a02 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -2200,3 +2200,24 @@ def test_read_csv_with_use_inf_as_na(all_parsers):
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([1.0, np.nan, 3.0])
tm.assert_frame_equal(result, expected)
+
+
+def test_read_table_delim_whitespace_default_sep(all_parsers):
+ # GH: 35958
+ f = StringIO("a b c\n1 -2 -3\n4 5 6")
+ parser = all_parsers
+ result = parser.read_table(f, delim_whitespace=True)
+ expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_table_delim_whitespace_non_default_sep(all_parsers):
+ # GH: 35958
+ f = StringIO("a b c\n1 -2 -3\n4 5 6")
+ parser = all_parsers
+ msg = (
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+ with pytest.raises(ValueError, match=msg):
+ parser.read_table(f, delim_whitespace=True, sep=",")
| - [x] closes #35958
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Alternatively, we could write a private function that is called from both `read_csv` and `read_table` with the appropriate default `sep`.
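Something along these lines (the helper name and signature are hypothetical, not part of this PR):

```
def _validate_delim_whitespace(sep, delimiter, delim_whitespace, default_sep):
    # hypothetical shared helper, called from read_csv and read_table
    if delim_whitespace and (delimiter is not None or sep != default_sep):
        raise ValueError(
            "Specified a delimiter with both sep and "
            "delim_whitespace=True; you can only specify one."
        )
    if delim_whitespace:
        # sep is unused in this case; fall back to the read_csv default
        return ","
    return sep
```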
Edit: `black pandas` reformats files I did not touch. Was there an update or a change of guidelines? | https://api.github.com/repos/pandas-dev/pandas/pulls/36560 | 2020-09-22T21:07:21Z | 2020-09-26T10:31:11Z | 2020-09-26T10:31:10Z | 2020-09-29T21:07:59Z
TYP/CLN: exclusions in BaseGroupBy | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 9a14323dd8c3a..f1a61f433fc51 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -24,6 +24,7 @@ class providing the base-class of operations.
Mapping,
Optional,
Sequence,
+ Set,
Tuple,
Type,
TypeVar,
@@ -36,7 +37,7 @@ class providing the base-class of operations.
from pandas._libs import Timestamp, lib
import pandas._libs.groupby as libgroupby
-from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar
+from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Label, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
@@ -488,7 +489,7 @@ def __init__(
axis: int = 0,
level=None,
grouper: Optional["ops.BaseGrouper"] = None,
- exclusions=None,
+ exclusions: Optional[Set[Label]] = None,
selection=None,
as_index: bool = True,
sort: bool = True,
@@ -537,7 +538,7 @@ def __init__(
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
- self.exclusions = set(exclusions) if exclusions else set()
+ self.exclusions = exclusions or set()
def __len__(self) -> int:
return len(self.groups)
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 59ea7781025c4..6263d5337f42f 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -2,12 +2,12 @@
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
-from typing import Dict, Hashable, List, Optional, Tuple
+from typing import Dict, Hashable, List, Optional, Set, Tuple
import warnings
import numpy as np
-from pandas._typing import FrameOrSeries
+from pandas._typing import FrameOrSeries, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly
@@ -614,7 +614,7 @@ def get_grouper(
mutated: bool = False,
validate: bool = True,
dropna: bool = True,
-) -> Tuple["ops.BaseGrouper", List[Hashable], FrameOrSeries]:
+) -> Tuple["ops.BaseGrouper", Set[Label], FrameOrSeries]:
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
@@ -690,13 +690,13 @@ def get_grouper(
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
- return grouper, [], obj
+ return grouper, set(), obj
else:
- return grouper, [key.key], obj
+ return grouper, {key.key}, obj
# already have a BaseGrouper, just return it
elif isinstance(key, ops.BaseGrouper):
- return key, [], obj
+ return key, set(), obj
if not isinstance(key, list):
keys = [key]
@@ -739,7 +739,7 @@ def get_grouper(
levels = [level] * len(keys)
groupings: List[Grouping] = []
- exclusions: List[Hashable] = []
+ exclusions: Set[Label] = set()
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
@@ -769,21 +769,21 @@ def is_in_obj(gpr) -> bool:
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
- exclusions.append(name)
+ exclusions.add(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
- exclusions.append(name)
+ exclusions.add(name)
elif obj._is_level_reference(gpr, axis=axis):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
- exclusions.append(gpr.key)
+ exclusions.add(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
| Part of the code had `exclusions` as a list of hashables; this changes it to a set. | https://api.github.com/repos/pandas-dev/pandas/pulls/36559 | 2020-09-22T20:58:50Z | 2020-09-22T22:16:05Z | 2020-09-22T22:16:05Z | 2020-09-26T12:00:16Z
REF: de-duplicate Categorical validators | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index ef69d6565cfeb..e984f2c26b916 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1177,13 +1177,7 @@ def _validate_where_value(self, value):
return self._validate_listlike(value)
def _validate_insert_value(self, value) -> int:
- code = self.categories.get_indexer([value])
- if (code == -1) and not (is_scalar(value) and isna(value)):
- raise TypeError(
- "cannot insert an item into a CategoricalIndex "
- "that is not already an existing category"
- )
- return code[0]
+ return self._validate_fill_value(value)
def _validate_searchsorted_value(self, value):
# searchsorted is very performance sensitive. By converting codes
@@ -1213,7 +1207,7 @@ def _validate_fill_value(self, fill_value):
ValueError
"""
- if isna(fill_value):
+ if is_valid_nat_for_dtype(fill_value, self.categories.dtype):
fill_value = -1
elif fill_value in self.categories:
fill_value = self._unbox_scalar(fill_value)
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index a3a06338a0277..81b31e3ea180c 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -171,11 +171,8 @@ def test_insert(self):
tm.assert_index_equal(result, expected, exact=True)
# invalid
- msg = (
- "cannot insert an item into a CategoricalIndex that is not "
- "already an existing category"
- )
- with pytest.raises(TypeError, match=msg):
+ msg = "'fill_value=d' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
ci.insert(0, "d")
# GH 18295 (test missing)
@@ -184,6 +181,12 @@ def test_insert(self):
result = CategoricalIndex(list("aabcb")).insert(1, na)
tm.assert_index_equal(result, expected)
+ def test_insert_na_mismatched_dtype(self):
+ ci = pd.CategoricalIndex([0, 1, 1])
+ msg = "'fill_value=NaT' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
+ ci.insert(0, pd.NaT)
+
def test_delete(self):
ci = self.create_index()
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 98edb56260b01..9f3ee81fac2eb 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -76,9 +76,10 @@ def test_loc_scalar(self):
"cannot insert an item into a CategoricalIndex that is not "
"already an existing category"
)
- with pytest.raises(TypeError, match=msg):
+ msg = "'fill_value=d' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
df.loc["d", "A"] = 10
- with pytest.raises(TypeError, match=msg):
+ with pytest.raises(ValueError, match=msg):
df.loc["d", "C"] = 10
with pytest.raises(KeyError, match="^1$"):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
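
For reference, the user-visible change per the updated tests: inserting a value that is not an existing category now raises `ValueError` with the fill-value message rather than the old `TypeError`:

```
import pandas as pd

ci = pd.CategoricalIndex(list("aabcb"))
ci.insert(0, "d")
# ValueError: 'fill_value=d' is not present in this Categorical's categories
```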
| https://api.github.com/repos/pandas-dev/pandas/pulls/36558 | 2020-09-22T20:48:48Z | 2020-09-22T22:17:16Z | 2020-09-22T22:17:16Z | 2020-09-22T22:18:07Z |
[BUG]: Fix bug with pre-epoch normalization | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index c63a78c76572f..4ad85fd6bafa6 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -38,6 +38,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.apply` with ``raw=True`` and user-function returning string (:issue:`35940`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
+- Fixed regression in :meth:`Series.dt.normalize` when normalizing pre-epoch dates the result was shifted one day (:issue:`36294`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index adf1dfbc1ac72..3b52b4d499694 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -830,7 +830,7 @@ cpdef inline datetime localize_pydatetime(datetime dt, object tz):
# ----------------------------------------------------------------------
# Normalization
-@cython.cdivision
+@cython.cdivision(False)
cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil:
"""
Round the localized nanosecond timestamp down to the previous midnight.
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 8641bbd0a66f2..e8196cd8328e7 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -397,6 +397,12 @@ def test_normalize(self, tz_naive_fixture, arg):
expected = Timestamp("2013-11-30", tz=tz)
assert result == expected
+ def test_normalize_pre_epoch_dates(self):
+ # GH: 36294
+ result = Timestamp("1969-01-01 09:00:00").normalize()
+ expected = Timestamp("1969-01-01 00:00:00")
+ assert result == expected
+
# --------------------------------------------------------------
@td.skip_if_windows
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 723bd303b1974..b0926089bd7b4 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -702,3 +702,11 @@ def test_week_and_weekofyear_are_deprecated():
series.dt.week
with tm.assert_produces_warning(FutureWarning):
series.dt.weekofyear
+
+
+def test_normalize_pre_epoch_dates():
+ # GH: 36294
+ s = pd.to_datetime(pd.Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
+ result = s.dt.normalize()
+ expected = pd.to_datetime(pd.Series(["1969-01-01", "2016-01-01"]))
+ tm.assert_series_equal(result, expected)
| - [x] closes #36294
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The fix proposed by @jbrockmendel worked.
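A sketch of why C division semantics broke this, assuming the normalization boils down to `ts - ts % DAY_NS` (per the `normalize_i8_stamp` docstring, "Round the localized nanosecond timestamp down to the previous midnight"):

```
DAY_NS = 24 * 3600 * 10 ** 9
ts = -31_503_600_000_000_000  # i8 value of Timestamp("1969-01-01 09:00:00")

# Python "%" rounds toward -inf, so this lands on the previous midnight:
ts - ts % DAY_NS  # -31_536_000_000_000_000 -> 1969-01-01 00:00:00

# With cython.cdivision enabled, "%" uses C truncating semantics instead,
# rounding toward zero and landing one day later (1969-01-02 00:00:00).
```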
Hope the test is in the right place | https://api.github.com/repos/pandas-dev/pandas/pulls/36557 | 2020-09-22T20:23:57Z | 2020-09-26T01:22:29Z | 2020-09-26T01:22:28Z | 2020-09-29T21:08:14Z |
[TST]: Add test for duplicate keys in concat | diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index db93e831e8e0b..cd58df4fc5da6 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -554,3 +554,21 @@ def test_concat_preserves_extension_int64_dtype():
result = pd.concat([df_a, df_b], ignore_index=True)
expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("keys", "integrity"),
+ [
+ (["red"] * 3, True),
+ (["red"] * 3, False),
+ (["red", "blue", "red"], False),
+ (["red", "blue", "red"], True),
+ ],
+)
+def test_concat_repeated_keys(keys, integrity):
+ # GH: 20816
+ series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})]
+ result = concat(series_list, keys=keys, verify_integrity=integrity)
+ tuples = list(zip(keys, ["a", "b", "c"]))
+ expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
+ tm.assert_series_equal(result, expected)
| - [x] closes #20816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Added a test | https://api.github.com/repos/pandas-dev/pandas/pulls/36556 | 2020-09-22T20:05:24Z | 2020-11-26T23:29:27Z | 2020-11-26T23:29:27Z | 2020-11-27T11:55:16Z |
Call finalize in Series.dt | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6a5b4b3b9ff16..7280ccc633f17 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -374,6 +374,7 @@ Other
^^^^^
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
+- Fixed metadata propagation in the :class:`Series.dt` accessor (:issue:`28283`)
- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 881d5ce1fbaab..aa2c04e48eb81 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -78,7 +78,7 @@ def _delegate_property_get(self, name):
else:
index = self._parent.index
# return the result as a Series, which is by definition a copy
- result = Series(result, index=index, name=self.name)
+ result = Series(result, index=index, name=self.name).__finalize__(self._parent)
# setting this object will show a SettingWithCopyWarning/Error
result._is_copy = (
@@ -106,7 +106,9 @@ def _delegate_method(self, name, *args, **kwargs):
if not is_list_like(result):
return result
- result = Series(result, index=self._parent.index, name=self.name)
+ result = Series(result, index=self._parent.index, name=self.name).__finalize__(
+ self._parent
+ )
# setting this object will show a SettingWithCopyWarning/Error
result._is_copy = (
@@ -371,7 +373,11 @@ def components(self):
3 0 0 0 3 0 0 0
4 0 0 0 4 0 0 0
"""
- return self._get_values().components.set_index(self._parent.index)
+ return (
+ self._get_values()
+ .components.set_index(self._parent.index)
+ .__finalize__(self._parent)
+ )
@property
def freq(self):
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 8898619e374ab..6692102bc9008 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -678,7 +678,6 @@ def test_string_method(method):
],
ids=idfn,
)
-@not_implemented_mark
def test_datetime_method(method):
s = pd.Series(pd.date_range("2000", periods=4))
s.attrs = {"a": 1}
@@ -714,7 +713,6 @@ def test_datetime_method(method):
"days_in_month",
],
)
-@not_implemented_mark
def test_datetime_property(attr):
s = pd.Series(pd.date_range("2000", periods=4))
s.attrs = {"a": 1}
@@ -725,7 +723,6 @@ def test_datetime_property(attr):
@pytest.mark.parametrize(
"attr", ["days", "seconds", "microseconds", "nanoseconds", "components"]
)
-@not_implemented_mark
def test_timedelta_property(attr):
s = pd.Series(pd.timedelta_range("2000", periods=4))
s.attrs = {"a": 1}
@@ -734,7 +731,6 @@ def test_timedelta_property(attr):
@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")])
-@not_implemented_mark
def test_timedelta_methods(method):
s = pd.Series(pd.timedelta_range("2000", periods=4))
s.attrs = {"a": 1}
| xref #28283 | https://api.github.com/repos/pandas-dev/pandas/pulls/36554 | 2020-09-22T19:39:41Z | 2020-09-22T22:18:07Z | 2020-09-22T22:18:07Z | 2020-09-22T22:18:11Z |
REGR: Series.__mod__ behaves differently with numexpr | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 91b9cf59687b3..15777abcb8084 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -34,6 +34,7 @@ Fixed regressions
- Fixed regression when adding a :meth:`timedelta_range` to a :class:`Timestamp` raised a ``ValueError`` (:issue:`35897`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
+- Fixed regression in modulo of :class:`Index`, :class:`Series` and :class:`DataFrame` using ``numexpr`` using C not Python semantics (:issue:`36047`, :issue:`36526`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
- Fixed regression in :meth:`DataFrame.replace` inconsistent replace when using a float in the replace method (:issue:`35376`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 0032fe97b8b33..5bfd2e93a9247 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -133,7 +133,10 @@ def _evaluate_numexpr(op, op_str, a, b):
roperator.rtruediv: "/",
operator.floordiv: "//",
roperator.rfloordiv: "//",
- operator.mod: "%",
+ # we require Python semantics for mod of negative for backwards compatibility
+ # see https://github.com/pydata/numexpr/issues/365
+ # so sticking with unaccelerated for now
+ operator.mod: None,
roperator.rmod: "%",
operator.pow: "**",
roperator.rpow: "**",
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index e04db92b58c36..852157e52d5fe 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -171,8 +171,6 @@ def _create_methods(cls, arith_method, comp_method, bool_method, special):
mul=arith_method(cls, operator.mul, special),
truediv=arith_method(cls, operator.truediv, special),
floordiv=arith_method(cls, operator.floordiv, special),
- # Causes a floating point exception in the tests when numexpr enabled,
- # so for now no speedup
mod=arith_method(cls, operator.mod, special),
pow=arith_method(cls, operator.pow, special),
# not entirely sure why this is necessary, but previously was included
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index da7f8b9b4a721..6db1078fcde4f 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -6,7 +6,7 @@
import pytest
import pandas._testing as tm
-from pandas.core.api import DataFrame
+from pandas.core.api import DataFrame, Index, Series
from pandas.core.computation import expressions as expr
_frame = DataFrame(randn(10000, 4), columns=list("ABCD"), dtype="float64")
@@ -380,3 +380,41 @@ def test_frame_series_axis(self, axis, arith):
result = op_func(other, axis=axis)
tm.assert_frame_equal(expected, result)
+
+ @pytest.mark.parametrize(
+ "op",
+ [
+ "__mod__",
+ pytest.param("__rmod__", marks=pytest.mark.xfail(reason="GH-36552")),
+ "__floordiv__",
+ "__rfloordiv__",
+ ],
+ )
+ @pytest.mark.parametrize("box", [DataFrame, Series, Index])
+ @pytest.mark.parametrize("scalar", [-5, 5])
+ def test_python_semantics_with_numexpr_installed(self, op, box, scalar):
+ # https://github.com/pandas-dev/pandas/issues/36047
+ expr._MIN_ELEMENTS = 0
+ data = np.arange(-50, 50)
+ obj = box(data)
+ method = getattr(obj, op)
+ result = method(scalar)
+
+ # compare result with numpy
+ expr.set_use_numexpr(False)
+ expected = method(scalar)
+ expr.set_use_numexpr(True)
+ tm.assert_equal(result, expected)
+
+ # compare result element-wise with Python
+ for i, elem in enumerate(data):
+ if box == DataFrame:
+ scalar_result = result.iloc[i, 0]
+ else:
+ scalar_result = result[i]
+ try:
+ expected = getattr(int(elem), op)(scalar)
+ except ZeroDivisionError:
+ pass
+ else:
+ assert scalar_result == expected
| - [ ] closes #36047
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
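
For reference, the semantic difference at stake: Python's `%` takes the sign of the divisor, while C-style remainder (roughly what the accelerated numexpr path gave for negative operands, see the linked numexpr issue) truncates toward zero:

```
import numpy as np

(-5) % 3        # 1    -> Python semantics, sign follows the divisor
np.fmod(-5, 3)  # -2.0 -> C semantics, truncates toward zero
```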
| https://api.github.com/repos/pandas-dev/pandas/pulls/36552 | 2020-09-22T19:17:36Z | 2020-09-30T20:27:08Z | 2020-09-30T20:27:08Z | 2020-09-30T20:29:25Z |
PERF: TimedeltaArray.__iter__ | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 27c904dda5b45..4ed542b3a28e3 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -3,7 +3,14 @@
import dateutil
import numpy as np
-from pandas import DataFrame, Series, date_range, period_range, to_datetime
+from pandas import (
+ DataFrame,
+ Series,
+ date_range,
+ period_range,
+ timedelta_range,
+ to_datetime,
+)
from pandas.tseries.frequencies import infer_freq
@@ -121,12 +128,15 @@ def time_convert(self):
class Iteration:
- params = [date_range, period_range]
+ params = [date_range, period_range, timedelta_range]
param_names = ["time_index"]
def setup(self, time_index):
N = 10 ** 6
- self.idx = time_index(start="20140101", freq="T", periods=N)
+ if time_index is timedelta_range:
+ self.idx = time_index(start=0, freq="T", periods=N)
+ else:
+ self.idx = time_index(start="20140101", freq="T", periods=N)
self.exit = 10000
def time_iter(self, time_index):
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 7051507f9a90e..e573d9e8f0504 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -469,7 +469,10 @@ def _box_values(self, values):
return lib.map_infer(values, self._box_func)
def __iter__(self):
- return (self._box_func(v) for v in self.asi8)
+ if self.ndim > 1:
+ return (self[n] for n in range(len(self)))
+ else:
+ return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b1f98199f9fba..da641265d1d20 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -558,19 +558,21 @@ def __iter__(self):
------
tstamp : Timestamp
"""
-
- # convert in chunks of 10k for efficiency
- data = self.asi8
- length = len(self)
- chunksize = 10000
- chunks = int(length / chunksize) + 1
- for i in range(chunks):
- start_i = i * chunksize
- end_i = min((i + 1) * chunksize, length)
- converted = ints_to_pydatetime(
- data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
- )
- yield from converted
+ if self.ndim > 1:
+ return (self[n] for n in range(len(self)))
+ else:
+ # convert in chunks of 10k for efficiency
+ data = self.asi8
+ length = len(self)
+ chunksize = 10000
+ chunks = int(length / chunksize) + 1
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, length)
+ converted = ints_to_pydatetime(
+ data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
+ )
+ yield from converted
def astype(self, dtype, copy=True):
# We handle
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 3eaf428bc64b2..fa90f86d328c4 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -16,7 +16,11 @@
)
from pandas._libs.tslibs.conversion import precision_from_unit
from pandas._libs.tslibs.fields import get_timedelta_field
-from pandas._libs.tslibs.timedeltas import array_to_timedelta64, parse_timedelta_unit
+from pandas._libs.tslibs.timedeltas import (
+ array_to_timedelta64,
+ ints_to_pytimedelta,
+ parse_timedelta_unit,
+)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
@@ -328,6 +332,21 @@ def astype(self, dtype, copy=True):
return self
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)
+ def __iter__(self):
+ if self.ndim > 1:
+ return (self[n] for n in range(len(self)))
+ else:
+ # convert in chunks of 10k for efficiency
+ data = self.asi8
+ length = len(self)
+ chunksize = 10000
+ chunks = int(length / chunksize) + 1
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, length)
+ converted = ints_to_pytimedelta(data[start_i:end_i], box=True)
+ yield from converted
+
# ----------------------------------------------------------------
# Reductions
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index f512b168d2795..dbe3f9f2fd5c5 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -310,6 +310,15 @@ def test_getitem_2d(self, arr1d):
expected = arr1d[-1]
assert result == expected
+ def test_iter_2d(self, arr1d):
+ data2d = arr1d._data[:3, np.newaxis]
+ arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
+ result = list(arr2d)
+ for x in result:
+ assert isinstance(x, type(arr1d))
+ assert x.ndim == 1
+ assert x.dtype == arr1d.dtype
+
def test_setitem(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
| - [x] closes #26713
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Also 2d compat for TDA/DTA `__iter__` | https://api.github.com/repos/pandas-dev/pandas/pulls/36551 | 2020-09-22T16:27:26Z | 2020-10-14T18:19:25Z | 2020-10-14T18:19:25Z | 2020-10-14T18:30:06Z |
CI: fix failing pre-commit | diff --git a/setup.py b/setup.py
index 8f447d5c38169..8e25705c1f4c3 100755
--- a/setup.py
+++ b/setup.py
@@ -387,8 +387,7 @@ def build_extension(self, ext):
class DummyBuildSrc(Command):
- """ numpy's build_src command interferes with Cython's build_ext.
- """
+ """numpy's build_src command interferes with Cython's build_ext."""
user_options = []
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36549 | 2020-09-22T15:22:23Z | 2020-09-22T16:18:53Z | 2020-09-22T16:18:53Z | 2020-09-22T16:22:51Z |
Backport PR #36546 on branch 1.1.x (TST: add missing assert) | diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 72bc13e67c040..337ec683ee745 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -680,3 +680,4 @@ def test_index_name_empty(self):
expected = pd.DataFrame(
{"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index")
)
+ tm.assert_frame_equal(df, expected)
| Backport PR #36546: TST: add missing assert | https://api.github.com/repos/pandas-dev/pandas/pulls/36547 | 2020-09-22T12:51:42Z | 2020-09-22T15:30:53Z | 2020-09-22T15:30:53Z | 2020-09-22T15:30:53Z |
TST: add missing assert | diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 72bc13e67c040..337ec683ee745 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -680,3 +680,4 @@ def test_index_name_empty(self):
expected = pd.DataFrame(
{"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index")
)
+ tm.assert_frame_equal(df, expected)
| Small follow-up on https://github.com/pandas-dev/pandas/pull/36532 | https://api.github.com/repos/pandas-dev/pandas/pulls/36546 | 2020-09-22T11:59:14Z | 2020-09-22T12:49:43Z | 2020-09-22T12:49:43Z | 2020-09-22T14:07:46Z |
Backport PR #36532 on branch 1.1.x (BUG: Fix issue in preserving index name on empty DataFrame) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 72937141c2870..e3a96c69918db 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -35,6 +35,7 @@ Fixed regressions
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
+- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9af9c19392ef7..0cbcb0ce3d700 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3172,7 +3172,8 @@ def _ensure_valid_index(self, value):
# GH31368 preserve name of index
index_copy = value.index.copy()
- index_copy.name = self.index.name
+ if self.index.name is not None:
+ index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 7afbbc2b9ab2b..72bc13e67c040 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -672,3 +672,11 @@ def test_index_name_empty(self):
)
tm.assert_frame_equal(df, expected)
+
+ # GH 36527
+ df = pd.DataFrame()
+ series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index"))
+ df["series"] = series
+ expected = pd.DataFrame(
+ {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index")
+ )
| Backport PR #36532: BUG: Fix issue in preserving index name on empty DataFrame | https://api.github.com/repos/pandas-dev/pandas/pulls/36545 | 2020-09-22T11:57:42Z | 2020-09-22T12:49:36Z | 2020-09-22T12:49:36Z | 2020-09-22T12:49:36Z |
Link to new location for scipy.window documentation | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 10e27606a1415..e7edda90610b5 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -433,7 +433,7 @@ The following methods are available:
The weights used in the window are specified by the ``win_type`` keyword.
The list of recognized types are the `scipy.signal window functions
-<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__:
+<https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__:
* ``boxcar``
* ``triang``
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 06c3ad23f904f..335fc3db5cd86 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -940,7 +940,7 @@ class Window(_Window):
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
- <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
+ <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
Certain window types require additional parameters to be passed. Please see
the third example below on how to add the additional parameters.
| - [x] closes #36539
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36540 | 2020-09-22T03:24:55Z | 2020-09-22T07:22:11Z | 2020-09-22T07:22:11Z | 2020-09-22T07:22:12Z |
validate fill_value in IntervalArray.take unconditionally | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index ebabc7edcbf43..1011381f235ca 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -812,9 +812,7 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
fill_left = fill_right = fill_value
if allow_fill:
- if (np.asarray(indices) == -1).any():
- # We have excel tests that pass fill_value=True, xref GH#36466
- fill_left, fill_right = self._validate_fill_value(fill_value)
+ fill_left, fill_right = self._validate_fill_value(fill_value)
left_take = take(
self.left, indices, allow_fill=allow_fill, fill_value=fill_left
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index cc7b6b0bfea97..0140804e8c7b5 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -629,7 +629,9 @@ def _format_hierarchical_rows(self):
):
values = levels.take(
- level_codes, allow_fill=levels._can_hold_na, fill_value=True
+ level_codes,
+ allow_fill=levels._can_hold_na,
+ fill_value=levels._na_value,
)
for i in spans:
| xref #36466 | https://api.github.com/repos/pandas-dev/pandas/pulls/36538 | 2020-09-22T01:42:55Z | 2020-09-22T14:54:32Z | 2020-09-22T14:54:31Z | 2020-09-22T14:54:53Z |
Revert "ENH: Optimize nrows in read_excel" | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 1eaccb9f2d897..80af2cff41769 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -11,7 +11,7 @@
def _generate_dataframe():
- N = 20000
+ N = 2000
C = 5
df = DataFrame(
np.random.randn(N, C),
@@ -69,9 +69,5 @@ def time_read_excel(self, engine):
fname = self.fname_odf if engine == "odf" else self.fname_excel
read_excel(fname, engine=engine)
- def time_read_excel_nrows(self, engine):
- fname = self.fname_odf if engine == "odf" else self.fname_excel
- read_excel(fname, engine=engine, nrows=1)
-
from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 19a563be0a568..18940b574b517 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -224,7 +224,6 @@ Performance improvements
- Performance improvements when creating DataFrame or Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`)
- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`)
-- Performance improvement in `read_excel` for when ``nrows`` is much smaller than the length of the file (:issue:`33281`).
- Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`)
- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`)
- ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 667f37f47e188..604b7e12ec243 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -3,12 +3,12 @@
from io import BufferedIOBase, BytesIO, RawIOBase
import os
from textwrap import fill
-from typing import Any, List, Mapping, Optional, Union
+from typing import Any, Mapping, Union
from pandas._config import config
from pandas._libs.parsers import STR_NA_VALUES
-from pandas._typing import Scalar, StorageOptions
+from pandas._typing import StorageOptions
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments
@@ -398,14 +398,7 @@ def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
- def get_sheet_data(
- self,
- sheet,
- convert_float: bool,
- header_nrows: int,
- skiprows_nrows: int,
- nrows: Optional[int],
- ) -> List[List[Scalar]]:
+ def get_sheet_data(self, sheet, convert_float):
pass
def parse(
@@ -461,22 +454,7 @@ def parse(
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
- if isinstance(header, int):
- header_nrows = header
- elif header is None:
- header_nrows = 0
- else:
- header_nrows = max(header)
- if isinstance(skiprows, int):
- skiprows_nrows = skiprows
- elif skiprows is None:
- skiprows_nrows = 0
- else:
- skiprows_nrows = len(skiprows)
-
- data = self.get_sheet_data(
- sheet, convert_float, header_nrows, skiprows_nrows, nrows
- )
+ data = self.get_sheet_data(sheet, convert_float)
usecols = maybe_convert_usecols(usecols)
if not data:
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 07d2f9a593b96..4f9f8a29c0010 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, cast
+from typing import List, cast
import numpy as np
@@ -71,14 +71,7 @@ def get_sheet_by_name(self, name: str):
raise ValueError(f"sheet {name} not found")
- def get_sheet_data(
- self,
- sheet,
- convert_float: bool,
- header_nrows: int,
- skiprows_nrows: int,
- nrows: Optional[int],
- ) -> List[List[Scalar]]:
+ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
"""
Parse an ODF Table into a list of lists
"""
@@ -94,8 +87,6 @@ def get_sheet_data(
table: List[List[Scalar]] = []
- if isinstance(nrows, int):
- sheet_rows = sheet_rows[: header_nrows + skiprows_nrows + nrows + 1]
for i, sheet_row in enumerate(sheet_rows):
sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names]
empty_cells = 0
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index bc7b168eeaaa2..a5cadf4d93389 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -508,14 +508,7 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.value
- def get_sheet_data(
- self,
- sheet,
- convert_float: bool,
- header_nrows: int,
- skiprows_nrows: int,
- nrows: Optional[int],
- ) -> List[List[Scalar]]:
+ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
data: List[List[Scalar]] = []
for row in sheet.rows:
data.append([self._convert_cell(cell, convert_float) for cell in row])
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index cf3dcebdff6eb..ac94f4dd3df74 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List
from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
@@ -68,14 +68,7 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.v
- def get_sheet_data(
- self,
- sheet,
- convert_float: bool,
- header_nrows: int,
- skiprows_nrows: int,
- nrows: Optional[int],
- ) -> List[List[Scalar]]:
+ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
return [
[self._convert_cell(c, convert_float) for c in r]
for r in sheet.rows(sparse=False)
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index e5d0d66f9570a..dfd5dde0329ae 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -1,9 +1,8 @@
from datetime import time
-from typing import List, Optional
import numpy as np
-from pandas._typing import Scalar, StorageOptions
+from pandas._typing import StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import BaseExcelReader
@@ -50,14 +49,7 @@ def get_sheet_by_name(self, name):
def get_sheet_by_index(self, index):
return self.book.sheet_by_index(index)
- def get_sheet_data(
- self,
- sheet,
- convert_float: bool,
- header_nrows: int,
- skiprows_nrows: int,
- nrows: Optional[int],
- ) -> List[List[Scalar]]:
+ def get_sheet_data(self, sheet, convert_float):
from xlrd import (
XL_CELL_BOOLEAN,
XL_CELL_DATE,
@@ -106,14 +98,9 @@ def _parse_cell(cell_contents, cell_typ):
cell_contents = val
return cell_contents
- data: List[List[Scalar]] = []
+ data = []
- sheet_nrows = sheet.nrows
-
- if isinstance(nrows, int):
- sheet_nrows = min(header_nrows + skiprows_nrows + nrows + 1, sheet_nrows)
-
- for i in range(sheet_nrows):
+ for i in range(sheet.nrows):
row = [
_parse_cell(value, typ)
for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 4fb1ef8fa0c15..4bdcc5b327fa7 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1195,21 +1195,5 @@ def test_read_datetime_multiindex(self, engine, read_ext):
],
)
expected = pd.DataFrame([], columns=expected_column_index)
- tm.assert_frame_equal(expected, actual)
- @pytest.mark.parametrize(
- "header, skiprows", [(1, 2), (0, 3), (1, [0, 1]), ([2], 1)]
- )
- @td.check_file_leaks
- def test_header_skiprows_nrows(self, engine, read_ext, header, skiprows):
- # GH 32727
- data = pd.read_excel("test1" + read_ext, engine=engine)
- expected = (
- DataFrame(data.iloc[3:6])
- .reset_index(drop=True)
- .rename(columns=data.iloc[2].rename(None))
- )
- actual = pd.read_excel(
- "test1" + read_ext, engine=engine, header=header, skiprows=skiprows, nrows=3
- )
tm.assert_frame_equal(expected, actual)
| Reverts pandas-dev/pandas#35974 | https://api.github.com/repos/pandas-dev/pandas/pulls/36537 | 2020-09-22T01:41:28Z | 2020-09-22T02:58:27Z | 2020-09-22T02:58:27Z | 2020-09-22T03:17:29Z |
CLN: Break up wrap applied output | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b9cc2c19c224b..29f13107f750a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1210,64 +1210,77 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
self._insert_inaxis_grouper_inplace(result)
return result
else:
- # this is to silence a DeprecationWarning
- # TODO: Remove when default dtype of empty Series is object
- kwargs = first_not_none._construct_axes_dict()
- backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
- values = [x if (x is not None) else backup for x in values]
-
- all_indexed_same = all_indexes_same(x.index for x in values)
-
- # GH3596
- # provide a reduction (Frame -> Series) if groups are
- # unique
- if self.squeeze:
- applied_index = self._selected_obj._get_axis(self.axis)
- singular_series = len(values) == 1 and applied_index.nlevels == 1
-
- # assign the name to this series
- if singular_series:
- values[0].name = keys[0]
-
- # GH2893
- # we have series in the values array, we want to
- # produce a series:
- # if any of the sub-series are not indexed the same
- # OR we don't have a multi-index and we have only a
- # single values
- return self._concat_objects(
- keys, values, not_indexed_same=not_indexed_same
- )
+ # values are Series
+ return self._wrap_applied_output_series(
+ keys, values, not_indexed_same, first_not_none, key_index
+ )
- # still a series
- # path added as of GH 5545
- elif all_indexed_same:
- from pandas.core.reshape.concat import concat
-
- return concat(values)
-
- if not all_indexed_same:
- # GH 8467
- return self._concat_objects(keys, values, not_indexed_same=True)
-
- # Combine values
- # vstack+constructor is faster than concat and handles MI-columns
- stacked_values = np.vstack([np.asarray(v) for v in values])
-
- if self.axis == 0:
- index = key_index
- columns = first_not_none.index.copy()
- if columns.name is None:
- # GH6124 - propagate name of Series when it's consistent
- names = {v.name for v in values}
- if len(names) == 1:
- columns.name = list(names)[0]
- else:
- index = first_not_none.index
- columns = key_index
- stacked_values = stacked_values.T
+ def _wrap_applied_output_series(
+ self,
+ keys,
+ values: List[Series],
+ not_indexed_same: bool,
+ first_not_none,
+ key_index,
+ ) -> FrameOrSeriesUnion:
+ # this is to silence a DeprecationWarning
+ # TODO: Remove when default dtype of empty Series is object
+ kwargs = first_not_none._construct_axes_dict()
+ backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
+ values = [x if (x is not None) else backup for x in values]
+
+ all_indexed_same = all_indexes_same(x.index for x in values)
+
+ # GH3596
+ # provide a reduction (Frame -> Series) if groups are
+ # unique
+ if self.squeeze:
+ applied_index = self._selected_obj._get_axis(self.axis)
+ singular_series = len(values) == 1 and applied_index.nlevels == 1
+
+ # assign the name to this series
+ if singular_series:
+ values[0].name = keys[0]
+
+ # GH2893
+ # we have series in the values array, we want to
+ # produce a series:
+ # if any of the sub-series are not indexed the same
+ # OR we don't have a multi-index and we have only a
+ # single values
+ return self._concat_objects(
+ keys, values, not_indexed_same=not_indexed_same
+ )
+
+ # still a series
+ # path added as of GH 5545
+ elif all_indexed_same:
+ from pandas.core.reshape.concat import concat
+
+ return concat(values)
+
+ if not all_indexed_same:
+ # GH 8467
+ return self._concat_objects(keys, values, not_indexed_same=True)
+
+ # Combine values
+ # vstack+constructor is faster than concat and handles MI-columns
+ stacked_values = np.vstack([np.asarray(v) for v in values])
+
+ if self.axis == 0:
+ index = key_index
+ columns = first_not_none.index.copy()
+ if columns.name is None:
+ # GH6124 - propagate name of Series when it's consistent
+ names = {v.name for v in values}
+ if len(names) == 1:
+ columns.name = list(names)[0]
+ else:
+ index = first_not_none.index
+ columns = key_index
+ stacked_values = stacked_values.T
- result = self.obj._constructor(stacked_values, index=index, columns=columns)
+ result = self.obj._constructor(stacked_values, index=index, columns=columns)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
| Splitting off the Series case seemed to be a natural way to break up this method. Always open to other thoughts. | https://api.github.com/repos/pandas-dev/pandas/pulls/36536 | 2020-09-21T22:50:51Z | 2020-09-22T22:45:23Z | 2020-09-22T22:45:23Z | 2020-10-11T13:22:08Z |
Regr/period range large value/issue 36430 | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index e3a96c69918db..e3b0f59c3edcc 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -36,6 +36,7 @@ Fixed regressions
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
+- Fixed regression in :class:`Period` incorrect value for ordinal over the maximum timestamp (:issue:`36430`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 86b6533f5caf5..27402c8d255b6 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -861,6 +861,7 @@ cdef int64_t get_time_nanos(int freq, int64_t unix_date, int64_t ordinal) nogil:
"""
cdef:
int64_t sub, factor
+ int64_t nanos_in_day = 24 * 3600 * 10**9
freq = get_freq_group(freq)
@@ -886,7 +887,7 @@ cdef int64_t get_time_nanos(int freq, int64_t unix_date, int64_t ordinal) nogil:
# We must have freq == FR_HR
factor = 10**9 * 3600
- sub = ordinal - unix_date * 24 * 3600 * 10**9 / factor
+ sub = ordinal - unix_date * (nanos_in_day / factor)
return sub * factor
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index dcef0615121c1..795021a260028 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -486,6 +486,13 @@ def test_period_cons_combined(self):
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
+ @pytest.mark.parametrize("hour", range(24))
+ def test_period_large_ordinal(self, hour):
+ # Issue #36430
+ # Integer overflow for Period over the maximum timestamp
+ p = pd.Period(ordinal=2562048 + hour, freq="1H")
+ assert p.hour == hour
+
class TestPeriodMethods:
def test_round_trip(self):
| # Checklist
- [x] closes #36430
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
# Solution
The culprit was the multiplication `unix_date * 24 * 3600 * 10**9 / factor`; for the failing ordinal:
```
unix_date = 106752
np.log2(unix_date * 24 * 3600 * 10**9)
# 63.00000011936912
```
That intermediate product exceeds `2**63 - 1`, so it overflowed `int64` and produced the observed behaviour.
Splitting the multiplication (doing the division by `factor` first) did the trick; a quick demonstration is below.
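NumPy sketch of both the overflow and the fix (assuming the same `int64` arithmetic as the Cython code):

```
import numpy as np

day_ns = np.int64(24) * 3600 * 10 ** 9  # 86_400_000_000_000
factor = np.int64(3600) * 10 ** 9       # the FR_HR factor
unix_date = np.int64(106752)

unix_date * day_ns              # wraps: warns and yields a negative value
unix_date * (day_ns // factor)  # fine: 106752 * 24 is well within int64
```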
| https://api.github.com/repos/pandas-dev/pandas/pulls/36535 | 2020-09-21T21:59:47Z | 2020-09-22T22:08:22Z | 2020-09-22T22:08:21Z | 2020-09-23T10:34:58Z |
CLN: clean up blocks.py | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index eb5b887c8b0cb..f18bc4d0bcf85 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -175,7 +175,7 @@ def _holder(self):
@property
def _consolidate_key(self):
- return (self._can_consolidate, self.dtype.name)
+ return self._can_consolidate, self.dtype.name
@property
def is_view(self) -> bool:
@@ -1363,6 +1363,7 @@ def where(
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
+ try_cast: bool, default False
axis : int, default 0
Returns
@@ -1633,8 +1634,8 @@ def __init__(self, values, placement, ndim=None):
def shape(self):
# TODO(EA2D): override unnecessary with 2D EAs
if self.ndim == 1:
- return ((len(self.values)),)
- return (len(self.mgr_locs), len(self.values))
+ return (len(self.values),)
+ return len(self.mgr_locs), len(self.values)
def iget(self, col):
| Clean-up while going through the code. Will probably check more files. | https://api.github.com/repos/pandas-dev/pandas/pulls/36534 | 2020-09-21T21:34:38Z | 2020-09-24T01:12:35Z | 2020-09-24T01:12:34Z | 2020-09-24T08:07:25Z |
CLN: remove nonexistent argument | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 50d1810fee30d..edacacd3e26bd 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -278,7 +278,6 @@ def _check_object_for_strings(values) -> str:
Parameters
----------
values : ndarray
- ndtype : str
Returns
-------
| Small clean-up of an argument in the docstring which is not present in the function signature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36533 | 2020-09-21T20:26:27Z | 2020-09-21T21:24:09Z | 2020-09-21T21:24:09Z | 2020-09-21T22:10:22Z |
BUG: Fix issue in preserving index name on empty DataFrame | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 72937141c2870..e3a96c69918db 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -35,6 +35,7 @@ Fixed regressions
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
+- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 36dfe43bfd708..69b12bcff967f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3190,7 +3190,8 @@ def _ensure_valid_index(self, value):
# GH31368 preserve name of index
index_copy = value.index.copy()
- index_copy.name = self.index.name
+ if self.index.name is not None:
+ index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 7afbbc2b9ab2b..72bc13e67c040 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -672,3 +672,11 @@ def test_index_name_empty(self):
)
tm.assert_frame_equal(df, expected)
+
+ # GH 36527
+ df = pd.DataFrame()
+ series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index"))
+ df["series"] = series
+ expected = pd.DataFrame(
+ {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index")
+ )
| - [x] closes #36527
- [x] tests added / passed
  - `tests/indexing/test_partial.py:test_index_name_empty`
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- v1.1.3
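For reference, a minimal reproduction of the regression, mirroring the test added above:
```
import pandas as pd

df = pd.DataFrame()
series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index"))
df["series"] = series

# Before the fix, the empty frame's unnamed index overwrote the series'
# index name with None; with the fix it is preserved.
assert df.index.name == "series_index"
```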
| https://api.github.com/repos/pandas-dev/pandas/pulls/36532 | 2020-09-21T20:15:35Z | 2020-09-21T21:42:49Z | 2020-09-21T21:42:49Z | 2020-09-22T11:59:24Z |
Add generate pip dependencies from conda to pre-commit | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d01956bb79e11..79c95784ff500 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -65,3 +65,12 @@ repos:
v0\.|
v1\.0\.|
v1\.1\.[012])
+- repo: local
+ hooks:
+ - id: pip_to_conda
+ name: Generate pip dependency from conda
+ description: This hook checks if the conda environment.yml and requirements-dev.txt are equal
+ language: system
+ entry: python -m scripts.generate_pip_deps_from_conda
+ files: ^(environment.yml|requirements-dev.txt)$
+ pass_filenames: false
| closes: #36529
First try; not sure yet how to activate the local virtual env. The error message right now is:
```
black................................................(no files to check)Skipped
flake8...............................................(no files to check)Skipped
flake8-pyx...........................................(no files to check)Skipped
flake8-pxd...........................................(no files to check)Skipped
isort................................................(no files to check)Skipped
Generate pip dependency from conda.......................................Failed
- hook id: pip_to_conda
- exit code: 1
Traceback (most recent call last):
File "scripts/generate_pip_deps_from_conda.py", line 20, in <module>
import yaml
ModuleNotFoundError: No module named 'yaml'
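```

The import fails because `language: system` runs the hook in whatever environment invoked pre-commit, which may lack `pyyaml`. A likely fix (sketched here as an assumption, not something taken from this PR) is to let pre-commit build an isolated environment for the hook and declare the dependency there:
```
- repo: local
  hooks:
    - id: pip_to_conda
      name: Generate pip dependency from conda
      language: python                    # pre-commit creates a virtualenv for the hook
      additional_dependencies: [pyyaml]   # assumed dependency of the script
      entry: python -m scripts.generate_pip_deps_from_conda
      files: ^(environment.yml|requirements-dev.txt)$
      pass_filenames: false
```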
| https://api.github.com/repos/pandas-dev/pandas/pulls/36531 | 2020-09-21T19:41:11Z | 2020-09-26T01:19:04Z | 2020-09-26T01:19:04Z | 2020-10-07T16:12:02Z |
REF: Categorical.fillna match patterns in other methods | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 18940b574b517..99a1106440f7f 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -238,7 +238,7 @@ Bug fixes
Categorical
^^^^^^^^^^^
-
+- :meth:`Categorical.fillna` will always return a copy, will validate a passed fill value regardless of whether there are any NAs to fill, and will disallow a ``NaT`` as a fill value for numeric categories (:issue:`36530`)
-
-
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index ef69d6565cfeb..7b1a66560e73b 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -37,7 +37,6 @@
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
-from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core import ops
@@ -1213,7 +1212,7 @@ def _validate_fill_value(self, fill_value):
ValueError
"""
- if isna(fill_value):
+ if is_valid_nat_for_dtype(fill_value, self.categories.dtype):
fill_value = -1
elif fill_value in self.categories:
fill_value = self._unbox_scalar(fill_value)
@@ -1636,6 +1635,7 @@ def fillna(self, value=None, method=None, limit=None):
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
+ value = extract_array(value, extract_numpy=True)
if value is None:
value = np.nan
@@ -1644,10 +1644,8 @@ def fillna(self, value=None, method=None, limit=None):
"specifying a limit for fillna has not been implemented yet"
)
- codes = self._codes
-
- # pad / bfill
if method is not None:
+ # pad / bfill
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
@@ -1657,40 +1655,25 @@ def fillna(self, value=None, method=None, limit=None):
codes = _get_codes_for_values(values, self.categories)
else:
+ # We copy even if there is nothing to fill
+ codes = self._ndarray.copy()
+ mask = self.isna()
- # If value is a dict or a Series (a dict value has already
- # been converted to a Series)
- if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
+ if isinstance(value, (np.ndarray, Categorical)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
- mask = ~algorithms.isin(value, self.categories)
- if not isna(value[mask]).all():
+ not_categories = ~algorithms.isin(value, self.categories)
+ if not isna(value[not_categories]).all():
+ # All entries in `value` must either be a category or NA
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
- indexer = np.where(codes == -1)
- codes = codes.copy()
- codes[indexer] = values_codes[indexer]
-
- # If value is not a dict or Series it should be a scalar
- elif is_hashable(value):
- if not isna(value) and value not in self.categories:
- raise ValueError("fill value must be in categories")
-
- mask = codes == -1
- if mask.any():
- codes = codes.copy()
- if isna(value):
- codes[mask] = -1
- else:
- codes[mask] = self._unbox_scalar(value)
+ codes[mask] = values_codes[mask]
else:
- raise TypeError(
- f"'value' parameter must be a scalar, dict "
- f"or Series, but you passed a {type(value).__name__}"
- )
+ new_code = self._validate_fill_value(value)
+ codes[mask] = new_code
return self._from_backing_data(codes)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index b4f91590e09d1..5d3f8e3a2f7c1 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -362,7 +362,8 @@ def test_na_actions_categorical(self):
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
- with pytest.raises(ValueError, match=("fill value must be in categories")):
+ msg = "'fill_value=4' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
res = df.fillna(method="pad")
diff --git a/pandas/tests/indexes/categorical/test_fillna.py b/pandas/tests/indexes/categorical/test_fillna.py
index 0d878249d3800..f6a6747166011 100644
--- a/pandas/tests/indexes/categorical/test_fillna.py
+++ b/pandas/tests/indexes/categorical/test_fillna.py
@@ -14,6 +14,32 @@ def test_fillna_categorical(self):
tm.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
- msg = "fill value must be in categories"
+ msg = "'fill_value=2.0' is not present in this Categorical's categories"
with pytest.raises(ValueError, match=msg):
idx.fillna(2.0)
+
+ def test_fillna_copies_with_no_nas(self):
+ # Nothing to fill, should still get a copy
+ ci = CategoricalIndex([0, 1, 1])
+ cat = ci._data
+ result = ci.fillna(0)
+ assert result._values._ndarray is not cat._ndarray
+ assert result._values._ndarray.base is None
+
+ # Same check directly on the Categorical object
+ result = cat.fillna(0)
+ assert result._ndarray is not cat._ndarray
+ assert result._ndarray.base is None
+
+ def test_fillna_validates_with_no_nas(self):
+ # We validate the fill value even if fillna is a no-op
+ ci = CategoricalIndex([2, 3, 3])
+ cat = ci._data
+
+ msg = "'fill_value=False' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
+ ci.fillna(False)
+
+ # Same check directly on the Categorical
+ with pytest.raises(ValueError, match=msg):
+ cat.fillna(False)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 80b8271e16e7a..b6a6f4e8200d4 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -125,7 +125,8 @@ def test_fillna_categorical_raises(self):
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
- with pytest.raises(ValueError, match="fill value must be in categories"):
+ msg = "'fill_value=d' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
ser.fillna("d")
with pytest.raises(ValueError, match="fill value must be in categories"):
| Also:
- always return a copy even if there is nothing to fill
- validate the fill_value even if there is nothing to fill (both behaviours sketched below)
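A minimal sketch of the new behaviour, based on the tests added in the diff:
```
import pandas as pd
import pytest

cat = pd.Categorical([0, 1, 1])  # no NAs anywhere

# The fill value is validated even though there is nothing to fill.
with pytest.raises(ValueError, match="not present in this Categorical's categories"):
    cat.fillna(2)

# A copy is returned even though fillna is a no-op here.
result = cat.fillna(0)
assert result is not cat
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/36530 | 2020-09-21T19:34:52Z | 2020-09-22T22:19:59Z | 2020-09-22T22:19:59Z | 2020-09-22T22:23:11Z |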
REF: test_to_latex | diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 8df8796d236a5..7a0d305758802 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -1,5 +1,6 @@
import codecs
from datetime import datetime
+from textwrap import dedent
import pytest
@@ -16,14 +17,82 @@
)
+def _dedent(string):
+ """Dedent without new line in the beginning.
+
+ Built-in textwrap.dedent would keep new line character in the beginning
+ of multi-line string starting from the new line.
+ This version drops the leading new line character.
+ """
+ return dedent(string).lstrip()
+
+
class TestToLatex:
- def test_to_latex_filename(self, float_frame):
+ @pytest.fixture
+ def df_short(self):
+ """Short dataframe for testing table/tabular/longtable LaTeX env."""
+ return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+ @pytest.fixture
+ def caption_table(self):
+ """Caption for table/tabular LaTeX environment."""
+ return "a table in a \\texttt{table/tabular} environment"
+
+ @pytest.fixture
+ def label_table(self):
+ """Label for table/tabular LaTeX environment."""
+ return "tab:table_tabular"
+
+ @pytest.fixture
+ def caption_longtable(self):
+ """Caption for longtable LaTeX environment."""
+ return "a table in a \\texttt{longtable} environment"
+
+ @pytest.fixture
+ def label_longtable(self):
+ """Label for longtable LaTeX environment."""
+ return "tab:longtable"
+
+ @pytest.fixture
+ def multiindex_frame(self):
+ """Multiindex dataframe for testing multirow LaTeX macros."""
+ yield DataFrame.from_dict(
+ {
+ ("c1", 0): pd.Series({x: x for x in range(4)}),
+ ("c1", 1): pd.Series({x: x + 4 for x in range(4)}),
+ ("c2", 0): pd.Series({x: x for x in range(4)}),
+ ("c2", 1): pd.Series({x: x + 4 for x in range(4)}),
+ ("c3", 0): pd.Series({x: x for x in range(4)}),
+ }
+ ).T
+
+ @pytest.fixture
+ def multicolumn_frame(self):
+ """Multicolumn dataframe for testing multicolumn LaTeX macros."""
+ yield pd.DataFrame(
+ {
+ ("c1", 0): {x: x for x in range(5)},
+ ("c1", 1): {x: x + 5 for x in range(5)},
+ ("c2", 0): {x: x for x in range(5)},
+ ("c2", 1): {x: x + 5 for x in range(5)},
+ ("c3", 0): {x: x for x in range(5)},
+ }
+ )
+
+ @pytest.fixture
+ def df_with_symbols(self):
+ """Dataframe with special characters for testing chars escaping."""
+ a = "a"
+ b = "b"
+ yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}})
+
+ def test_to_latex_to_file(self, float_frame):
with tm.ensure_clean("test.tex") as path:
float_frame.to_latex(path)
-
with open(path) as f:
assert float_frame.to_latex() == f.read()
+ def test_to_latex_to_file_utf8_with_encoding(self):
# test with utf-8 and encoding option (GH 7061)
df = DataFrame([["au\xdfgangen"]])
with tm.ensure_clean("test.tex") as path:
@@ -31,42 +100,47 @@ def test_to_latex_filename(self, float_frame):
with codecs.open(path, "r", encoding="utf-8") as f:
assert df.to_latex() == f.read()
+ def test_to_latex_to_file_utf8_without_encoding(self):
# test with utf-8 without encoding option
+ df = DataFrame([["au\xdfgangen"]])
with tm.ensure_clean("test.tex") as path:
df.to_latex(path)
with codecs.open(path, "r", encoding="utf-8") as f:
assert df.to_latex() == f.read()
- def test_to_latex(self, float_frame):
- # it works!
- float_frame.to_latex()
-
+ def test_to_latex_tabular_with_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- withindex_result = df.to_latex()
- withindex_expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withindex_result == withindex_expected
-
- withoutindex_result = df.to_latex(index=False)
- withoutindex_expected = r"""\begin{tabular}{rl}
-\toprule
- a & b \\
-\midrule
- 1 & b1 \\
- 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- assert withoutindex_result == withoutindex_expected
+ def test_to_latex_tabular_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
@pytest.mark.parametrize(
"bad_column_format",
@@ -78,45 +152,55 @@ def test_to_latex_bad_column_format(self, bad_column_format):
with pytest.raises(ValueError, match=msg):
df.to_latex(column_format=bad_column_format)
- def test_to_latex_format(self, float_frame):
+ def test_to_latex_column_format(self, float_frame):
# GH Bug #9402
- float_frame.to_latex(column_format="ccc")
+ float_frame.to_latex(column_format="lcr")
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- withindex_result = df.to_latex(column_format="ccc")
- withindex_expected = r"""\begin{tabular}{ccc}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withindex_result == withindex_expected
+ result = df.to_latex(column_format="lcr")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lcr}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- def test_to_latex_empty(self):
+ def test_to_latex_empty_tabular(self):
df = DataFrame()
result = df.to_latex()
- expected = r"""\begin{tabular}{l}
-\toprule
-Empty DataFrame
-Columns: Index([], dtype='object')
-Index: Index([], dtype='object') \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{l}
+ \toprule
+ Empty DataFrame
+ Columns: Index([], dtype='object')
+ Index: Index([], dtype='object') \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
+ def test_to_latex_empty_longtable(self):
+ df = DataFrame()
result = df.to_latex(longtable=True)
- expected = r"""\begin{longtable}{l}
-\toprule
-Empty DataFrame
-Columns: Index([], dtype='object')
-Index: Index([], dtype='object') \\
-\end{longtable}
-"""
+ expected = _dedent(
+ r"""
+ \begin{longtable}{l}
+ \toprule
+ Empty DataFrame
+ Columns: Index([], dtype='object')
+ Index: Index([], dtype='object') \\
+ \end{longtable}
+ """
+ )
assert result == expected
def test_to_latex_with_formatters(self):
@@ -142,119 +226,134 @@ def test_to_latex_with_formatters(self):
}
result = df.to_latex(formatters=dict(formatters))
- expected = r"""\begin{tabular}{llrrl}
-\toprule
-{} & datetime64 & float & int & object \\
-\midrule
-index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
-index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
-index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrl}
+ \toprule
+ {} & datetime64 & float & int & object \\
+ \midrule
+ index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
+ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
+ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- def test_to_latex_multiindex(self):
+ def test_to_latex_multiindex_column_tabular(self):
df = DataFrame({("x", "y"): ["a"]})
result = df.to_latex()
- expected = r"""\begin{tabular}{ll}
-\toprule
-{} & x \\
-{} & y \\
-\midrule
-0 & a \\
-\bottomrule
-\end{tabular}
-"""
-
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ {} & x \\
+ {} & y \\
+ \midrule
+ 0 & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
+ def test_to_latex_multiindex_small_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]})
result = df.T.to_latex()
- expected = r"""\begin{tabular}{lll}
-\toprule
- & & 0 \\
-\midrule
-x & y & a \\
-\bottomrule
-\end{tabular}
-"""
-
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & 0 \\
+ \midrule
+ x & y & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- df = DataFrame.from_dict(
- {
- ("c1", 0): pd.Series({x: x for x in range(4)}),
- ("c1", 1): pd.Series({x: x + 4 for x in range(4)}),
- ("c2", 0): pd.Series({x: x for x in range(4)}),
- ("c2", 1): pd.Series({x: x + 4 for x in range(4)}),
- ("c3", 0): pd.Series({x: x for x in range(4)}),
- }
- ).T
- result = df.to_latex()
- expected = r"""\begin{tabular}{llrrrr}
-\toprule
- & & 0 & 1 & 2 & 3 \\
-\midrule
-c1 & 0 & 0 & 1 & 2 & 3 \\
- & 1 & 4 & 5 & 6 & 7 \\
-c2 & 0 & 0 & 1 & 2 & 3 \\
- & 1 & 4 & 5 & 6 & 7 \\
-c3 & 0 & 0 & 1 & 2 & 3 \\
-\bottomrule
-\end{tabular}
-"""
-
+ def test_to_latex_multiindex_tabular(self, multiindex_frame):
+ result = multiindex_frame.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 \\
+ \midrule
+ c1 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c2 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c3 & 0 & 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
+ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
# GH 14184
- df = df.T
+ df = multiindex_frame.T
df.columns.names = ["a", "b"]
result = df.to_latex()
- expected = r"""\begin{tabular}{lrrrrr}
-\toprule
-a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
-b & 0 & 1 & 0 & 1 & 0 \\
-\midrule
-0 & 0 & 4 & 0 & 4 & 0 \\
-1 & 1 & 5 & 1 & 5 & 1 \\
-2 & 2 & 6 & 2 & 6 & 2 \\
-3 & 3 & 7 & 3 & 7 & 3 \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+ b & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 4 & 0 & 4 & 0 \\
+ 1 & 1 & 5 & 1 & 5 & 1 \\
+ 2 & 2 & 6 & 2 & 6 & 2 \\
+ 3 & 3 & 7 & 3 & 7 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
+ def test_to_latex_index_has_name_tabular(self):
# GH 10660
df = pd.DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
result = df.set_index(["a", "b"]).to_latex()
- expected = r"""\begin{tabular}{llr}
-\toprule
- & & c \\
-a & b & \\
-\midrule
-0 & a & 1 \\
- & b & 2 \\
-1 & a & 3 \\
- & b & 4 \\
-\bottomrule
-\end{tabular}
-"""
-
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ 0 & a & 1 \\
+ & b & 2 \\
+ 1 & a & 3 \\
+ & b & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
+ def test_to_latex_groupby_tabular(self):
+ # GH 10660
+ df = pd.DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
result = df.groupby("a").describe().to_latex()
- expected = r"""\begin{tabular}{lrrrrrrrr}
-\toprule
-{} & \multicolumn{8}{l}{c} \\
-{} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
-a & & & & & & & & \\
-\midrule
-0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
-1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
-\bottomrule
-\end{tabular}
-"""
-
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrrrrr}
+ \toprule
+ {} & \multicolumn{8}{l}{c} \\
+ {} & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+ a & & & & & & & & \\
+ \midrule
+ 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\
+ 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
def test_to_latex_multiindex_dupe_level(self):
@@ -269,568 +368,635 @@ def test_to_latex_multiindex_dupe_level(self):
index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
)
result = df.to_latex()
- expected = r"""\begin{tabular}{lll}
-\toprule
- & & col \\
-\midrule
-A & c & NaN \\
-B & c & NaN \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & col \\
+ \midrule
+ A & c & NaN \\
+ B & c & NaN \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- def test_to_latex_multicolumnrow(self):
- df = pd.DataFrame(
- {
- ("c1", 0): {x: x for x in range(5)},
- ("c1", 1): {x: x + 5 for x in range(5)},
- ("c2", 0): {x: x for x in range(5)},
- ("c2", 1): {x: x + 5 for x in range(5)},
- ("c3", 0): {x: x for x in range(5)},
- }
+ def test_to_latex_multicolumn_default(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ {} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+ {} & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
)
- result = df.to_latex()
- expected = r"""\begin{tabular}{lrrrrr}
-\toprule
-{} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
-{} & 0 & 1 & 0 & 1 & 0 \\
-\midrule
-0 & 0 & 5 & 0 & 5 & 0 \\
-1 & 1 & 6 & 1 & 6 & 1 \\
-2 & 2 & 7 & 2 & 7 & 2 \\
-3 & 3 & 8 & 3 & 8 & 3 \\
-4 & 4 & 9 & 4 & 9 & 4 \\
-\bottomrule
-\end{tabular}
-"""
assert result == expected
- result = df.to_latex(multicolumn=False)
- expected = r"""\begin{tabular}{lrrrrr}
-\toprule
-{} & c1 & & c2 & & c3 \\
-{} & 0 & 1 & 0 & 1 & 0 \\
-\midrule
-0 & 0 & 5 & 0 & 5 & 0 \\
-1 & 1 & 6 & 1 & 6 & 1 \\
-2 & 2 & 7 & 2 & 7 & 2 \\
-3 & 3 & 8 & 3 & 8 & 3 \\
-4 & 4 & 9 & 4 & 9 & 4 \\
-\bottomrule
-\end{tabular}
-"""
+ def test_to_latex_multicolumn_false(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex(multicolumn=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ {} & c1 & & c2 & & c3 \\
+ {} & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- result = df.T.to_latex(multirow=True)
- expected = r"""\begin{tabular}{llrrrrr}
-\toprule
- & & 0 & 1 & 2 & 3 & 4 \\
-\midrule
-\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
-\cline{1-7}
-\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
-\cline{1-7}
-c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
-\bottomrule
-\end{tabular}
-"""
+ def test_to_latex_multirow_true(self, multicolumn_frame):
+ result = multicolumn_frame.T.to_latex(multirow=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 & 4 \\
+ \midrule
+ \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- df.index = df.T.index
- result = df.T.to_latex(multirow=True, multicolumn=True, multicolumn_format="c")
- expected = r"""\begin{tabular}{llrrrrr}
-\toprule
- & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
- & & 0 & 1 & 0 & 1 & 0 \\
-\midrule
-\multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
-\cline{1-7}
-\multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
- & 1 & 5 & 6 & 7 & 8 & 9 \\
-\cline{1-7}
-c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
-\bottomrule
-\end{tabular}
-"""
+ def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
+ multicolumn_frame.index = multicolumn_frame.T.index
+ result = multicolumn_frame.T.to_latex(
+ multirow=True,
+ multicolumn=True,
+ multicolumn_format="c",
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert result == expected
- def test_to_latex_escape(self):
- a = "a"
- b = "b"
-
- test_dict = {"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}}
-
- unescaped_result = DataFrame(test_dict).to_latex(escape=False)
- escaped_result = DataFrame(test_dict).to_latex() # default: escape=True
-
- unescaped_expected = r"""\begin{tabular}{lll}
-\toprule
-{} & co$e^x$ & co^l1 \\
-\midrule
-a & a & a \\
-b & b & b \\
-\bottomrule
-\end{tabular}
-"""
-
- escaped_expected = r"""\begin{tabular}{lll}
-\toprule
-{} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
-\midrule
-a & a & a \\
-b & b & b \\
-\bottomrule
-\end{tabular}
-"""
+ def test_to_latex_escape_false(self, df_with_symbols):
+ result = df_with_symbols.to_latex(escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ {} & co$e^x$ & co^l1 \\
+ \midrule
+ a & a & a \\
+ b & b & b \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- assert unescaped_result == unescaped_expected
- assert escaped_result == escaped_expected
+ def test_to_latex_escape_default(self, df_with_symbols):
+ result = df_with_symbols.to_latex() # default: escape=True
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ {} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
+ \midrule
+ a & a & a \\
+ b & b & b \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_special_escape(self):
df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ {} & 0 \\
+ \midrule
+ 0 & a\textbackslash b\textbackslash c \\
+ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
+ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- escaped_result = df.to_latex()
- escaped_expected = r"""\begin{tabular}{ll}
-\toprule
-{} & 0 \\
-\midrule
-0 & a\textbackslash b\textbackslash c \\
-1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
-2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
-\bottomrule
-\end{tabular}
-"""
- assert escaped_result == escaped_expected
-
- def test_to_latex_longtable(self):
-
+ def test_to_latex_longtable_with_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- withindex_result = df.to_latex(longtable=True)
- withindex_expected = r"""\begin{longtable}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-\endfirsthead
-
-\toprule
-{} & a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{3}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
-
-\bottomrule
-\endlastfoot
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\end{longtable}
-"""
- assert withindex_result == withindex_expected
-
- withoutindex_result = df.to_latex(index=False, longtable=True)
- withoutindex_expected = r"""\begin{longtable}{rl}
-\toprule
- a & b \\
-\midrule
-\endfirsthead
-
-\toprule
- a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{2}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
-
-\bottomrule
-\endlastfoot
- 1 & b1 \\
- 2 & b2 \\
-\end{longtable}
-"""
-
- assert withoutindex_result == withoutindex_expected
-
- df = DataFrame({"a": [1, 2]})
- with1column_result = df.to_latex(index=False, longtable=True)
- assert r"\multicolumn{1}" in with1column_result
-
- df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
- with3columns_result = df.to_latex(index=False, longtable=True)
- assert r"\multicolumn{3}" in with3columns_result
-
- def test_to_latex_caption_label(self):
- # GH 25436
- the_caption = "a table in a \\texttt{table/tabular} environment"
- the_label = "tab:table_tabular"
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endfirsthead
+
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+ def test_to_latex_longtable_without_index(self):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ \endfirsthead
+
+ \toprule
+ a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{2}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 1 & b1 \\
+ 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
- # test when only the caption is provided
- result_c = df.to_latex(caption=the_caption)
-
- expected_c = r"""\begin{table}
-\centering
-\caption{a table in a \texttt{table/tabular} environment}
-\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-\end{table}
-"""
- assert result_c == expected_c
-
- # test when only the label is provided
- result_l = df.to_latex(label=the_label)
-
- expected_l = r"""\begin{table}
-\centering
-\label{tab:table_tabular}
-\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-\end{table}
-"""
- assert result_l == expected_l
-
- # test when the caption and the label are provided
- result_cl = df.to_latex(caption=the_caption, label=the_label)
+ @pytest.mark.parametrize(
+ "df, expected_number",
+ [
+ (DataFrame({"a": [1, 2]}), 1),
+ (DataFrame({"a": [1, 2], "b": [3, 4]}), 2),
+ (DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}), 3),
+ ],
+ )
+ def test_to_latex_longtable_continued_on_next_page(self, df, expected_number):
+ result = df.to_latex(index=False, longtable=True)
+ assert fr"\multicolumn{{{expected_number}}}" in result
- expected_cl = r"""\begin{table}
-\centering
-\caption{a table in a \texttt{table/tabular} environment}
-\label{tab:table_tabular}
-\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-\end{table}
-"""
- assert result_cl == expected_cl
+ def test_to_latex_caption_only(self, df_short, caption_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \centering
+ \caption{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
- def test_to_latex_longtable_caption_label(self):
+ def test_to_latex_label_only(self, df_short, label_table):
# GH 25436
- the_caption = "a table in a \\texttt{longtable} environment"
- the_label = "tab:longtable"
+ result = df_short.to_latex(label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \centering
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
- df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table, label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \centering
+ \caption{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
+ # GH 25436
# test when no caption and no label is provided
# is performed by test_to_latex_longtable()
+ result = df_short.to_latex(longtable=True, caption=caption_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment}\\
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
- # test when only the caption is provided
- result_c = df.to_latex(longtable=True, caption=the_caption)
-
- expected_c = r"""\begin{longtable}{lrl}
-\caption{a table in a \texttt{longtable} environment}\\
-\toprule
-{} & a & b \\
-\midrule
-\endfirsthead
-\caption[]{a table in a \texttt{longtable} environment} \\
-\toprule
-{} & a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{3}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
-
-\bottomrule
-\endlastfoot
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\end{longtable}
-"""
- assert result_c == expected_c
-
- # test when only the label is provided
- result_l = df.to_latex(longtable=True, label=the_label)
-
- expected_l = r"""\begin{longtable}{lrl}
-\label{tab:longtable}\\
-\toprule
-{} & a & b \\
-\midrule
-\endfirsthead
-
-\toprule
-{} & a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{3}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
-
-\bottomrule
-\endlastfoot
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\end{longtable}
-"""
- assert result_l == expected_l
-
- # test when the caption and the label are provided
- result_cl = df.to_latex(longtable=True, caption=the_caption, label=the_label)
-
- expected_cl = r"""\begin{longtable}{lrl}
-\caption{a table in a \texttt{longtable} environment}
-\label{tab:longtable}\\
-\toprule
-{} & a & b \\
-\midrule
-\endfirsthead
-\caption[]{a table in a \texttt{longtable} environment} \\
-\toprule
-{} & a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{3}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
+ def test_to_latex_longtable_label_only(self, df_short, label_longtable):
+ # GH 25436
+ result = df_short.to_latex(longtable=True, label=label_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \label{tab:longtable}\\
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endfirsthead
+
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
-\bottomrule
-\endlastfoot
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\end{longtable}
-"""
- assert result_cl == expected_cl
+ def test_to_latex_longtable_caption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ label_longtable,
+ ):
+ # GH 25436
+ result = df_short.to_latex(
+ longtable=True,
+ caption=caption_longtable,
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment}
+ \label{tab:longtable}\\
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
def test_to_latex_position(self):
the_position = "h"
-
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
-
- # test when only the position is provided
- result_p = df.to_latex(position=the_position)
-
- expected_p = r"""\begin{table}[h]
-\centering
-\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-\end{table}
-"""
- assert result_p == expected_p
+ result = df.to_latex(position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{table}[h]
+ \centering
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
def test_to_latex_longtable_position(self):
the_position = "t"
-
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
-
- # test when only the position is provided
- result_p = df.to_latex(longtable=True, position=the_position)
-
- expected_p = r"""\begin{longtable}[t]{lrl}
-\toprule
-{} & a & b \\
-\midrule
-\endfirsthead
-
-\toprule
-{} & a & b \\
-\midrule
-\endhead
-\midrule
-\multicolumn{3}{r}{{Continued on next page}} \\
-\midrule
-\endfoot
-
-\bottomrule
-\endlastfoot
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\end{longtable}
-"""
- assert result_p == expected_p
+ result = df.to_latex(longtable=True, position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{longtable}[t]{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endfirsthead
+
+ \toprule
+ {} & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{{Continued on next page}} \\
+ \midrule
+ \endfoot
+
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
def test_to_latex_escape_special_chars(self):
special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
df = DataFrame(data=special_characters)
- observed = df.to_latex()
- expected = r"""\begin{tabular}{ll}
-\toprule
-{} & 0 \\
-\midrule
-0 & \& \\
-1 & \% \\
-2 & \$ \\
-3 & \# \\
-4 & \_ \\
-5 & \{ \\
-6 & \} \\
-7 & \textasciitilde \\
-8 & \textasciicircum \\
-9 & \textbackslash \\
-\bottomrule
-\end{tabular}
-"""
-
- assert observed == expected
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ {} & 0 \\
+ \midrule
+ 0 & \& \\
+ 1 & \% \\
+ 2 & \$ \\
+ 3 & \# \\
+ 4 & \_ \\
+ 5 & \{ \\
+ 6 & \} \\
+ 7 & \textasciitilde \\
+ 8 & \textasciicircum \\
+ 9 & \textbackslash \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- def test_to_latex_no_header(self):
+ def test_to_latex_no_header_with_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- withindex_result = df.to_latex(header=False)
- withindex_expected = r"""\begin{tabular}{lrl}
-\toprule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withindex_result == withindex_expected
-
- withoutindex_result = df.to_latex(index=False, header=False)
- withoutindex_expected = r"""\begin{tabular}{rl}
-\toprule
-1 & b1 \\
-2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withoutindex_result == withoutindex_expected
+ result = df.to_latex(header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- def test_to_latex_specified_header(self):
+ def test_to_latex_no_header_without_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- withindex_result = df.to_latex(header=["AA", "BB"])
- withindex_expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & AA & BB \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withindex_result == withindex_expected
-
- withoutindex_result = df.to_latex(header=["AA", "BB"], index=False)
- withoutindex_expected = r"""\begin{tabular}{rl}
-\toprule
-AA & BB \\
-\midrule
- 1 & b1 \\
- 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
+ result = df.to_latex(index=False, header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- assert withoutindex_result == withoutindex_expected
+ def test_to_latex_specified_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"])
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & AA & BB \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- withoutescape_result = df.to_latex(header=["$A$", "$B$"], escape=False)
- withoutescape_expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & $A$ & $B$ \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
+ def test_to_latex_specified_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"], index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ AA & BB \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
- assert withoutescape_result == withoutescape_expected
+ def test_to_latex_specified_header_special_chars_without_escape(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["$A$", "$B$"], escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & $A$ & $B$ \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+ def test_to_latex_number_of_items_in_header_missmatch_raises(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_latex(header=["A"])
- def test_to_latex_decimal(self, float_frame):
+ def test_to_latex_decimal(self):
# GH 12031
- float_frame.to_latex()
-
df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
- withindex_result = df.to_latex(decimal=",")
-
- withindex_expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1,0 & b1 \\
-1 & 2,1 & b2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert withindex_result == withindex_expected
+ result = df.to_latex(decimal=",")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1,0 & b1 \\
+ 1 & 2,1 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_series(self):
s = Series(["a", "b", "c"])
- withindex_result = s.to_latex()
- withindex_expected = r"""\begin{tabular}{ll}
-\toprule
-{} & 0 \\
-\midrule
-0 & a \\
-1 & b \\
-2 & c \\
-\bottomrule
-\end{tabular}
-"""
- assert withindex_result == withindex_expected
+ result = s.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ {} & 0 \\
+ \midrule
+ 0 & a \\
+ 1 & b \\
+ 2 & c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_bold_rows(self):
# GH 16707
df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- observed = df.to_latex(bold_rows=True)
- expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-\textbf{0} & 1 & b1 \\
-\textbf{1} & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
- assert observed == expected
+ result = df.to_latex(bold_rows=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ \textbf{0} & 1 & b1 \\
+ \textbf{1} & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_no_bold_rows(self):
# GH 16707
df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
- observed = df.to_latex(bold_rows=False)
- expected = r"""\begin{tabular}{lrl}
-\toprule
-{} & a & b \\
-\midrule
-0 & 1 & b1 \\
-1 & 2 & b2 \\
-\bottomrule
-\end{tabular}
-"""
- assert observed == expected
+ result = df.to_latex(bold_rows=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ {} & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
@pytest.mark.parametrize("name0", [None, "named0"])
@pytest.mark.parametrize("name1", [None, "named1"])
@@ -875,13 +1041,16 @@ def test_to_latex_multiindex_nans(self, one_row):
if one_row:
df = df.iloc[[0]]
observed = df.set_index(["a", "b"]).to_latex()
- expected = r"""\begin{tabular}{llr}
-\toprule
- & & c \\
-a & b & \\
-\midrule
-NaN & 2 & 4 \\
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ NaN & 2 & 4 \\
+ """
+ )
if not one_row:
expected += r"""1.0 & 3 & 5 \\
"""
@@ -893,93 +1062,111 @@ def test_to_latex_multiindex_nans(self, one_row):
def test_to_latex_non_string_index(self):
# GH 19981
observed = pd.DataFrame([[1, 2, 3]] * 2).set_index([0, 1]).to_latex()
- expected = r"""\begin{tabular}{llr}
-\toprule
- & & 2 \\
-0 & 1 & \\
-\midrule
-1 & 2 & 3 \\
- & 2 & 3 \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & 2 \\
+ 0 & 1 & \\
+ \midrule
+ 1 & 2 & 3 \\
+ & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert observed == expected
def test_to_latex_midrule_location(self):
# GH 18326
df = pd.DataFrame({"a": [1, 2]})
df.index.name = "foo"
- observed = df.to_latex(index_names=False)
- expected = r"""\begin{tabular}{lr}
-\toprule
-{} & a \\
-\midrule
-0 & 1 \\
-1 & 2 \\
-\bottomrule
-\end{tabular}
-"""
-
- assert observed == expected
+ result = df.to_latex(index_names=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ {} & a \\
+ \midrule
+ 0 & 1 \\
+ 1 & 2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_multiindex_empty_name(self):
# GH 18669
mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
df = pd.DataFrame(-1, index=mi, columns=range(4))
observed = df.to_latex()
- expected = r"""\begin{tabular}{lrrrr}
-\toprule
- & 0 & 1 & 2 & 3 \\
-{} & & & & \\
-\midrule
-1 & -1 & -1 & -1 & -1 \\
-2 & -1 & -1 & -1 & -1 \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrr}
+ \toprule
+ & 0 & 1 & 2 & 3 \\
+ {} & & & & \\
+ \midrule
+ 1 & -1 & -1 & -1 & -1 \\
+ 2 & -1 & -1 & -1 & -1 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert observed == expected
- def test_to_latex_float_format_no_fixed_width(self):
-
+ def test_to_latex_float_format_no_fixed_width_3decimals(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
- expected = r"""\begin{tabular}{lr}
-\toprule
-{} & x \\
-\midrule
-0 & 0.200 \\
-\bottomrule
-\end{tabular}
-"""
- assert df.to_latex(float_format="%.3f") == expected
+ result = df.to_latex(float_format="%.3f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ {} & x \\
+ \midrule
+ 0 & 0.200 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+ def test_to_latex_float_format_no_fixed_width_integer(self):
# GH 22270
df = DataFrame({"x": [100.0]})
- expected = r"""\begin{tabular}{lr}
-\toprule
-{} & x \\
-\midrule
-0 & 100 \\
-\bottomrule
-\end{tabular}
-"""
- assert df.to_latex(float_format="%.0f") == expected
+ result = df.to_latex(float_format="%.0f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ {} & x \\
+ \midrule
+ 0 & 100 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
def test_to_latex_multindex_header(self):
# GH 16718
- df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(
- ["a", "b"]
- )
+ df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
+ df = df.set_index(["a", "b"])
observed = df.to_latex(header=["r1", "r2"])
- expected = r"""\begin{tabular}{llrr}
-\toprule
- & & r1 & r2 \\
-a & b & & \\
-\midrule
-0 & 1 & 2 & 3 \\
-\bottomrule
-\end{tabular}
-"""
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrr}
+ \toprule
+ & & r1 & r2 \\
+ a & b & & \\
+ \midrule
+ 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
assert observed == expected
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Refactor/clean-up ``test_to_latex.py``.
- Split big test functions with multiple assertions into multiple functions
- Make expected strings readable by indenting them for visual clarity and then dedenting the leading whitespace before the assertion (see the sketch below)
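A minimal illustration of the pattern, using the `_dedent` helper introduced in the diff:
```
from textwrap import dedent

raw = r"""
    \begin{tabular}{lr}
    \end{tabular}
    """
dedent(raw)           # keeps the leading newline: "\n\begin{tabular}..."
dedent(raw).lstrip()  # the helper drops it, matching to_latex() output
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/36528 | 2020-09-21T17:19:02Z | 2020-09-22T22:34:30Z | 2020-09-22T22:34:30Z | 2020-09-28T18:53:19Z |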
TST: remove xfails with strict=False | diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 6bbc9bc9e1788..08eab69900400 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -1138,7 +1138,6 @@ def test_parse_integers_above_fp_precision(all_parsers):
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
@@ -1152,7 +1151,6 @@ def test_chunks_have_consistent_numerical_type(all_parsers):
assert result.a.dtype == float
-@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False)
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
| xref https://github.com/pandas-dev/pandas/pull/35772#issuecomment-675558375 | https://api.github.com/repos/pandas-dev/pandas/pulls/36524 | 2020-09-21T14:09:41Z | 2020-09-21T16:25:39Z | 2020-09-21T16:25:38Z | 2020-09-21T19:29:07Z |
DOC: a few sphinx fixes in release notes | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index e3a96c69918db..26d2734651809 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -33,8 +33,8 @@ Fixed regressions
- Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
-- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
-- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
+- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
+- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
.. ---------------------------------------------------------------------------
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6a5b4b3b9ff16..dbf7a993c1760 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -96,7 +96,7 @@ For example:
buffer = io.BytesIO()
data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip")
-:.. _whatsnew_read_csv_table_precision_default:
+.. _whatsnew_120.read_csv_table_precision_default:
Change in default floating precision for ``read_csv`` and ``read_table``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -209,7 +209,7 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
Deprecations
~~~~~~~~~~~~
- Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`)
-- Deprecated parameter ``dtype`` in :~meth:`Index.copy` on method all index classes. Use the :meth:`Index.astype` method instead for changing dtype(:issue:`35853`)
+- Deprecated parameter ``dtype`` in :meth:`~Index.copy` on method all index classes. Use the :meth:`~Index.astype` method instead for changing dtype (:issue:`35853`)
- Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`)
- :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`)
- The :meth:`Index.to_native_types` is deprecated. Use ``.astype(str)`` instead (:issue:`28867`)
@@ -249,7 +249,7 @@ Datetimelike
- Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`)
- Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`)
- Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`)
-- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`,:issue:`36254`)
+- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`, :issue:`36254`)
- Inconsistency in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` setitem casting arrays of strings to datetimelike scalars but not scalar strings (:issue:`36261`)
-
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36523 | 2020-09-21T13:58:43Z | 2020-09-23T11:16:22Z | 2020-09-23T11:16:22Z | 2020-09-23T11:37:14Z |
TST: check inequality by comparing categorical with NaN ( #28384 ) | diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 5309b8827e3f0..21bea9356dcf0 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -148,3 +148,24 @@ def test_use_inf_as_na_outside_context(self, values, expected):
result = pd.isna(DataFrame(cat))
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "a1, a2, categories",
+ [
+ (["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]),
+ ([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]),
+ ],
+ )
+ def test_compare_categorical_with_missing(self, a1, a2, categories):
+ # GH 28384
+ cat_type = CategoricalDtype(categories)
+
+ # !=
+ result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type)
+ expected = Series(a1) != Series(a2)
+ tm.assert_series_equal(result, expected)
+
+ # ==
+ result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type)
+ expected = Series(a1) == Series(a2)
+ tm.assert_series_equal(result, expected)
| - [x] closes #28384
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/36520 | 2020-09-21T07:40:40Z | 2020-09-22T23:21:38Z | 2020-09-22T23:21:37Z | 2020-09-30T19:35:21Z |
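A usage sketch of the behavior the new test pins down (GH 28384): comparing categorical Series should treat `NaN` the same way the plain object-dtype comparison does, with `!=` true and `==` false in the missing slot:

```python
import numpy as np
import pandas as pd

cat_type = pd.CategoricalDtype(["a", "b", "c"])
left = pd.Series(["a", "b", "c"], dtype=cat_type)
right = pd.Series([np.nan, "a", "b"], dtype=cat_type)

# NaN compares unequal to everything, so the first slot is True for !=
# and False for == -- matching the non-categorical Series comparison
print((left != right).tolist())  # [True, True, True]
print((left == right).tolist())  # [False, False, False]
```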
Backport PR #36464: BUG: Fix astype from float32 to string | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 7d658215d7b76..72937141c2870 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -47,6 +47,7 @@ Bug fixes
- Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`)
- Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`)
- Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`)
+- Bug in :meth:`Series.astype` showing too much precision when casting from ``np.float32`` to string dtype (:issue:`36451`)
- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index eadfcefaac73d..6bf0aba128e39 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -650,11 +650,12 @@ cpdef ndarray[object] ensure_string_array(
Py_ssize_t i = 0, n = len(arr)
result = np.asarray(arr, dtype="object")
+
if copy and result is arr:
result = result.copy()
for i in range(n):
- val = result[i]
+ val = arr[i]
if not checknull(val):
result[i] = str(val)
else:
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index a4778869aee24..b5a83c17a64f0 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -199,11 +199,9 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
if dtype:
assert dtype == "string"
- result = np.asarray(scalars, dtype="object")
-
# convert non-na-likes to str, and nan-likes to StringDtype.na_value
result = lib.ensure_string_array(
- result, na_value=StringDtype.na_value, copy=copy
+ scalars, na_value=StringDtype.na_value, copy=copy
)
return cls(result)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index efd5d29ae0717..56a8e21edd004 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -336,3 +336,12 @@ def test_memory_usage():
series = pd.Series(["a", "b", "c"], dtype="string")
assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True)
+
+
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+def test_astype_from_float_dtype(dtype):
+ # https://github.com/pandas-dev/pandas/issues/36451
+ s = pd.Series([0.1], dtype=dtype)
+ result = s.astype("string")
+ expected = pd.Series(["0.1"], dtype="string")
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index b9d90a9fc63dd..7449d8d65ef96 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -1,3 +1,4 @@
+import numpy as np
import pytest
from pandas import Interval, Series, Timestamp, date_range
@@ -46,3 +47,11 @@ def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
values.astype(float, errors=errors)
+
+ @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+ def test_astype_from_float_to_str(self, dtype):
+ # https://github.com/pandas-dev/pandas/issues/36451
+ s = Series([0.1], dtype=dtype)
+ result = s.astype(str)
+ expected = Series(["0.1"])
+ tm.assert_series_equal(result, expected)
| Backport of https://github.com/pandas-dev/pandas/pull/36464 | https://api.github.com/repos/pandas-dev/pandas/pulls/36519 | 2020-09-21T06:55:12Z | 2020-09-21T08:47:09Z | 2020-09-21T08:47:09Z | 2020-09-22T13:39:53Z |
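A sketch of the symptom this backport addresses; the pre-fix output in the comment is reconstructed from the issue (GH 36451) rather than re-run here:

```python
import numpy as np
import pandas as pd

s = pd.Series([0.1], dtype=np.float32)

# Pre-fix, the values were routed through an object array, which turned
# each np.float32 into a Python float and exposed the full double
# representation, e.g. "0.10000000149011612".  Post-fix, str() is applied
# to the original float32 scalar:
print(s.astype(str)[0])       # "0.1"
print(s.astype("string")[0])  # "0.1"
```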
Fix grammatical errors | diff --git a/doc/source/getting_started/intro_tutorials/03_subset_data.rst b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
index 8476fee5e1eee..a718c39620ce5 100644
--- a/doc/source/getting_started/intro_tutorials/03_subset_data.rst
+++ b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
@@ -27,14 +27,14 @@ This tutorial uses the Titanic data set, stored as CSV. The data
consists of the following data columns:
- PassengerId: Id of every passenger.
-- Survived: This feature have value 0 and 1. 0 for not survived and 1
+- Survived: This feature has values 0 and 1: 0 for not survived and 1
for survived.
- Pclass: There are 3 classes: Class 1, Class 2 and Class 3.
- Name: Name of passenger.
- Sex: Gender of passenger.
- Age: Age of passenger.
-- SibSp: Indication that passenger have siblings and spouse.
-- Parch: Whether a passenger is alone or have family.
+- SibSp: Indication that passengers have siblings and spouses.
+- Parch: Whether a passenger is alone or has a family.
- Ticket: Ticket number of passenger.
- Fare: Indicating the fare.
- Cabin: The cabin of passenger.
@@ -199,7 +199,7 @@ selection brackets ``[]``. Only rows for which the value is ``True``
will be selected.
We know from before that the original Titanic ``DataFrame`` consists of
-891 rows. Let’s have a look at the amount of rows which satisfy the
+891 rows. Let’s have a look at the number of rows which satisfy the
condition by checking the ``shape`` attribute of the resulting
``DataFrame`` ``above_35``:
@@ -398,7 +398,7 @@ See the user guide section on :ref:`different choices for indexing <indexing.cho
<div class="d-flex flex-row gs-torefguide">
<span class="badge badge-info">To user guide</span>
-A full overview about indexing is provided in the user guide pages on :ref:`indexing and selecting data <indexing>`.
+A full overview of indexing is provided in the user guide pages on :ref:`indexing and selecting data <indexing>`.
.. raw:: html
diff --git a/doc/source/getting_started/intro_tutorials/04_plotting.rst b/doc/source/getting_started/intro_tutorials/04_plotting.rst
index f3d99ee56359a..9102de98a6c2a 100644
--- a/doc/source/getting_started/intro_tutorials/04_plotting.rst
+++ b/doc/source/getting_started/intro_tutorials/04_plotting.rst
@@ -167,7 +167,7 @@ I want each of the columns in a separate subplot.
@savefig 04_airqual_area_subplot.png
axs = air_quality.plot.area(figsize=(12, 4), subplots=True)
-Separate subplots for each of the data columns is supported by the ``subplots`` argument
+Separate subplots for each of the data columns are supported by the ``subplots`` argument
of the ``plot`` functions. The builtin options available in each of the pandas plot
functions that are worthwhile to have a look.
@@ -213,7 +213,7 @@ I want to further customize, extend or save the resulting plot.
</li>
</ul>
-Each of the plot objects created by pandas are a
+Each of the plot objects created by pandas is a
`matplotlib <https://matplotlib.org/>`__ object. As Matplotlib provides
plenty of options to customize plots, making the link between pandas and
Matplotlib explicit enables all the power of matplotlib to the plot.
| This PR fixes grammatical errors. | https://api.github.com/repos/pandas-dev/pandas/pulls/36518 | 2020-09-21T05:40:55Z | 2020-10-08T19:49:21Z | 2020-10-08T19:49:21Z | 2020-10-08T19:49:28Z |
DOC: Correct inconsistent description on default DateOffset setting | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 32f0cac3f81e2..d97b1e48bdf07 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -846,7 +846,7 @@ into ``freq`` keyword arguments. The available date offsets and associated frequ
:header: "Date Offset", "Frequency String", "Description"
:widths: 15, 15, 65
- :class:`~pandas.tseries.offsets.DateOffset`, None, "Generic offset class, defaults to 1 calendar day"
+ :class:`~pandas.tseries.offsets.DateOffset`, None, "Generic offset class, defaults to absolute 24 hours"
:class:`~pandas.tseries.offsets.BDay` or :class:`~pandas.tseries.offsets.BusinessDay`, ``'B'``,"business day (weekday)"
:class:`~pandas.tseries.offsets.CDay` or :class:`~pandas.tseries.offsets.CustomBusinessDay`, ``'C'``, "custom business day"
:class:`~pandas.tseries.offsets.Week`, ``'W'``, "one week, optionally anchored on a day of the week"
| Correct the inconsistent doc description of the default `DateOffset` setting.
- [x] closes #36512
| https://api.github.com/repos/pandas-dev/pandas/pulls/36516 | 2020-09-21T04:20:50Z | 2020-10-31T23:00:19Z | 2020-10-31T23:00:18Z | 2020-10-31T23:50:19Z |
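A minimal sketch of the distinction the corrected wording draws, assuming the behavior reported in #36512: a bare `DateOffset()` adds an absolute 24 hours, while `DateOffset(days=1)` keeps the wall-clock time across a DST transition:

```python
import pandas as pd

# Midnight local time on the day Helsinki falls back from DST (2020-10-25)
ts = pd.Timestamp("2020-10-25 00:00", tz="Europe/Helsinki")

print(ts + pd.DateOffset())        # 2020-10-25 23:00:00+02:00, absolute 24 hours
print(ts + pd.DateOffset(days=1))  # 2020-10-26 00:00:00+02:00, calendar day kept
```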
CI/CLN: update travis | diff --git a/.travis.yml b/.travis.yml
index a38e90bbce8ba..81cd461dd2c87 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,15 @@
language: python
python: 3.7
+addons:
+ apt:
+ update: true
+ packages:
+ - xvfb
+
+services:
+ - xvfb
+
# To turn off cached cython files and compiler cache
# set NOCACHE-true
# To delete caches go to https://travis-ci.org/OWNER/REPOSITORY/caches or run
@@ -10,11 +19,9 @@ cache:
ccache: true
directories:
- $HOME/.cache # cython cache
- - $HOME/.ccache # compiler cache
env:
global:
- # Variable for test workers
- PYTEST_WORKERS="auto"
# create a github personal access token
# cd pandas-dev/pandas
@@ -22,18 +29,17 @@ env:
- secure: "EkWLZhbrp/mXJOx38CHjs7BnjXafsqHtwxPQrqWy457VDFWhIY1DMnIR/lOWG+a20Qv52sCsFtiZEmMfUjf0pLGXOqurdxbYBGJ7/ikFLk9yV2rDwiArUlVM9bWFnFxHvdz9zewBH55WurrY4ShZWyV+x2dWjjceWG5VpWeI6sA="
git:
- # for cloning
depth: false
matrix:
fast_finish: true
include:
- # In allowed failures
- dist: bionic
python: 3.9-dev
env:
- JOB="3.9-dev" PATTERN="(not slow and not network and not clipboard)"
+
- env:
- JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)"
@@ -42,7 +48,7 @@ matrix:
- arch: arm64
env:
- - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
+ - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
- env:
- JOB="3.7, locale" ENV_FILE="ci/deps/travis-37-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1"
@@ -71,12 +77,6 @@ before_install:
- uname -a
- git --version
- ./ci/check_git_tags.sh
- # Because travis runs on Google Cloud and has a /etc/boto.cfg,
- # it breaks moto import, see:
- # https://github.com/spulec/moto/issues/1771
- # https://github.com/boto/boto/issues/3741
- # This overrides travis and tells it to look nowhere.
- - export BOTO_CONFIG=/dev/null
install:
- echo "install start"
diff --git a/ci/build39.sh b/ci/build39.sh
index f2ef11d5a71f4..faef2be03c2bb 100755
--- a/ci/build39.sh
+++ b/ci/build39.sh
@@ -1,7 +1,6 @@
#!/bin/bash -e
# Special build for python3.9 until numpy puts its own wheels up
-sudo apt-get install build-essential gcc xvfb
pip install --no-deps -U pip wheel setuptools
pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 961433204cfbb..247f809c5fe63 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -42,9 +42,7 @@ else
fi
if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- sudo apt-get update
- sudo apt-get -y install xvfb
- CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-0/Miniforge3-4.8.5-0-Linux-aarch64.sh"
+ CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh"
else
CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh"
fi
@@ -100,8 +98,6 @@ echo "conda list (root environment)"
conda list
# Clean up any left-over from a previous build
-# (note workaround for https://github.com/conda/conda/issues/2679:
-# `conda env remove` issue)
conda remove --all -q -y -n pandas-dev
echo
@@ -142,12 +138,6 @@ conda list pandas
echo "[Build extensions]"
python setup.py build_ext -q -i -j2
-# TODO: Some of our environments end up with old versions of pip (10.x)
-# Adding a new enough version of pip to the requirements explodes the
-# solve time. Just using pip to update itself.
-# - py35_macos
-# - py35_compat
-# - py36_32bit
echo "[Updating pip]"
python -m pip install --no-deps -U pip wheel setuptools
| Update and clean up Travis.
closes #36601
| https://api.github.com/repos/pandas-dev/pandas/pulls/36514 | 2020-09-21T02:55:11Z | 2020-09-24T14:58:54Z | 2020-09-24T14:58:54Z | 2020-10-03T05:41:40Z |
CI: troubleshoot segfault | diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index 10d366fe485da..5c4d7e191d1bb 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -305,11 +305,3 @@ def test_pickle_roundtrip_containers(as_frame, values, dtype):
s = s.to_frame(name="A")
result = tm.round_trip_pickle(s)
tm.assert_equal(result, s)
-
-
-@pytest.mark.parametrize("array", [np.array(["a"], dtype=object), ["a"]])
-def test_array_contains_na(array):
- # GH 31922
- msg = "boolean value of NA is ambiguous"
- with pytest.raises(TypeError, match=msg):
- NA in array
| I can't reproduce the CI failures locally, so just rolling back a few commits to narrow it down. | https://api.github.com/repos/pandas-dev/pandas/pulls/36511 | 2020-09-20T21:25:49Z | 2020-09-21T12:28:24Z | 2020-09-21T12:28:24Z | 2021-11-20T23:21:19Z |
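For context, a sketch of what the temporarily removed test asserted (GH 31922): a membership check compares `pd.NA` elementwise and then needs a boolean, which `NA` refuses to provide:

```python
import numpy as np
import pandas as pd

arr = np.array(["a"], dtype=object)

# `in` evaluates `pd.NA == element`, which propagates NA, and then asks
# for its truth value; bool(pd.NA) raises by design.
try:
    pd.NA in arr
except TypeError as err:
    print(err)  # boolean value of NA is ambiguous
```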
REF: dataframe formatters/outputs | diff --git a/pandas/_typing.py b/pandas/_typing.py
index 7678d1bf12d8b..a9177106535fc 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -38,6 +38,8 @@
from pandas.core.indexes.base import Index
from pandas.core.series import Series
+ from pandas.io.formats.format import EngFormatter
+
# array-like
AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
@@ -127,6 +129,10 @@
EncodingVar = TypeVar("EncodingVar", str, None, Optional[str])
+# type of float formatter in DataFrameFormatter
+FloatFormatType = Union[str, Callable, "EngFormatter"]
+
+
@dataclass
class IOargs(Generic[ModeVar, EncodingVar]):
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 801307a8f9481..3af7d5c8fae24 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -788,10 +788,8 @@ def _repr_html_(self) -> Optional[str]:
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
- table_id=None,
- render_links=False,
)
- return formatter.to_html(notebook=True)
+ return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@@ -874,9 +872,12 @@ def to_string(
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
+ )
+ return fmt.DataFrameRenderer(formatter).to_string(
+ buf=buf,
+ encoding=encoding,
line_width=line_width,
)
- return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
@@ -2476,29 +2477,29 @@ def to_html(
columns=columns,
col_space=col_space,
na_rep=na_rep,
+ header=header,
+ index=index,
formatters=formatters,
float_format=float_format,
+ bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
- header=header,
- index=index,
- bold_rows=bold_rows,
escape=escape,
+ decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
- decimal=decimal,
- table_id=table_id,
- render_links=render_links,
)
# TODO: a generic formatter wld b in DataFrameFormatter
- return formatter.to_html(
+ return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
+ table_id=table_id,
+ render_links=render_links,
)
# ----------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4d2f504146e87..d658d799f1fb8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4,7 +4,6 @@
from datetime import timedelta
import functools
import gc
-from io import StringIO
import json
import operator
import pickle
@@ -109,7 +108,11 @@
from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window
from pandas.io.formats import format as fmt
-from pandas.io.formats.format import DataFrameFormatter, format_percentiles
+from pandas.io.formats.format import (
+ DataFrameFormatter,
+ DataFrameRenderer,
+ format_percentiles,
+)
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
@@ -3149,7 +3152,7 @@ def to_latex(
escape=escape,
decimal=decimal,
)
- return formatter.to_latex(
+ return DataFrameRenderer(formatter).to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
@@ -3182,7 +3185,7 @@ def to_csv(
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
- decimal: Optional[str] = ".",
+ decimal: str = ".",
errors: str = "strict",
storage_options: StorageOptions = None,
) -> Optional[str]:
@@ -3340,10 +3343,16 @@ def to_csv(
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
- from pandas.io.formats.csvs import CSVFormatter
+ formatter = DataFrameFormatter(
+ frame=df,
+ header=header,
+ index=index,
+ na_rep=na_rep,
+ float_format=float_format,
+ decimal=decimal,
+ )
- formatter = CSVFormatter(
- df,
+ return DataFrameRenderer(formatter).to_csv(
path_or_buf,
line_terminator=line_terminator,
sep=sep,
@@ -3351,11 +3360,7 @@ def to_csv(
errors=errors,
compression=compression,
quoting=quoting,
- na_rep=na_rep,
- float_format=float_format,
- cols=columns,
- header=header,
- index=index,
+ columns=columns,
index_label=index_label,
mode=mode,
chunksize=chunksize,
@@ -3363,16 +3368,8 @@ def to_csv(
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
- decimal=decimal,
storage_options=storage_options,
)
- formatter.save()
-
- if path_or_buf is None:
- assert isinstance(formatter.path_or_buf, StringIO)
- return formatter.path_or_buf.getvalue()
-
- return None
# ----------------------------------------------------------------------
# Lookup Caching
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index d0e9163fc5f11..6c62d6825bc84 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -5,7 +5,7 @@
import csv as csvlib
from io import StringIO, TextIOWrapper
import os
-from typing import Any, Dict, Hashable, Iterator, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Union
import numpy as np
@@ -13,6 +13,7 @@
from pandas._typing import (
CompressionOptions,
FilePathOrBuffer,
+ FloatFormatType,
IndexLabel,
Label,
StorageOptions,
@@ -30,18 +31,17 @@
from pandas.io.common import get_filepath_or_buffer, get_handle
+if TYPE_CHECKING:
+ from pandas.io.formats.format import DataFrameFormatter
+
class CSVFormatter:
def __init__(
self,
- obj,
+ formatter: "DataFrameFormatter",
path_or_buf: Optional[FilePathOrBuffer[str]] = None,
sep: str = ",",
- na_rep: str = "",
- float_format: Optional[str] = None,
cols: Optional[Sequence[Label]] = None,
- header: Union[bool, Sequence[Hashable]] = True,
- index: bool = True,
index_label: Optional[IndexLabel] = None,
mode: str = "w",
encoding: Optional[str] = None,
@@ -54,10 +54,11 @@ def __init__(
date_format: Optional[str] = None,
doublequote: bool = True,
escapechar: Optional[str] = None,
- decimal=".",
storage_options: StorageOptions = None,
):
- self.obj = obj
+ self.fmt = formatter
+
+ self.obj = self.fmt.frame
self.encoding = encoding or "utf-8"
@@ -79,35 +80,45 @@ def __init__(
self.mode = ioargs.mode
self.sep = sep
- self.na_rep = na_rep
- self.float_format = float_format
- self.decimal = decimal
- self.header = header
- self.index = index
- self.index_label = index_label
+ self.index_label = self._initialize_index_label(index_label)
self.errors = errors
self.quoting = quoting or csvlib.QUOTE_MINIMAL
- self.quotechar = quotechar
+ self.quotechar = self._initialize_quotechar(quotechar)
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator or os.linesep
self.date_format = date_format
- self.cols = cols # type: ignore[assignment]
- self.chunksize = chunksize # type: ignore[assignment]
+ self.cols = self._initialize_columns(cols)
+ self.chunksize = self._initialize_chunksize(chunksize)
+
+ @property
+ def na_rep(self) -> str:
+ return self.fmt.na_rep
+
+ @property
+ def float_format(self) -> Optional["FloatFormatType"]:
+ return self.fmt.float_format
@property
- def index_label(self) -> IndexLabel:
- return self._index_label
+ def decimal(self) -> str:
+ return self.fmt.decimal
- @index_label.setter
- def index_label(self, index_label: Optional[IndexLabel]) -> None:
+ @property
+ def header(self) -> Union[bool, Sequence[str]]:
+ return self.fmt.header
+
+ @property
+ def index(self) -> bool:
+ return self.fmt.index
+
+ def _initialize_index_label(self, index_label: Optional[IndexLabel]) -> IndexLabel:
if index_label is not False:
if index_label is None:
- index_label = self._get_index_label_from_obj()
+ return self._get_index_label_from_obj()
elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndexClass)):
# given a string for a DF with Index
- index_label = [index_label]
- self._index_label = index_label
+ return [index_label]
+ return index_label
def _get_index_label_from_obj(self) -> List[str]:
if isinstance(self.obj.index, ABCMultiIndex):
@@ -122,30 +133,17 @@ def _get_index_label_flat(self) -> List[str]:
index_label = self.obj.index.name
return [""] if index_label is None else [index_label]
- @property
- def quotechar(self) -> Optional[str]:
+ def _initialize_quotechar(self, quotechar: Optional[str]) -> Optional[str]:
if self.quoting != csvlib.QUOTE_NONE:
# prevents crash in _csv
- return self._quotechar
+ return quotechar
return None
- @quotechar.setter
- def quotechar(self, quotechar: Optional[str]) -> None:
- self._quotechar = quotechar
-
@property
def has_mi_columns(self) -> bool:
return bool(isinstance(self.obj.columns, ABCMultiIndex))
- @property
- def cols(self) -> Sequence[Label]:
- return self._cols
-
- @cols.setter
- def cols(self, cols: Optional[Sequence[Label]]) -> None:
- self._cols = self._refine_cols(cols)
-
- def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]:
+ def _initialize_columns(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]:
# validate mi options
if self.has_mi_columns:
if cols is not None:
@@ -161,12 +159,16 @@ def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]:
# update columns to include possible multiplicity of dupes
# and make sure sure cols is just a list of labels
- cols = self.obj.columns
- if isinstance(cols, ABCIndexClass):
- return cols._format_native_types(**self._number_format)
+ new_cols = self.obj.columns
+ if isinstance(new_cols, ABCIndexClass):
+ return new_cols._format_native_types(**self._number_format)
else:
- assert isinstance(cols, Sequence)
- return list(cols)
+ return list(new_cols)
+
+ def _initialize_chunksize(self, chunksize: Optional[int]) -> int:
+ if chunksize is None:
+ return (100000 // (len(self.cols) or 1)) or 1
+ return int(chunksize)
@property
def _number_format(self) -> Dict[str, Any]:
@@ -179,17 +181,6 @@ def _number_format(self) -> Dict[str, Any]:
decimal=self.decimal,
)
- @property
- def chunksize(self) -> int:
- return self._chunksize
-
- @chunksize.setter
- def chunksize(self, chunksize: Optional[int]) -> None:
- if chunksize is None:
- chunksize = (100000 // (len(self.cols) or 1)) or 1
- assert chunksize is not None
- self._chunksize = int(chunksize)
-
@property
def data_index(self) -> Index:
data_index = self.obj.index
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 7635cda56ba26..6f4bd2ed8c73a 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -39,8 +39,14 @@
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas._libs.tslibs.nattype import NaTType
-from pandas._typing import FilePathOrBuffer, Label
-from pandas.errors import AbstractMethodError
+from pandas._typing import (
+ CompressionOptions,
+ FilePathOrBuffer,
+ FloatFormatType,
+ IndexLabel,
+ Label,
+ StorageOptions,
+)
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -75,10 +81,10 @@
if TYPE_CHECKING:
from pandas import Categorical, DataFrame, Series
+
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
-FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceType = Mapping[Label, Union[str, int]]
ColspaceArgType = Union[
str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]]
@@ -449,95 +455,8 @@ def get_adjustment() -> TextAdjustment:
return TextAdjustment()
-class TableFormatter:
-
- show_dimensions: Union[bool, str]
- formatters: FormattersType
- columns: Index
- _is_truncated: bool
-
- @property
- def is_truncated(self) -> bool:
- return self._is_truncated
-
- @property
- def should_show_dimensions(self) -> bool:
- return self.show_dimensions is True or (
- self.show_dimensions == "truncate" and self.is_truncated
- )
-
- def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
- if isinstance(self.formatters, (list, tuple)):
- if is_integer(i):
- i = cast(int, i)
- return self.formatters[i]
- else:
- return None
- else:
- if is_integer(i) and i not in self.columns:
- i = self.columns[i]
- return self.formatters.get(i, None)
-
- @contextmanager
- def get_buffer(
- self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None
- ):
- """
- Context manager to open, yield and close buffer for filenames or Path-like
- objects, otherwise yield buf unchanged.
- """
- if buf is not None:
- buf = stringify_path(buf)
- else:
- buf = StringIO()
-
- if encoding is None:
- encoding = "utf-8"
- elif not isinstance(buf, str):
- raise ValueError("buf is not a file name and encoding is specified.")
-
- if hasattr(buf, "write"):
- yield buf
- elif isinstance(buf, str):
- with open(buf, "w", encoding=encoding, newline="") as f:
- # GH#30034 open instead of codecs.open prevents a file leak
- # if we have an invalid encoding argument.
- # newline="" is needed to roundtrip correctly on
- # windows test_to_latex_filename
- yield f
- else:
- raise TypeError("buf is not a file name and it has no write method")
-
- def write_result(self, buf: IO[str]) -> None:
- """
- Write the result of serialization to buf.
- """
- raise AbstractMethodError(self)
-
- def get_result(
- self,
- buf: Optional[FilePathOrBuffer[str]] = None,
- encoding: Optional[str] = None,
- ) -> Optional[str]:
- """
- Perform serialization. Write to buf or return as string if buf is None.
- """
- with self.get_buffer(buf, encoding=encoding) as f:
- self.write_result(buf=f)
- if buf is None:
- return f.getvalue()
- return None
-
-
-class DataFrameFormatter(TableFormatter):
- """
- Render a DataFrame
-
- self.to_string() : console-friendly tabular output
- self.to_html() : html table
- self.to_latex() : LaTeX tabular environment table
-
- """
+class DataFrameFormatter:
+ """Class for processing dataframe formatting options and data."""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
@@ -555,46 +474,94 @@ def __init__(
float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
- line_width: Optional[int] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: Union[bool, str] = False,
decimal: str = ".",
- table_id: Optional[str] = None,
- render_links: bool = False,
bold_rows: bool = False,
escape: bool = True,
):
self.frame = frame
- self.show_index_names = index_names
- self.sparsify = self._initialize_sparsify(sparsify)
- self.float_format = float_format
- self.formatters = self._initialize_formatters(formatters)
- self.na_rep = na_rep
- self.decimal = decimal
+ self.columns = self._initialize_columns(columns)
self.col_space = self._initialize_colspace(col_space)
self.header = header
self.index = index
- self.line_width = line_width
+ self.na_rep = na_rep
+ self.formatters = self._initialize_formatters(formatters)
+ self.justify = self._initialize_justify(justify)
+ self.float_format = float_format
+ self.sparsify = self._initialize_sparsify(sparsify)
+ self.show_index_names = index_names
+ self.decimal = decimal
+ self.bold_rows = bold_rows
+ self.escape = escape
self.max_rows = max_rows
self.min_rows = min_rows
self.max_cols = max_cols
self.show_dimensions = show_dimensions
- self.table_id = table_id
- self.render_links = render_links
- self.justify = self._initialize_justify(justify)
- self.bold_rows = bold_rows
- self.escape = escape
- self.columns = self._initialize_columns(columns)
self.max_cols_fitted = self._calc_max_cols_fitted()
self.max_rows_fitted = self._calc_max_rows_fitted()
self.tr_frame = self.frame
- self._truncate()
+ self.truncate()
self.adj = get_adjustment()
+ def get_strcols(self) -> List[List[str]]:
+ """
+ Render a DataFrame to a list of columns (as lists of strings).
+ """
+ strcols = self._get_strcols_without_index()
+
+ if self.index:
+ str_index = self._get_formatted_index(self.tr_frame)
+ strcols.insert(0, str_index)
+
+ return strcols
+
+ @property
+ def should_show_dimensions(self) -> bool:
+ return self.show_dimensions is True or (
+ self.show_dimensions == "truncate" and self.is_truncated
+ )
+
+ @property
+ def is_truncated(self) -> bool:
+ return bool(self.is_truncated_horizontally or self.is_truncated_vertically)
+
+ @property
+ def is_truncated_horizontally(self) -> bool:
+ return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))
+
+ @property
+ def is_truncated_vertically(self) -> bool:
+ return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))
+
+ @property
+ def dimensions_info(self) -> str:
+ return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"
+
+ @property
+ def has_index_names(self) -> bool:
+ return _has_names(self.frame.index)
+
+ @property
+ def has_column_names(self) -> bool:
+ return _has_names(self.frame.columns)
+
+ @property
+ def show_row_idx_names(self) -> bool:
+ return all((self.has_index_names, self.index, self.show_index_names))
+
+ @property
+ def show_col_idx_names(self) -> bool:
+ return all((self.has_column_names, self.show_index_names, self.header))
+
+ @property
+ def max_rows_displayed(self) -> int:
+ return min(self.max_rows or len(self.frame), len(self.frame))
+
def _initialize_sparsify(self, sparsify: Optional[bool]) -> bool:
if sparsify is None:
return get_option("display.multi_sparse")
@@ -653,10 +620,6 @@ def _initialize_colspace(
result = dict(zip(self.frame.columns, col_space))
return result
- @property
- def max_rows_displayed(self) -> int:
- return min(self.max_rows or len(self.frame), len(self.frame))
-
def _calc_max_cols_fitted(self) -> Optional[int]:
"""Number of columns fitting the screen."""
if not self._is_in_terminal():
@@ -707,26 +670,14 @@ def _get_number_of_auxillary_rows(self) -> int:
num_rows = dot_row + prompt_row
if self.show_dimensions:
- num_rows += len(self._dimensions_info.splitlines())
+ num_rows += len(self.dimensions_info.splitlines())
if self.header:
num_rows += 1
return num_rows
- @property
- def is_truncated_horizontally(self) -> bool:
- return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))
-
- @property
- def is_truncated_vertically(self) -> bool:
- return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))
-
- @property
- def is_truncated(self) -> bool:
- return bool(self.is_truncated_horizontally or self.is_truncated_vertically)
-
- def _truncate(self) -> None:
+ def truncate(self) -> None:
"""
Check whether the frame should be truncated. If so, slice the frame up.
"""
@@ -785,7 +736,7 @@ def _get_strcols_without_index(self) -> List[List[str]]:
if not is_list_like(self.header) and not self.header:
for i, c in enumerate(self.tr_frame):
- fmt_values = self._format_col(i)
+ fmt_values = self.format_col(i)
fmt_values = _make_fixed_width(
strings=fmt_values,
justify=self.justify,
@@ -816,7 +767,7 @@ def _get_strcols_without_index(self) -> List[List[str]]:
header_colwidth = max(
int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
)
- fmt_values = self._format_col(i)
+ fmt_values = self.format_col(i)
fmt_values = _make_fixed_width(
fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
)
@@ -827,223 +778,7 @@ def _get_strcols_without_index(self) -> List[List[str]]:
return strcols
- def _get_strcols(self) -> List[List[str]]:
- strcols = self._get_strcols_without_index()
-
- str_index = self._get_formatted_index(self.tr_frame)
- if self.index:
- strcols.insert(0, str_index)
-
- return strcols
-
- def _to_str_columns(self) -> List[List[str]]:
- """
- Render a DataFrame to a list of columns (as lists of strings).
- """
- strcols = self._get_strcols()
-
- if self.is_truncated:
- strcols = self._insert_dot_separators(strcols)
-
- return strcols
-
- def _insert_dot_separators(self, strcols: List[List[str]]) -> List[List[str]]:
- str_index = self._get_formatted_index(self.tr_frame)
- index_length = len(str_index)
-
- if self.is_truncated_horizontally:
- strcols = self._insert_dot_separator_horizontal(strcols, index_length)
-
- if self.is_truncated_vertically:
- strcols = self._insert_dot_separator_vertical(strcols, index_length)
-
- return strcols
-
- def _insert_dot_separator_horizontal(
- self, strcols: List[List[str]], index_length: int
- ) -> List[List[str]]:
- strcols.insert(self.tr_col_num + 1, [" ..."] * index_length)
- return strcols
-
- def _insert_dot_separator_vertical(
- self, strcols: List[List[str]], index_length: int
- ) -> List[List[str]]:
- n_header_rows = index_length - len(self.tr_frame)
- row_num = self.tr_row_num
- for ix, col in enumerate(strcols):
- cwidth = self.adj.len(col[row_num])
-
- if self.is_truncated_horizontally:
- is_dot_col = ix == self.tr_col_num + 1
- else:
- is_dot_col = False
-
- if cwidth > 3 or is_dot_col:
- dots = "..."
- else:
- dots = ".."
-
- if ix == 0:
- dot_mode = "left"
- elif is_dot_col:
- cwidth = 4
- dot_mode = "right"
- else:
- dot_mode = "right"
-
- dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
- col.insert(row_num + n_header_rows, dot_str)
- return strcols
-
- def write_result(self, buf: IO[str]) -> None:
- """
- Render a DataFrame to a console-friendly tabular output.
- """
- text = self._get_string_representation()
-
- buf.writelines(text)
-
- if self.should_show_dimensions:
- buf.write(self._dimensions_info)
-
- @property
- def _dimensions_info(self) -> str:
- return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"
-
- def _get_string_representation(self) -> str:
- if self.frame.empty:
- info_line = (
- f"Empty {type(self.frame).__name__}\n"
- f"Columns: {pprint_thing(self.frame.columns)}\n"
- f"Index: {pprint_thing(self.frame.index)}"
- )
- return info_line
-
- strcols = self._to_str_columns()
-
- if self.line_width is None:
- # no need to wrap around just print the whole frame
- return self.adj.adjoin(1, *strcols)
-
- if self.max_cols is None or self.max_cols > 0:
- # need to wrap around
- return self._join_multiline(*strcols)
-
- # max_cols == 0. Try to fit frame to terminal
- return self._fit_strcols_to_terminal_width(strcols)
-
- def _fit_strcols_to_terminal_width(self, strcols) -> str:
- from pandas import Series
-
- lines = self.adj.adjoin(1, *strcols).split("\n")
- max_len = Series(lines).str.len().max()
- # plus truncate dot col
- width, _ = get_terminal_size()
- dif = max_len - width
- # '+ 1' to avoid too wide repr (GH PR #17023)
- adj_dif = dif + 1
- col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
- n_cols = len(col_lens)
- counter = 0
- while adj_dif > 0 and n_cols > 1:
- counter += 1
- mid = int(round(n_cols / 2.0))
- mid_ix = col_lens.index[mid]
- col_len = col_lens[mid_ix]
- # adjoin adds one
- adj_dif -= col_len + 1
- col_lens = col_lens.drop(mid_ix)
- n_cols = len(col_lens)
-
- # subtract index column
- max_cols_fitted = n_cols - self.index
- # GH-21180. Ensure that we print at least two.
- max_cols_fitted = max(max_cols_fitted, 2)
- self.max_cols_fitted = max_cols_fitted
-
- # Call again _truncate to cut frame appropriately
- # and then generate string representation
- self._truncate()
- strcols = self._to_str_columns()
- return self.adj.adjoin(1, *strcols)
-
- def _join_multiline(self, *args) -> str:
- lwidth = self.line_width
- adjoin_width = 1
- strcols = list(args)
- if self.index:
- idx = strcols.pop(0)
- lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
-
- col_widths = [
- np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
- for col in strcols
- ]
-
- assert lwidth is not None
- col_bins = _binify(col_widths, lwidth)
- nbins = len(col_bins)
-
- if self.is_truncated_vertically:
- assert self.max_rows_fitted is not None
- nrows = self.max_rows_fitted + 1
- else:
- nrows = len(self.frame)
-
- str_lst = []
- start = 0
- for i, end in enumerate(col_bins):
- row = strcols[start:end]
- if self.index:
- row.insert(0, idx)
- if nbins > 1:
- if end <= len(strcols) and i < nbins - 1:
- row.append([" \\"] + [" "] * (nrows - 1))
- else:
- row.append([" "] * nrows)
- str_lst.append(self.adj.adjoin(adjoin_width, *row))
- start = end
- return "\n\n".join(str_lst)
-
- def to_string(
- self,
- buf: Optional[FilePathOrBuffer[str]] = None,
- encoding: Optional[str] = None,
- ) -> Optional[str]:
- return self.get_result(buf=buf, encoding=encoding)
-
- def to_latex(
- self,
- buf: Optional[FilePathOrBuffer[str]] = None,
- column_format: Optional[str] = None,
- longtable: bool = False,
- encoding: Optional[str] = None,
- multicolumn: bool = False,
- multicolumn_format: Optional[str] = None,
- multirow: bool = False,
- caption: Optional[Union[str, Tuple[str, str]]] = None,
- label: Optional[str] = None,
- position: Optional[str] = None,
- ) -> Optional[str]:
- """
- Render a DataFrame to a LaTeX tabular/longtable environment output.
- """
- from pandas.io.formats.latex import LatexFormatter
-
- latex_formatter = LatexFormatter(
- self,
- longtable=longtable,
- column_format=column_format,
- multicolumn=multicolumn,
- multicolumn_format=multicolumn_format,
- multirow=multirow,
- caption=caption,
- label=label,
- position=position,
- )
- return latex_formatter.get_result(buf=buf, encoding=encoding)
-
- def _format_col(self, i: int) -> List[str]:
+ def format_col(self, i: int) -> List[str]:
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
@@ -1056,34 +791,17 @@ def _format_col(self, i: int) -> List[str]:
leading_space=self.index,
)
- def to_html(
- self,
- buf: Optional[FilePathOrBuffer[str]] = None,
- encoding: Optional[str] = None,
- classes: Optional[Union[str, List, Tuple]] = None,
- notebook: bool = False,
- border: Optional[int] = None,
- ) -> Optional[str]:
- """
- Render a DataFrame to a html table.
-
- Parameters
- ----------
- classes : str or list-like
- classes to include in the `class` attribute of the opening
- ``<table>`` tag, in addition to the default "dataframe".
- notebook : {True, False}, optional, default False
- Whether the generated HTML is for IPython Notebook.
- border : int
- A ``border=border`` attribute is included in the opening
- ``<table>`` tag. Default ``pd.options.display.html.border``.
- """
- from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
-
- Klass = NotebookFormatter if notebook else HTMLFormatter
- return Klass(self, classes=classes, border=border).get_result(
- buf=buf, encoding=encoding
- )
+ def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
+ if isinstance(self.formatters, (list, tuple)):
+ if is_integer(i):
+ i = cast(int, i)
+ return self.formatters[i]
+ else:
+ return None
+ else:
+ if is_integer(i) and i not in self.columns:
+ i = self.columns[i]
+ return self.formatters.get(i, None)
def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
from pandas.core.indexes.multi import sparsify_labels
@@ -1126,22 +844,6 @@ def space_format(x, y):
# self.str_columns = str_columns
return str_columns
- @property
- def has_index_names(self) -> bool:
- return _has_names(self.frame.index)
-
- @property
- def has_column_names(self) -> bool:
- return _has_names(self.frame.columns)
-
- @property
- def show_row_idx_names(self) -> bool:
- return all((self.has_index_names, self.index, self.show_index_names))
-
- @property
- def show_col_idx_names(self) -> bool:
- return all((self.has_column_names, self.show_index_names, self.header))
-
def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
# Note: this is only used by to_string() and to_latex(), not by
# to_html(). so safe to cast col_space here.
@@ -1192,6 +894,224 @@ def _get_column_name_list(self) -> List[str]:
return names
+class DataFrameRenderer:
+ """Class for creating dataframe output in multiple formats.
+
+ Called in pandas.core.generic.NDFrame:
+ - to_csv
+ - to_latex
+
+ Called in pandas.core.frame.DataFrame:
+ - to_html
+ - to_string
+
+ Parameters
+ ----------
+ fmt : DataFrameFormatter
+        Formatter with the formatting options.
+ """
+
+ def __init__(self, fmt: DataFrameFormatter):
+ self.fmt = fmt
+
+ def to_latex(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ column_format: Optional[str] = None,
+ longtable: bool = False,
+ encoding: Optional[str] = None,
+ multicolumn: bool = False,
+ multicolumn_format: Optional[str] = None,
+ multirow: bool = False,
+ caption: Optional[str] = None,
+ label: Optional[str] = None,
+ position: Optional[str] = None,
+ ) -> Optional[str]:
+ """
+ Render a DataFrame to a LaTeX tabular/longtable environment output.
+ """
+ from pandas.io.formats.latex import LatexFormatter
+
+ latex_formatter = LatexFormatter(
+ self.fmt,
+ longtable=longtable,
+ column_format=column_format,
+ multicolumn=multicolumn,
+ multicolumn_format=multicolumn_format,
+ multirow=multirow,
+ caption=caption,
+ label=label,
+ position=position,
+ )
+ string = latex_formatter.to_string()
+ return save_to_buffer(string, buf=buf, encoding=encoding)
+
+ def to_html(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+ classes: Optional[Union[str, List, Tuple]] = None,
+ notebook: bool = False,
+ border: Optional[int] = None,
+ table_id: Optional[str] = None,
+ render_links: bool = False,
+ ) -> Optional[str]:
+ """
+ Render a DataFrame to a html table.
+
+ Parameters
+ ----------
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
+        encoding : str, default "utf-8"
+ Set character encoding.
+ classes : str or list-like
+ classes to include in the `class` attribute of the opening
+ ``<table>`` tag, in addition to the default "dataframe".
+ notebook : {True, False}, optional, default False
+ Whether the generated HTML is for IPython Notebook.
+ border : int
+ A ``border=border`` attribute is included in the opening
+ ``<table>`` tag. Default ``pd.options.display.html.border``.
+ table_id : str, optional
+ A css id is included in the opening `<table>` tag if specified.
+ render_links : bool, default False
+ Convert URLs to HTML links.
+ """
+ from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
+
+ Klass = NotebookFormatter if notebook else HTMLFormatter
+
+ html_formatter = Klass(
+ self.fmt,
+ classes=classes,
+ border=border,
+ table_id=table_id,
+ render_links=render_links,
+ )
+ string = html_formatter.to_string()
+ return save_to_buffer(string, buf=buf, encoding=encoding)
+
+ def to_string(
+ self,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+ line_width: Optional[int] = None,
+ ) -> Optional[str]:
+ """
+ Render a DataFrame to a console-friendly tabular output.
+
+ Parameters
+ ----------
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
+        encoding : str, default "utf-8"
+ Set character encoding.
+ line_width : int, optional
+ Width to wrap a line in characters.
+ """
+ from pandas.io.formats.string import StringFormatter
+
+ string_formatter = StringFormatter(self.fmt, line_width=line_width)
+ string = string_formatter.to_string()
+ return save_to_buffer(string, buf=buf, encoding=encoding)
+
+ def to_csv(
+ self,
+ path_or_buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+ sep: str = ",",
+ columns: Optional[Sequence[Label]] = None,
+ index_label: Optional[IndexLabel] = None,
+ mode: str = "w",
+ compression: CompressionOptions = "infer",
+ quoting: Optional[int] = None,
+ quotechar: str = '"',
+ line_terminator: Optional[str] = None,
+ chunksize: Optional[int] = None,
+ date_format: Optional[str] = None,
+ doublequote: bool = True,
+ escapechar: Optional[str] = None,
+ errors: str = "strict",
+ storage_options: StorageOptions = None,
+ ) -> Optional[str]:
+ """
+ Render dataframe as comma-separated file.
+ """
+ from pandas.io.formats.csvs import CSVFormatter
+
+ csv_formatter = CSVFormatter(
+ path_or_buf=path_or_buf,
+ line_terminator=line_terminator,
+ sep=sep,
+ encoding=encoding,
+ errors=errors,
+ compression=compression,
+ quoting=quoting,
+ cols=columns,
+ index_label=index_label,
+ mode=mode,
+ chunksize=chunksize,
+ quotechar=quotechar,
+ date_format=date_format,
+ doublequote=doublequote,
+ escapechar=escapechar,
+ storage_options=storage_options,
+ formatter=self.fmt,
+ )
+ csv_formatter.save()
+
+ if path_or_buf is None:
+ assert isinstance(csv_formatter.path_or_buf, StringIO)
+ return csv_formatter.path_or_buf.getvalue()
+
+ return None
+
+
+def save_to_buffer(
+ string: str,
+ buf: Optional[FilePathOrBuffer[str]] = None,
+ encoding: Optional[str] = None,
+) -> Optional[str]:
+ """
+ Perform serialization. Write to buf or return as string if buf is None.
+ """
+ with get_buffer(buf, encoding=encoding) as f:
+ f.write(string)
+ if buf is None:
+ return f.getvalue()
+ return None
+
+
+@contextmanager
+def get_buffer(buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None):
+ """
+ Context manager to open, yield and close buffer for filenames or Path-like
+ objects, otherwise yield buf unchanged.
+ """
+ if buf is not None:
+ buf = stringify_path(buf)
+ else:
+ buf = StringIO()
+
+ if encoding is None:
+ encoding = "utf-8"
+ elif not isinstance(buf, str):
+ raise ValueError("buf is not a file name and encoding is specified.")
+
+ if hasattr(buf, "write"):
+ yield buf
+ elif isinstance(buf, str):
+ with open(buf, "w", encoding=encoding, newline="") as f:
+ # GH#30034 open instead of codecs.open prevents a file leak
+ # if we have an invalid encoding argument.
+ # newline="" is needed to roundtrip correctly on
+ # windows test_to_latex_filename
+ yield f
+ else:
+ raise TypeError("buf is not a file name and it has no write method")
+
+
# ----------------------------------------------------------------------
# Array formatters
@@ -2036,26 +1956,6 @@ def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> Non
set_option("display.column_space", max(12, accuracy + 9))
-def _binify(cols: List[int], line_width: int) -> List[int]:
- adjoin_width = 1
- bins = []
- curr_width = 0
- i_last_column = len(cols) - 1
- for i, w in enumerate(cols):
- w_adjoined = w + adjoin_width
- curr_width += w_adjoined
- if i_last_column == i:
- wrap = curr_width + 1 > line_width and i > 0
- else:
- wrap = curr_width + 2 > line_width and i > 0
- if wrap:
- bins.append(i)
- curr_width = w_adjoined
-
- bins.append(len(cols))
- return bins
-
-
def get_level_lengths(
levels: Any, sentinel: Union[bool, object, str] = ""
) -> List[Dict[int, int]]:
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index c8eb89afdd849..b4f7e3922f02f 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -3,7 +3,7 @@
"""
from textwrap import dedent
-from typing import IO, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast
+from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast
from pandas._config import get_option
@@ -12,16 +12,11 @@
from pandas import MultiIndex, option_context
from pandas.io.common import is_url
-from pandas.io.formats.format import (
- DataFrameFormatter,
- TableFormatter,
- buffer_put_lines,
- get_level_lengths,
-)
+from pandas.io.formats.format import DataFrameFormatter, get_level_lengths
from pandas.io.formats.printing import pprint_thing
-class HTMLFormatter(TableFormatter):
+class HTMLFormatter:
"""
Internal class for formatting output data in html.
This class is intended for shared functionality between
@@ -38,6 +33,8 @@ def __init__(
formatter: DataFrameFormatter,
classes: Optional[Union[str, List[str], Tuple[str, ...]]] = None,
border: Optional[int] = None,
+ table_id: Optional[str] = None,
+ render_links: bool = False,
) -> None:
self.fmt = formatter
self.classes = classes
@@ -51,14 +48,35 @@ def __init__(
if border is None:
border = cast(int, get_option("display.html.border"))
self.border = border
- self.table_id = self.fmt.table_id
- self.render_links = self.fmt.render_links
+ self.table_id = table_id
+ self.render_links = render_links
self.col_space = {
column: f"{value}px" if isinstance(value, int) else value
for column, value in self.fmt.col_space.items()
}
+ def to_string(self) -> str:
+ lines = self.render()
+ if any(isinstance(x, str) for x in lines):
+ lines = [str(x) for x in lines]
+ return "\n".join(lines)
+
+ def render(self) -> List[str]:
+ self._write_table()
+
+ if self.should_show_dimensions:
+ by = chr(215) # ×
+ self.write(
+ f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
+ )
+
+ return self.elements
+
+ @property
+ def should_show_dimensions(self):
+ return self.fmt.should_show_dimensions
+
@property
def show_row_idx_names(self) -> bool:
return self.fmt.show_row_idx_names
@@ -187,20 +205,6 @@ def write_tr(
indent -= indent_delta
self.write("</tr>", indent)
- def render(self) -> List[str]:
- self._write_table()
-
- if self.should_show_dimensions:
- by = chr(215) # ×
- self.write(
- f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
- )
-
- return self.elements
-
- def write_result(self, buf: IO[str]) -> None:
- buffer_put_lines(buf, self.render())
-
def _write_table(self, indent: int = 0) -> None:
_classes = ["dataframe"] # Default class.
use_mathjax = get_option("display.html.use_mathjax")
@@ -370,7 +374,7 @@ def _write_header(self, indent: int) -> None:
def _get_formatted_values(self) -> Dict[int, List[str]]:
with option_context("display.max_colwidth", None):
- fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)}
+ fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
return fmt_values
def _write_body(self, indent: int) -> None:
@@ -565,7 +569,7 @@ class NotebookFormatter(HTMLFormatter):
"""
def _get_formatted_values(self) -> Dict[int, List[str]]:
- return {i: self.fmt._format_col(i) for i in range(self.ncols)}
+ return {i: self.fmt.format_col(i) for i in range(self.ncols)}
def _get_columns_formatted_values(self) -> List[str]:
return self.columns.format()
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 2eee0ce73291f..f3c49e1cd3801 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -2,13 +2,13 @@
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
-from typing import IO, Iterator, List, Optional, Tuple, Type, Union
+from typing import Iterator, List, Optional, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
-from pandas.io.formats.format import DataFrameFormatter, TableFormatter
+from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
@@ -133,17 +133,12 @@ def header_levels(self) -> int:
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
- if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
- info_line = (
- f"Empty {type(self.frame).__name__}\n"
- f"Columns: {self.frame.columns}\n"
- f"Index: {self.frame.index}"
- )
- strcols = [[info_line]]
+ if self.fmt.frame.empty:
+ strcols = [[self._empty_info_line]]
else:
- strcols = self.fmt._to_str_columns()
+ strcols = self.fmt.get_strcols()
- # reestablish the MultiIndex that has been joined by _to_str_column
+ # reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
@@ -176,6 +171,14 @@ def pad_empties(x):
strcols = out + strcols[1:]
return strcols
+ @property
+ def _empty_info_line(self):
+ return (
+ f"Empty {type(self.frame).__name__}\n"
+ f"Columns: {self.frame.columns}\n"
+ f"Index: {self.frame.index}"
+ )
+
def _preprocess_row(self, row: List[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
@@ -647,7 +650,7 @@ def env_end(self) -> str:
return "\\end{tabular}"
-class LatexFormatter(TableFormatter):
+class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
@@ -703,13 +706,12 @@ def __init__(
self.label = label
self.position = position
- def write_result(self, buf: IO[str]) -> None:
+ def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
- table_string = self.builder.get_result()
- buf.write(table_string)
+ return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
new file mode 100644
index 0000000000000..4ebb78f29c739
--- /dev/null
+++ b/pandas/io/formats/string.py
@@ -0,0 +1,201 @@
+"""
+Module for formatting output data in console (to string).
+"""
+from shutil import get_terminal_size
+from typing import Iterable, List, Optional
+
+import numpy as np
+
+from pandas.io.formats.format import DataFrameFormatter
+from pandas.io.formats.printing import pprint_thing
+
+
+class StringFormatter:
+ """Formatter for string representation of a dataframe."""
+
+ def __init__(self, fmt: DataFrameFormatter, line_width: Optional[int] = None):
+ self.fmt = fmt
+ self.adj = fmt.adj
+ self.frame = fmt.frame
+ self.line_width = line_width
+
+ def to_string(self) -> str:
+ text = self._get_string_representation()
+ if self.fmt.should_show_dimensions:
+ text = "".join([text, self.fmt.dimensions_info])
+ return text
+
+ def _get_strcols(self) -> List[List[str]]:
+ strcols = self.fmt.get_strcols()
+ if self.fmt.is_truncated:
+ strcols = self._insert_dot_separators(strcols)
+ return strcols
+
+ def _get_string_representation(self) -> str:
+ if self.fmt.frame.empty:
+ return self._empty_info_line
+
+ strcols = self._get_strcols()
+
+ if self.line_width is None:
+ # no need to wrap around just print the whole frame
+ return self.adj.adjoin(1, *strcols)
+
+ if self._need_to_wrap_around:
+ return self._join_multiline(strcols)
+
+ return self._fit_strcols_to_terminal_width(strcols)
+
+ @property
+ def _empty_info_line(self) -> str:
+ return (
+ f"Empty {type(self.frame).__name__}\n"
+ f"Columns: {pprint_thing(self.frame.columns)}\n"
+ f"Index: {pprint_thing(self.frame.index)}"
+ )
+
+ @property
+ def _need_to_wrap_around(self) -> bool:
+ return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
+
+ def _insert_dot_separators(self, strcols: List[List[str]]) -> List[List[str]]:
+ str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
+ index_length = len(str_index)
+
+ if self.fmt.is_truncated_horizontally:
+ strcols = self._insert_dot_separator_horizontal(strcols, index_length)
+
+ if self.fmt.is_truncated_vertically:
+ strcols = self._insert_dot_separator_vertical(strcols, index_length)
+
+ return strcols
+
+ def _insert_dot_separator_horizontal(
+ self, strcols: List[List[str]], index_length: int
+ ) -> List[List[str]]:
+ strcols.insert(self.fmt.tr_col_num + 1, [" ..."] * index_length)
+ return strcols
+
+ def _insert_dot_separator_vertical(
+ self, strcols: List[List[str]], index_length: int
+ ) -> List[List[str]]:
+ n_header_rows = index_length - len(self.fmt.tr_frame)
+ row_num = self.fmt.tr_row_num
+ for ix, col in enumerate(strcols):
+ cwidth = self.adj.len(col[row_num])
+
+ if self.fmt.is_truncated_horizontally:
+ is_dot_col = ix == self.fmt.tr_col_num + 1
+ else:
+ is_dot_col = False
+
+ if cwidth > 3 or is_dot_col:
+ dots = "..."
+ else:
+ dots = ".."
+
+ if ix == 0:
+ dot_mode = "left"
+ elif is_dot_col:
+ cwidth = 4
+ dot_mode = "right"
+ else:
+ dot_mode = "right"
+
+ dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
+ col.insert(row_num + n_header_rows, dot_str)
+ return strcols
+
+ def _join_multiline(self, strcols_input: Iterable[List[str]]) -> str:
+ lwidth = self.line_width
+ adjoin_width = 1
+ strcols = list(strcols_input)
+
+ if self.fmt.index:
+ idx = strcols.pop(0)
+ lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
+
+ col_widths = [
+ np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
+ for col in strcols
+ ]
+
+ assert lwidth is not None
+ col_bins = _binify(col_widths, lwidth)
+ nbins = len(col_bins)
+
+ if self.fmt.is_truncated_vertically:
+ assert self.fmt.max_rows_fitted is not None
+ nrows = self.fmt.max_rows_fitted + 1
+ else:
+ nrows = len(self.frame)
+
+ str_lst = []
+ start = 0
+ for i, end in enumerate(col_bins):
+ row = strcols[start:end]
+ if self.fmt.index:
+ row.insert(0, idx)
+ if nbins > 1:
+ if end <= len(strcols) and i < nbins - 1:
+ row.append([" \\"] + [" "] * (nrows - 1))
+ else:
+ row.append([" "] * nrows)
+ str_lst.append(self.adj.adjoin(adjoin_width, *row))
+ start = end
+ return "\n\n".join(str_lst)
+
+ def _fit_strcols_to_terminal_width(self, strcols: List[List[str]]) -> str:
+ from pandas import Series
+
+ lines = self.adj.adjoin(1, *strcols).split("\n")
+ max_len = Series(lines).str.len().max()
+ # plus truncate dot col
+ width, _ = get_terminal_size()
+ dif = max_len - width
+ # '+ 1' to avoid too wide repr (GH PR #17023)
+ adj_dif = dif + 1
+ col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
+ n_cols = len(col_lens)
+ counter = 0
+ while adj_dif > 0 and n_cols > 1:
+ counter += 1
+ mid = int(round(n_cols / 2.0))
+ mid_ix = col_lens.index[mid]
+ col_len = col_lens[mid_ix]
+ # adjoin adds one
+ adj_dif -= col_len + 1
+ col_lens = col_lens.drop(mid_ix)
+ n_cols = len(col_lens)
+
+ # subtract index column
+ max_cols_fitted = n_cols - self.fmt.index
+ # GH-21180. Ensure that we print at least two.
+ max_cols_fitted = max(max_cols_fitted, 2)
+ self.fmt.max_cols_fitted = max_cols_fitted
+
+ # Call again _truncate to cut frame appropriately
+ # and then generate string representation
+ self.fmt.truncate()
+ strcols = self._get_strcols()
+ return self.adj.adjoin(1, *strcols)
+
+
+def _binify(cols: List[int], line_width: int) -> List[int]:
+ adjoin_width = 1
+ bins = []
+ curr_width = 0
+ i_last_column = len(cols) - 1
+ for i, w in enumerate(cols):
+ w_adjoined = w + adjoin_width
+ curr_width += w_adjoined
+ if i_last_column == i:
+ wrap = curr_width + 1 > line_width and i > 0
+ else:
+ wrap = curr_width + 2 > line_width and i > 0
+ if wrap:
+ bins.append(i)
+ curr_width = w_adjoined
+
+ bins.append(len(cols))
+ return bins
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Partially addresses #36407
This is a continuation of https://github.com/pandas-dev/pandas/pull/36434.
- Separated ``StringFormatter`` (or would ``ConsoleFormatter`` be a better name?) from ``DataFrameFormatter`` and placed it in a new module (subject to discussion).
- Used composition with ``DataFrameFormatter`` in ``HTMLFormatter``, ``LatexFormatter``, ``CSVFormatter`` and ``StringFormatter``. Having each of these formatters inherit from the base ``DataFrameFormatter`` turned out to be too complicated to follow; composition seems to suit better here (see the sketch below).
- Created a new class, ``DataFrameRenderer``, to hold the output methods for each of the formats.
This is not the ultimate refactoring, just one more step.
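A rough usage sketch of the composition, assuming only what the diff above shows (the full ``DataFrameFormatter`` constructor takes more options than shown here):

```python
from pandas import DataFrame
from pandas.io.formats.format import DataFrameFormatter
from pandas.io.formats.string import StringFormatter

df = DataFrame({"a": range(30)})

# DataFrameFormatter holds the shared formatting state
# (truncation, column stringification, dimensions info, ...)
fmt = DataFrameFormatter(df)

# StringFormatter composes it and only handles console-specific layout
text = StringFormatter(fmt, line_width=80).to_string()
print(text)
```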
| https://api.github.com/repos/pandas-dev/pandas/pulls/36510 | 2020-09-20T21:09:09Z | 2020-10-20T23:18:59Z | 2020-10-20T23:18:58Z | 2020-10-20T23:19:04Z |
CLN: Unify Series case in _wrap_applied_output | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 0705261d0c516..b9cc2c19c224b 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1190,14 +1190,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
key_index = self.grouper.result_index if self.as_index else None
- if isinstance(first_not_none, Series):
- # this is to silence a DeprecationWarning
- # TODO: Remove when default dtype of empty Series is object
- kwargs = first_not_none._construct_axes_dict()
- backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
-
- values = [x if (x is not None) else backup for x in values]
-
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
@@ -1217,8 +1209,13 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
-
else:
+ # this is to silence a DeprecationWarning
+ # TODO: Remove when default dtype of empty Series is object
+ kwargs = first_not_none._construct_axes_dict()
+ backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
+ values = [x if (x is not None) else backup for x in values]
+
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
| One block of _wrap_applied_output is only executed when `first_not_none` is a Series; moved it to the else clause. | https://api.github.com/repos/pandas-dev/pandas/pulls/36504 | 2020-09-20T18:19:53Z | 2020-09-21T21:59:47Z | 2020-09-21T21:59:47Z | 2020-09-26T12:00:06Z |
BUG: alignment changing index on input series | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 7ba64f57be136..6987292eaf05c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -271,6 +271,7 @@ Numeric
- Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`)
- Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`)
- Bug in :meth:`Series.equals` where a ``ValueError`` was raised when numpy arrays were compared to scalars (:issue:`35267`)
+- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`)
-
Conversion
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0b9021b094cd7..a8b48f875c825 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8748,6 +8748,10 @@ def _align_frame(
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
+ # GH#33671 ensure we don't change the index on
+ # our original Series (NB: by default deep=False)
+ left = left.copy()
+ right = right.copy()
left.index = join_index
right.index = join_index
@@ -8835,6 +8839,10 @@ def _align_series(
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
+ # GH#33671 ensure we don't change the index on
+ # our original Series (NB: by default deep=False)
+ left = left.copy()
+ right = right.copy()
left.index = join_index
right.index = join_index
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 8fad6ee1cca8b..f30246ff12fac 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -254,6 +254,19 @@ def test_sub_datetimelike_align(self):
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
+ def test_alignment_doesnt_change_tz(self):
+ # GH#33671
+ dti = pd.date_range("2016-01-01", periods=10, tz="CET")
+ dti_utc = dti.tz_convert("UTC")
+ ser = pd.Series(10, index=dti)
+ ser_utc = pd.Series(10, index=dti_utc)
+
+ # we don't care about the result, just that original indexes are unchanged
+ ser * ser_utc
+
+ assert ser.index is dti
+ assert ser_utc.index is dti_utc
+
# ------------------------------------------------------------------
# Comparisons
| - [x] closes #33671
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
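For context, a minimal repro of GH#33671 mirroring the test added above: before this fix, arithmetic between two tz-aware ``Series`` mutated their indexes in place.

```python
import pandas as pd

dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = pd.Series(10, index=dti)
ser_utc = pd.Series(10, index=dti_utc)

# arithmetic triggers alignment; only the side effect matters here
ser * ser_utc

# with the fix, the original indexes are left untouched
assert ser.index is dti
assert ser_utc.index is dti_utc
```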
| https://api.github.com/repos/pandas-dev/pandas/pulls/36503 | 2020-09-20T18:08:38Z | 2020-09-25T00:31:58Z | 2020-09-25T00:31:58Z | 2020-09-25T00:33:21Z |
Modify doc/source/whatsnew7 | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 0f0f009307c75..f49f679b26856 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -33,7 +33,7 @@ Enhancements
.. _whatsnew_0250.enhancements.agg_relabel:
-Groupby aggregation with relabeling
+GroupBy aggregation with relabeling
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Pandas has added special groupby behavior, known as "named aggregation", for naming the
@@ -85,7 +85,7 @@ See :ref:`groupby.aggregate.named` for more.
.. _whatsnew_0250.enhancements.multiple_lambdas:
-Groupby aggregation with multiple lambdas
+GroupBy aggregation with multiple lambdas
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can now provide multiple lambda functions to a list-like aggregation in
@@ -161,7 +161,7 @@ To restore the previous behaviour of a single threshold, set
.. _whatsnew_0250.enhancements.json_normalize_with_max_level:
-Json normalize with max_level param support
+JSON normalize with max_level param support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:func:`json_normalize` normalizes the provided input dict to all
@@ -308,7 +308,7 @@ would be reassigned as -1. (:issue:`19387`)
.. _whatsnew_0250.api_breaking.groupby_apply_first_group_once:
-``Groupby.apply`` on ``DataFrame`` evaluates first group only once
+``GroupBy.apply`` on ``DataFrame`` evaluates first group only once
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The implementation of :meth:`DataFrameGroupBy.apply() <pandas.core.groupby.DataFrameGroupBy.apply>`
@@ -422,7 +422,7 @@ of ``object`` dtype. :attr:`Series.str` will now infer the dtype data *within* t
.. _whatsnew_0250.api_breaking.groupby_categorical:
-Categorical dtypes are preserved during groupby
+Categorical dtypes are preserved during GroupBy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Previously, columns that were categorical, but not the groupby key(s) would be converted to ``object`` dtype during groupby operations. Pandas now will preserve these dtypes. (:issue:`18502`)
@@ -483,7 +483,7 @@ values are coerced to floating point, which may result in loss of precision. See
:ref:`indexing.set_ops` for more.
-``DataFrame`` groupby ffill/bfill no longer return group labels
+``DataFrame`` GroupBy ffill/bfill no longer return group labels
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The methods ``ffill``, ``bfill``, ``pad`` and ``backfill`` of
@@ -513,7 +513,7 @@ are returned. (:issue:`21521`)
df.groupby("a").ffill()
-``DataFrame`` describe on an empty categorical / object column will return top and freq
+``DataFrame`` describe on an empty Categorical / object column will return top and freq
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When calling :meth:`DataFrame.describe` with an empty categorical / object
@@ -1085,7 +1085,6 @@ Conversion
- Bug in :func:`DataFrame.astype()` when passing a dict of columns and types the ``errors`` parameter was ignored. (:issue:`25905`)
-
--
Strings
^^^^^^^
@@ -1139,8 +1138,8 @@ MultiIndex
- Bug in which incorrect exception raised by :class:`Timedelta` when testing the membership of :class:`MultiIndex` (:issue:`24570`)
-
-I/O
-^^^
+IO
+^^
- Bug in :func:`DataFrame.to_html()` where values were truncated using display options instead of outputting the full content (:issue:`17004`)
- Fixed bug in missing text when using :meth:`to_clipboard` if copying utf-16 characters in Python 3 on Windows (:issue:`25040`)
@@ -1182,9 +1181,8 @@ Plotting
- Fixed bug causing plots of :class:`PeriodIndex` timeseries to fail if the frequency is a multiple of the frequency rule code (:issue:`14763`)
- Fixed bug when plotting a :class:`DatetimeIndex` with ``datetime.timezone.utc`` timezone (:issue:`17173`)
-
--
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst
index 944021ca0fcae..96d44bd8fa5c6 100644
--- a/doc/source/whatsnew/v0.25.1.rst
+++ b/doc/source/whatsnew/v0.25.1.rst
@@ -6,8 +6,8 @@ What's new in 0.25.1 (August 21, 2019)
These are the changes in pandas 0.25.1. See :ref:`release` for a full changelog
including other versions of pandas.
-I/O and LZMA
-~~~~~~~~~~~~
+IO and LZMA
+~~~~~~~~~~~
Some users may unknowingly have an incomplete Python installation lacking the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue:`27575`).
Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`.
@@ -67,8 +67,8 @@ Missing
- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. ``type(pandas.Series())`` (:issue:`27482`)
-I/O
-^^^
+IO
+^^
- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`)
- Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`)
@@ -82,7 +82,7 @@ Plotting
:meth:`pandas.plotting.deregister_matplotlib_converters` (:issue:`27481`).
- Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`).
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed regression in :meth:`pands.core.groupby.DataFrameGroupBy.quantile` raising when multiple quantiles are given (:issue:`27526`)
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index c0c68ce4b1f44..a9167c3b41289 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -21,14 +21,14 @@ Indexing
- Fix regression in :meth:`DataFrame.reindex` not following the ``limit`` argument (:issue:`28631`).
- Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`)
-I/O
-^^^
+IO
+^^
- Fix regression in notebook display where ``<th>`` tags were missing for :attr:`DataFrame.index` values (:issue:`28204`).
- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`)
- Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`).
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`).
diff --git a/doc/source/whatsnew/v0.25.3.rst b/doc/source/whatsnew/v0.25.3.rst
index f7f54198a0f82..e028c08e1e85c 100644
--- a/doc/source/whatsnew/v0.25.3.rst
+++ b/doc/source/whatsnew/v0.25.3.rst
@@ -11,7 +11,7 @@ including other versions of pandas.
Bug fixes
~~~~~~~~~
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`DataFrameGroupBy.quantile` where NA values in the grouping could cause segfaults or incorrect results (:issue:`28882`)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 4f0ca97310d85..fc46224ec86a7 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -196,7 +196,7 @@ You can use the alias ``"boolean"`` as well.
.. _whatsnew_100.convert_dtypes:
-``convert_dtypes`` method to ease use of supported extension dtypes
+Method ``convert_dtypes`` to ease use of supported extension dtypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to encourage use of the extension dtypes ``StringDtype``,
@@ -1082,13 +1082,11 @@ Timedelta
^^^^^^^^^
- Bug in subtracting a :class:`TimedeltaIndex` or :class:`TimedeltaArray` from a ``np.datetime64`` object (:issue:`29558`)
-
--
Timezones
^^^^^^^^^
-
--
Numeric
@@ -1113,7 +1111,6 @@ Numeric
Conversion
^^^^^^^^^^
--
-
Strings
@@ -1152,7 +1149,6 @@ Indexing
Missing
^^^^^^^
--
-
MultiIndex
@@ -1162,8 +1158,8 @@ MultiIndex
- Series and MultiIndex `.drop` with `MultiIndex` raise exception if labels not in given in level (:issue:`8594`)
-
-I/O
-^^^
+IO
+^^
- :meth:`read_csv` now accepts binary mode file buffers when using the Python csv engine (:issue:`23779`)
- Bug in :meth:`DataFrame.to_json` where using a Tuple as a column or index value and using ``orient="columns"`` or ``orient="index"`` would produce invalid JSON (:issue:`20500`)
@@ -1203,7 +1199,7 @@ Plotting
- Allow :meth:`DataFrame.plot.scatter` to plot ``objects`` and ``datetime`` type data (:issue:`18755`, :issue:`30391`)
- Bug in :meth:`DataFrame.hist`, ``xrot=0`` does not work with ``by`` and subplots (:issue:`30288`).
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`core.groupby.DataFrameGroupBy.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`)
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index c3f144e2f0cb3..3f7c6e85e14ca 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -47,7 +47,7 @@ Fixed regressions
.. ---------------------------------------------------------------------------
-Indexing with Nullable Boolean Arrays
+Indexing with nullable boolean arrays
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Previously indexing with a nullable Boolean array containing ``NA`` would raise a ``ValueError``, however this is now permitted with ``NA`` being treated as ``False``. (:issue:`31503`)
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index a49b29d691692..dc3404f93be91 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -42,7 +42,7 @@ For example, the below now works:
.. _whatsnew_110.period_index_partial_string_slicing:
-Non-monotonic PeriodIndex Partial String Slicing
+Non-monotonic PeriodIndex partial string slicing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:class:`PeriodIndex` now supports partial string slicing for non-monotonic indexes, mirroring :class:`DatetimeIndex` behavior (:issue:`31096`)
@@ -413,7 +413,7 @@ And the differences in reindexing ``df`` with ``mi_2`` and using ``method='pad'`
.. _whatsnew_110.notable_bug_fixes.indexing_raises_key_errors:
-Failed Label-Based Lookups Always Raise KeyError
+Failed label-based lookups always raise KeyError
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Label lookups ``series[key]``, ``series.loc[key]`` and ``frame.loc[key]``
@@ -786,7 +786,7 @@ Optional libraries below the lowest tested version may still work, but are not c
See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more.
-Development Changes
+Development changes
^^^^^^^^^^^^^^^^^^^
- The minimum version of Cython is now the most recent bug-fix version (0.29.16) (:issue:`33334`).
@@ -1051,8 +1051,8 @@ MultiIndex
- Bug when joining two :class:`MultiIndex` without specifying level with different columns. Return-indexers parameter was ignored. (:issue:`34074`)
-I/O
-^^^
+IO
+^^
- Passing a ``set`` as ``names`` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`)
- Bug in print-out when ``display.precision`` is zero. (:issue:`20359`)
- Bug in :func:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
@@ -1108,7 +1108,7 @@ Plotting
- Bug in :meth:`pandas.plotting.bootstrap_plot` was causing cluttered axes and overlapping labels (:issue:`34905`)
- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Using a :class:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :class:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index b654e27737359..d2899facb7cd3 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -30,6 +30,7 @@
"BigQuery",
"STATA",
"Interval",
+ "IntervalArray",
"PEP8",
"Period",
"Series",
@@ -141,6 +142,13 @@
"False",
"Styler",
"os",
+ "UTC",
+ "str",
+ "msgpack",
+ "ExtensionArray",
+ "LZMA",
+ "Numba",
+ "Timestamp",
}
CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}
| - [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] modify files in doc/source/whatsnew 'v0.25.0.rst' 'v0.25.1.rst' 'v0.25.2.rst' 'v0.25.3.rst' 'v1.0.0.rst' 'v1.0.1.rst' 'v1.0.2.rst' 'v1.0.3.rst' 'v1.0.4.rst' 'v1.0.5.rst' 'v1.1.0.rst' | https://api.github.com/repos/pandas-dev/pandas/pulls/36501 | 2020-09-20T17:41:27Z | 2020-10-07T02:42:03Z | 2020-10-07T02:42:03Z | 2020-10-07T02:42:07Z |
Update outdated instructions in scripts/generate_pip_deps_from_conda.py #36494 | diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index b0a06416ce443..c417f58f6bf1b 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -6,11 +6,11 @@
Usage:
Generate `requirements-dev.txt`
- $ ./conda_to_pip
+ $ python scripts/generate_pip_deps_from_conda.py
Compare and fail (exit status != 0) if `requirements-dev.txt` has not been
generated with this script:
- $ ./conda_to_pip --compare
+ $ python scripts/generate_pip_deps_from_conda.py --compare
"""
import argparse
import os
| - [x] closes #36494
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/36496 | 2020-09-20T08:17:23Z | 2020-09-21T21:11:46Z | 2020-09-21T21:11:46Z | 2020-09-21T21:11:51Z |
CI: Update version of 'black' | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 309e22e71a523..dd5323960ed20 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/python/black
- rev: 19.10b0
+ rev: 20.8b1
hooks:
- id: black
language_version: python3
diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index 3ef6ab6209ea7..5a3febdcf75e7 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -125,7 +125,7 @@ def setup(self, op):
arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype("f8")
arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("f4")
df = pd.concat(
- [pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True,
+ [pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True
)
# should already be the case, but just to be sure
df._consolidate_inplace()
diff --git a/doc/make.py b/doc/make.py
index 94fbfa9382d81..40ce9ea3bbcd2 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -286,7 +286,7 @@ def main():
joined = ",".join(cmds)
argparser = argparse.ArgumentParser(
- description="pandas documentation builder", epilog=f"Commands: {joined}",
+ description="pandas documentation builder", epilog=f"Commands: {joined}"
)
joined = ", ".join(cmds)
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ee0d4ca3f2a24..04540f7e6ec95 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -308,7 +308,7 @@
for method in methods:
# ... and each of its public methods
- moved_api_pages.append((f"{old}.{method}", f"{new}.{method}",))
+ moved_api_pages.append((f"{old}.{method}", f"{new}.{method}"))
if pattern is None:
html_additional_pages = {
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index e5c6f77eea3ef..8558774955a40 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -720,7 +720,7 @@ submitting code to run the check yourself::
to auto-format your code. Additionally, many editors have plugins that will
apply ``black`` as you edit files.
-You should use a ``black`` version >= 19.10b0 as previous versions are not compatible
+You should use a ``black`` version 20.8b1 as previous versions are not compatible
with the pandas codebase.
If you wish to run these checks automatically, we encourage you to use
diff --git a/environment.yml b/environment.yml
index 36bbd3d307159..ffd319b006ff2 100644
--- a/environment.yml
+++ b/environment.yml
@@ -15,7 +15,7 @@ dependencies:
- cython>=0.29.21
# code checks
- - black=19.10b0
+ - black=20.8b1
- cpplint
- flake8<3.8.0 # temporary pin, GH#34150
- flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions
diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py
index 129d8998faccc..6efbbe9302952 100644
--- a/pandas/_vendored/typing_extensions.py
+++ b/pandas/_vendored/typing_extensions.py
@@ -2116,8 +2116,7 @@ def __init_subclass__(cls, *args, **kwargs):
raise TypeError(f"Cannot subclass {cls.__module__}.Annotated")
def _strip_annotations(t):
- """Strips the annotations from a given type.
- """
+ """Strips the annotations from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, typing._GenericAlias):
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index c123156495924..541c617f7f618 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -387,7 +387,7 @@ def validate_func_kwargs(
def transform(
- obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs,
+ obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
) -> FrameOrSeries:
"""
Transform a DataFrame or Series
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 50d1810fee30d..ccccdc4409694 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1023,11 +1023,10 @@ def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (
- ((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any()
- or (
- (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
- ).any()
- )
+ (np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]
+ ).any() or (
+ (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
+ ).any()
if to_raise:
raise OverflowError("Overflow in int64 addition")
diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 09f9aefd64096..9eaa265adab2b 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -17,7 +17,7 @@
def compare_or_regex_search(
- a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike,
+ a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike
) -> Union[ArrayLike, bool]:
"""
Compare two array_like inputs of the same shape or two scalar values
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 36dfe43bfd708..47b127b300681 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -449,9 +449,7 @@ def __init__(
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
- NDFrame.__init__(
- self, data,
- )
+ NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
@@ -5747,7 +5745,7 @@ def nsmallest(self, n, columns, keep="first") -> DataFrame:
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
- Iceland 337000 17036 IS
+ Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
@@ -7142,7 +7140,7 @@ def unstack(self, level=-1, fill_value=None):
return unstack(self, level, fill_value)
- @Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt",))
+ @Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt"))
def melt(
self,
id_vars=None,
@@ -8624,7 +8622,7 @@ def blk_func(values):
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res = df._mgr.reduce(blk_func)
- out = df._constructor(res,).iloc[0].rename(None)
+ out = df._constructor(res).iloc[0].rename(None)
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 48fae9a0a91cd..0984e86a23592 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -198,7 +198,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
# Constructors
def __init__(
- self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False,
+ self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
if (
@@ -208,9 +208,7 @@ def __init__(
and copy is False
):
# GH#33357 called with just the SingleBlockManager
- NDFrame.__init__(
- self, data,
- )
+ NDFrame.__init__(self, data)
self.name = name
return
@@ -329,9 +327,7 @@ def __init__(
data = SingleBlockManager.from_array(data, index)
- generic.NDFrame.__init__(
- self, data,
- )
+ generic.NDFrame.__init__(self, data)
self.name = name
self._set_axis(0, index, fastpath=True)
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ec62192464665..1fec2bbbf5fdc 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -72,7 +72,7 @@ def get_indexer_indexer(
)
elif isinstance(target, ABCMultiIndex):
indexer = lexsort_indexer(
- target._get_codes_for_sorting(), orders=ascending, na_position=na_position,
+ target._get_codes_for_sorting(), orders=ascending, na_position=na_position
)
else:
# Check monotonic-ness before sort an index (GH 11080)
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index f06dd10d0e497..1dd005c1602a5 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -25,7 +25,7 @@ def set_use_numba(enable: bool = False) -> None:
def get_jit_arguments(
- engine_kwargs: Optional[Dict[str, bool]] = None, kwargs: Optional[Dict] = None,
+ engine_kwargs: Optional[Dict[str, bool]] = None, kwargs: Optional[Dict] = None
) -> Tuple[bool, bool, bool]:
"""
Return arguments to pass to numba.JIT, falling back on pandas default JIT settings.
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 7eb31daa894c9..99992d0218a68 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1382,10 +1382,6 @@ def _format(x):
class FloatArrayFormatter(GenericArrayFormatter):
- """
-
- """
-
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index eb35fff3a4f8e..170df193bef00 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -41,8 +41,8 @@ def __init__(
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
- self.strrows: List[List[str]] = (
- list(zip(*self.strcols)) # type: ignore[arg-type]
+ self.strrows: List[List[str]] = list(
+ zip(*self.strcols) # type: ignore[arg-type]
)
def get_strrow(self, row_num: int) -> str:
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index ece9367cea7fe..f18117cfd3d1f 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -193,9 +193,7 @@ def test_constructor_inferred_fill_value(self, data, fill_value):
assert result == fill_value
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
- @pytest.mark.parametrize(
- "size", [0, 10],
- )
+ @pytest.mark.parametrize("size", [0, 10])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
@@ -693,17 +691,13 @@ def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
- res = sparse[
- 4:,
- ] # noqa: E231
- exp = SparseArray(dense[4:,]) # noqa: E231
+ res = sparse[(slice(4, None),)]
+ exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
- res = sparse[
- 4:,
- ] # noqa: E231
- exp = SparseArray(dense[4:,], fill_value=0) # noqa: E231
+ res = sparse[(slice(4, None),)]
+ exp = SparseArray(dense[4:], fill_value=0)
tm.assert_sp_array_equal(res, exp)
msg = "too many indices for array"
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index f21b1d3dfe487..4324b03ed13d6 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1060,14 +1060,14 @@ def test_any_all_bool_only(self):
(np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True),
(np.all, {"A": pd.Series([0, 1], dtype=int)}, False),
(np.any, {"A": pd.Series([0, 1], dtype=int)}, True),
- pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False,),
- pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True,),
- pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,),
- pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,),
- pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False,),
- pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True,),
- pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,),
- pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,),
+ pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False),
+ pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True),
+ pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
+ pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
+ pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False),
+ pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True),
+ pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
+ pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
(np.all, {"A": pd.Series([0, 1], dtype="category")}, False),
(np.any, {"A": pd.Series([0, 1], dtype="category")}, True),
(np.all, {"A": pd.Series([1, 2], dtype="category")}, True),
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 18b5743a3375a..9d179d983ceeb 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -108,9 +108,7 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
compression_only = "gz"
compression["method"] = "infer"
path_gcs += f".{compression_only}"
- df.to_csv(
- path_gcs, compression=compression, encoding=encoding,
- )
+ df.to_csv(path_gcs, compression=compression, encoding=encoding)
assert gcs_buffer.getvalue() == buffer.getvalue()
read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding)
tm.assert_frame_equal(df, read_df)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 35a400cba8671..a5033c51bce81 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -572,7 +572,7 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
pytest.param(
["A"],
marks=pytest.mark.xfail(
- PY38, reason="Getting back empty DataFrame", raises=AssertionError,
+ PY38, reason="Getting back empty DataFrame", raises=AssertionError
),
),
[],
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 316a299ba1cbb..d1c3ad508d877 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -259,17 +259,20 @@ def test_constructor_keyword(self):
Timestamp("20151112")
)
- assert repr(
- Timestamp(
- year=2015,
- month=11,
- day=12,
- hour=1,
- minute=2,
- second=3,
- microsecond=999999,
+ assert (
+ repr(
+ Timestamp(
+ year=2015,
+ month=11,
+ day=12,
+ hour=1,
+ minute=2,
+ second=3,
+ microsecond=999999,
+ )
)
- ) == repr(Timestamp("2015-11-12 01:02:03.999999"))
+ == repr(Timestamp("2015-11-12 01:02:03.999999"))
+ )
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index aee947e738525..a796023c75b78 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -554,9 +554,7 @@ def test_unary_minus_nullable_int(
expected = pd.Series(target, dtype=dtype)
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize(
- "source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]],
- )
+ @pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
dtype = any_signed_nullable_int_dtype
expected = pd.Series(source, dtype=dtype)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index fb647c10f72bc..4f93ce9017f91 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -6,7 +6,7 @@ python-dateutil>=2.7.3
pytz
asv
cython>=0.29.21
-black==19.10b0
+black==20.8b1
cpplint
flake8<3.8.0
flake8-comprehensions>=3.1.0
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index b11de0c4ad860..74819db7b878c 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -6,8 +6,7 @@
class BadDocstrings:
- """Everything here has a bad docstring
- """
+ """Everything here has a bad docstring"""
def private_classes(self):
"""
diff --git a/versioneer.py b/versioneer.py
index 65c9523ba5573..171156c2c5315 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1073,7 +1073,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
- full_tag, tag_prefix,
+ full_tag, tag_prefix
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
| closes #35925
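For context (my summary, not part of the original PR text): most of the churn in this diff comes from black 20.8b1's new "magic trailing comma" behavior: a pre-existing trailing comma now forces a call to stay exploded, so these call sites drop the comma to let black collapse them again. Illustrated with the ``test_gcs.py`` hunk above:

```python
def save(df, path_gcs, compression, encoding):
    # under black 20.8b1, the trailing comma pins the exploded form
    df.to_csv(
        path_gcs,
        compression=compression,
        encoding=encoding,
    )

    # without it, black collapses the call when it fits on one line
    df.to_csv(path_gcs, compression=compression, encoding=encoding)
```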
Not many files were left, so I'm making this PR to close it off. | https://api.github.com/repos/pandas-dev/pandas/pulls/36493 | 2020-09-20T06:57:43Z | 2020-09-22T13:37:59Z | 2020-09-22T13:37:59Z | 2020-09-23T12:17:25Z |
TST: DataFrame.to_parquet accepts pathlib.Path with partition_cols defined | diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index a5033c51bce81..b7c8ca7e0c49f 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -3,6 +3,7 @@
from distutils.version import LooseVersion
from io import BytesIO
import os
+import pathlib
from warnings import catch_warnings
import numpy as np
@@ -663,6 +664,20 @@ def test_partition_cols_string(self, pa, df_full):
assert len(dataset.partitions.partition_names) == 1
assert dataset.partitions.partition_names == set(partition_cols_list)
+ @pytest.mark.parametrize(
+ "path_type", [lambda path: path, lambda path: pathlib.Path(path)]
+ )
+ def test_partition_cols_pathlib(self, pa, df_compat, path_type):
+ # GH 35902
+
+ partition_cols = "B"
+ partition_cols_list = [partition_cols]
+ df = df_compat
+
+ with tm.ensure_clean_dir() as path_str:
+ path = path_type(path_str)
+ df.to_parquet(path, partition_cols=partition_cols_list)
+
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame()
| - [x] closes #35902
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
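A quick sketch of the call being exercised (assumes a parquet engine such as pyarrow is installed; ``out_dir`` is a placeholder path):

```python
import pathlib

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "a"]})

# GH 35902: a pathlib.Path destination combined with partition_cols
# should write a partitioned dataset instead of raising
df.to_parquet(pathlib.Path("out_dir"), partition_cols=["B"])
```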
| https://api.github.com/repos/pandas-dev/pandas/pulls/36491 | 2020-09-20T04:44:49Z | 2020-09-23T11:20:14Z | 2020-09-23T11:20:13Z | 2020-09-23T14:42:17Z |
REF: share fillna | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index a947ab64f7380..808d598558c83 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -6,7 +6,11 @@
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly, doc
+from pandas.util._validators import validate_fillna_kwargs
+from pandas.core.dtypes.inference import is_array_like
+
+from pandas.core import missing
from pandas.core.algorithms import take, unique
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
@@ -194,3 +198,32 @@ def __getitem__(self, key):
def _validate_getitem_key(self, key):
return check_array_indexer(self, key)
+
+ @doc(ExtensionArray.fillna)
+ def fillna(self: _T, value=None, method=None, limit=None) -> _T:
+ value, method = validate_fillna_kwargs(value, method)
+
+ mask = self.isna()
+
+ # TODO: share this with EA base class implementation
+ if is_array_like(value):
+ if len(value) != len(self):
+ raise ValueError(
+ f"Length of 'value' does not match. Got ({len(value)}) "
+ f" expected {len(self)}"
+ )
+ value = value[mask]
+
+ if mask.any():
+ if method is not None:
+ func = missing.get_fill_func(method)
+ new_values = func(self._ndarray.copy(), limit=limit, mask=mask)
+ # TODO: PandasArray didnt used to copy, need tests for this
+ new_values = self._from_backing_data(new_values)
+ else:
+ # fill with value
+ new_values = self.copy()
+ new_values[mask] = value
+ else:
+ new_values = self.copy()
+ return new_values
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 45cabe8f0b498..7051507f9a90e 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -28,7 +28,6 @@
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution, cache_readonly
-from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -48,11 +47,9 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.generic import ABCSeries
-from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
-from pandas.core import missing, nanops, ops
+from pandas.core import nanops, ops
from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionOpsMixin
@@ -979,43 +976,6 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
result[self._isnan] = fill_value
return result
- def fillna(self, value=None, method=None, limit=None):
- # TODO(GH-20300): remove this
- # Just overriding to ensure that we avoid an astype(object).
- # Either 20300 or a `_values_for_fillna` would avoid this duplication.
- if isinstance(value, ABCSeries):
- value = value.array
-
- value, method = validate_fillna_kwargs(value, method)
-
- mask = self.isna()
-
- if is_array_like(value):
- if len(value) != len(self):
- raise ValueError(
- f"Length of 'value' does not match. Got ({len(value)}) "
- f" expected {len(self)}"
- )
- value = value[mask]
-
- if mask.any():
- if method is not None:
- if method == "pad":
- func = missing.pad_1d
- else:
- func = missing.backfill_1d
-
- values = self.copy()
- new_values = func(values, limit=limit, mask=mask)
- new_values = self._from_backing_data(new_values)
- else:
- # fill with value
- new_values = self.copy()
- new_values[mask] = value
- else:
- new_values = self.copy()
- return new_values
-
# ------------------------------------------------------------------
# Frequency Properties/Methods
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index afcae2c5c8b43..61076132b24cd 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -1,5 +1,5 @@
import numbers
-from typing import Optional, Tuple, Type, Union
+from typing import Tuple, Type, Union
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
@@ -7,10 +7,8 @@
from pandas._libs import lib
from pandas._typing import Scalar
from pandas.compat.numpy import function as nv
-from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
-from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import isna
from pandas import compat
@@ -19,7 +17,6 @@
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionOpsMixin
from pandas.core.construction import extract_array
-from pandas.core.missing import backfill_1d, pad_1d
class PandasDtype(ExtensionDtype):
@@ -263,35 +260,6 @@ def _validate_setitem_value(self, value):
def isna(self) -> np.ndarray:
return isna(self._ndarray)
- def fillna(
- self, value=None, method: Optional[str] = None, limit: Optional[int] = None
- ) -> "PandasArray":
- # TODO(_values_for_fillna): remove this
- value, method = validate_fillna_kwargs(value, method)
-
- mask = self.isna()
-
- if is_array_like(value):
- if len(value) != len(self):
- raise ValueError(
- f"Length of 'value' does not match. Got ({len(value)}) "
- f" expected {len(self)}"
- )
- value = value[mask]
-
- if mask.any():
- if method is not None:
- func = pad_1d if method == "pad" else backfill_1d
- new_values = func(self._ndarray, limit=limit, mask=mask)
- new_values = self._from_sequence(new_values, dtype=self.dtype)
- else:
- # fill with value
- new_values = self.copy()
- new_values[mask] = value
- else:
- new_values = self.copy()
- return new_values
-
def _validate_fill_value(self, fill_value):
if fill_value is None:
# Primarily for subclasses
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 9b96c8f01153b..edcdf2f54bc4c 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -587,7 +587,7 @@ def interpolate_2d(
return values
-def _cast_values_for_fillna(values, dtype: DtypeObj):
+def _cast_values_for_fillna(values, dtype: DtypeObj, has_mask: bool):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
@@ -597,8 +597,10 @@ def _cast_values_for_fillna(values, dtype: DtypeObj):
if needs_i8_conversion(dtype):
values = values.view(np.int64)
- elif is_integer_dtype(values):
+ elif is_integer_dtype(values) and not has_mask:
# NB: this check needs to come after the datetime64 check above
+ # has_mask check to avoid casting i8 values that have already
+ # been cast from PeriodDtype
values = ensure_float64(values)
return values
@@ -609,11 +611,12 @@ def _fillna_prep(values, mask=None, dtype: Optional[DtypeObj] = None):
if dtype is None:
dtype = values.dtype
- if mask is None:
+ has_mask = mask is not None
+ if not has_mask:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
- values = _cast_values_for_fillna(values, dtype)
+ values = _cast_values_for_fillna(values, dtype, has_mask)
mask = mask.view(np.uint8)
return values, mask
| Near-identical implementations in DatetimeLikeArray and PandasArray | https://api.github.com/repos/pandas-dev/pandas/pulls/36488 | 2020-09-20T03:04:35Z | 2020-09-21T23:53:16Z | 2020-09-21T23:53:16Z | 2021-02-21T22:42:04Z |
Don't unlabel stale PR on update | diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml
index e3b8d9336a5a6..e77bf2b81fc86 100644
--- a/.github/workflows/stale-pr.yml
+++ b/.github/workflows/stale-pr.yml
@@ -17,5 +17,5 @@ jobs:
exempt-pr-labels: "Needs Review,Blocked,Needs Discussion"
days-before-stale: 30
days-before-close: -1
- remove-stale-when-updated: true
+ remove-stale-when-updated: false
debug-only: false
| Apparently the bot unlabels on any activity (not just activity from the owner), e.g., https://github.com/pandas-dev/pandas/pull/34584#issuecomment-693107104, so if you ask the person for an update and they don't respond, the PR is no longer stale. Probably better to do this manually for now. | https://api.github.com/repos/pandas-dev/pandas/pulls/36487 | 2020-09-20T01:25:08Z | 2020-09-20T02:13:55Z | 2020-09-20T02:13:55Z | 2020-09-20T02:49:13Z |
add a test for loc method; check if a warning is raised when replacing a … | diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 66835c586e6c7..1254f1f217a2e 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -335,12 +335,14 @@ def test_setting_with_copy_bug(self):
# this should not raise
df2["y"] = ["g", "h", "i"]
- def test_detect_chained_assignment_warnings(self):
+ def test_detect_chained_assignment_warnings_errors(self):
+ df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
with option_context("chained_assignment", "warn"):
- df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
-
with tm.assert_produces_warning(com.SettingWithCopyWarning):
df.loc[0]["A"] = 111
+ with option_context("chained_assignment", "raise"):
+ with pytest.raises(com.SettingWithCopyError):
+ df.loc[0]["A"] = 111
def test_detect_chained_assignment_warnings_filter_and_dupe_cols(self):
# xref gh-13017.
| …subframe
- [x] closes #36424
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
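The behavior the new test pins down, roughly:

```python
import pandas as pd

df = pd.DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})

# df.loc[0] returns a copy, so writing into it is chained assignment
with pd.option_context("chained_assignment", "warn"):
    df.loc[0]["A"] = 111  # emits SettingWithCopyWarning

with pd.option_context("chained_assignment", "raise"):
    df.loc[0]["A"] = 111  # raises SettingWithCopyError
```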
| https://api.github.com/repos/pandas-dev/pandas/pulls/36486 | 2020-09-19T22:52:42Z | 2020-09-22T23:24:02Z | 2020-09-22T23:24:01Z | 2020-09-22T23:24:07Z |
TYP: core.missing; PERF for needs_i8_conversion | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 026aad5ad6eb7..45cabe8f0b498 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1005,19 +1005,9 @@ def fillna(self, value=None, method=None, limit=None):
else:
func = missing.backfill_1d
- values = self._ndarray
- if not is_period_dtype(self.dtype):
- # For PeriodArray self._ndarray is i8, which gets copied
- # by `func`. Otherwise we need to make a copy manually
- # to avoid modifying `self` in-place.
- values = values.copy()
-
+ values = self.copy()
new_values = func(values, limit=limit, mask=mask)
- if is_datetime64tz_dtype(self.dtype):
- # we need to pass int64 values to the constructor to avoid
- # re-localizing incorrectly
- new_values = new_values.view("i8")
- new_values = type(self)(new_values, dtype=self.dtype)
+ new_values = self._from_backing_data(new_values)
else:
# fill with value
new_values = self.copy()
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 5987fdabf78bb..acbdbfd7707e3 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1215,6 +1215,10 @@ def needs_i8_conversion(arr_or_dtype) -> bool:
"""
if arr_or_dtype is None:
return False
+ if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
+ # fastpath
+ dtype = arr_or_dtype
+ return dtype.kind in ["m", "M"] or dtype.type is Period
return (
is_datetime_or_timedelta_dtype(arr_or_dtype)
or is_datetime64tz_dtype(arr_or_dtype)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index be66b19d10064..9b96c8f01153b 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -7,17 +7,15 @@
import numpy as np
from pandas._libs import algos, lib
+from pandas._typing import DtypeObj
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64,
- is_datetime64_dtype,
- is_datetime64tz_dtype,
is_integer_dtype,
is_numeric_v_string_like,
is_scalar,
- is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
@@ -72,7 +70,7 @@ def mask_missing(arr, values_to_mask):
return mask
-def clean_fill_method(method, allow_nearest=False):
+def clean_fill_method(method, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
@@ -543,7 +541,12 @@ def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolat
def interpolate_2d(
- values, method="pad", axis=0, limit=None, fill_value=None, dtype=None
+ values,
+ method="pad",
+ axis=0,
+ limit=None,
+ fill_value=None,
+ dtype: Optional[DtypeObj] = None,
):
"""
Perform an actual interpolation of values, values will be make 2-d if
@@ -584,18 +587,14 @@ def interpolate_2d(
return values
-def _cast_values_for_fillna(values, dtype):
+def _cast_values_for_fillna(values, dtype: DtypeObj):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
- if (
- is_datetime64_dtype(dtype)
- or is_datetime64tz_dtype(dtype)
- or is_timedelta64_dtype(dtype)
- ):
+ if needs_i8_conversion(dtype):
values = values.view(np.int64)
elif is_integer_dtype(values):
@@ -605,7 +604,7 @@ def _cast_values_for_fillna(values, dtype):
return values
-def _fillna_prep(values, mask=None, dtype=None):
+def _fillna_prep(values, mask=None, dtype: Optional[DtypeObj] = None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
@@ -620,19 +619,19 @@ def _fillna_prep(values, mask=None, dtype=None):
return values, mask
-def pad_1d(values, limit=None, mask=None, dtype=None):
+def pad_1d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
-def backfill_1d(values, limit=None, mask=None, dtype=None):
+def backfill_1d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
-def pad_2d(values, limit=None, mask=None, dtype=None):
+def pad_2d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
@@ -643,7 +642,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
return values
-def backfill_2d(values, limit=None, mask=None, dtype=None):
+def backfill_2d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
| Split off from a branch that shares `fillna` code between some of our EAs.
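The PERF piece is the dtype fastpath added to ``needs_i8_conversion`` above; a quick illustration (my example, not from the PR):

```python
import numpy as np

from pandas import PeriodDtype
from pandas.core.dtypes.common import needs_i8_conversion

# dtype inputs now short-circuit on dtype.kind / dtype.type instead of
# running through several is_* checks that each re-inspect the argument
needs_i8_conversion(np.dtype("M8[ns]"))  # True: datetime64
needs_i8_conversion(np.dtype("m8[ns]"))  # True: timedelta64
needs_i8_conversion(PeriodDtype("D"))    # True: Period
needs_i8_conversion(np.dtype("int64"))   # False
```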
Preliminary to a PR that allows for an actually-inplace fillna implementation. | https://api.github.com/repos/pandas-dev/pandas/pulls/36485 | 2020-09-19T22:36:42Z | 2020-09-20T02:13:12Z | 2020-09-20T02:13:12Z | 2020-09-20T02:59:28Z |
ENH: PandasArray ops use core.ops functions | diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 237d571507a3a..05139783456b9 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -362,19 +362,29 @@ def __invert__(self):
@classmethod
def _create_arithmetic_method(cls, op):
+
+ pd_op = ops.get_array_op(op)
+
@ops.unpack_zerodim_and_defer(op.__name__)
def arithmetic_method(self, other):
if isinstance(other, cls):
other = other._ndarray
- with np.errstate(all="ignore"):
- result = op(self._ndarray, other)
+ result = pd_op(self._ndarray, other)
- if op is divmod:
+ if op is divmod or op is ops.rdivmod:
a, b = result
- return cls(a), cls(b)
-
- return cls(result)
+ if isinstance(a, np.ndarray):
+ # for e.g. op vs TimedeltaArray, we may already
+ # have an ExtensionArray, in which case we do not wrap
+ return cls(a), cls(b)
+ return a, b
+
+ if isinstance(result, np.ndarray):
+ # for e.g. multiplication vs TimedeltaArray, we may already
+ # have an ExtensionArray, in which case we do not wrap
+ return cls(result)
+ return result
return compat.set_function_name(arithmetic_method, f"__{op.__name__}__", cls)
diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py
index cd8dd102dc27c..a663c2f3a0175 100644
--- a/pandas/tests/arithmetic/common.py
+++ b/pandas/tests/arithmetic/common.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, Series
+from pandas import DataFrame, Index, Series, array as pd_array
import pandas._testing as tm
@@ -49,12 +49,12 @@ def assert_invalid_comparison(left, right, box):
----------
left : np.ndarray, ExtensionArray, Index, or Series
right : object
- box : {pd.DataFrame, pd.Series, pd.Index, tm.to_array}
+ box : {pd.DataFrame, pd.Series, pd.Index, pd.array, tm.to_array}
"""
# Not for tznaive-tzaware comparison
# Note: not quite the same as how we do this for tm.box_expected
- xbox = box if box is not Index else np.array
+ xbox = box if box not in [Index, pd_array] else np.array
result = left == right
expected = xbox(np.zeros(result.shape, dtype=np.bool_))
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index 8b9e5cd371a90..6286711ac6113 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -2,7 +2,6 @@
import pytest
import pandas as pd
-import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
@@ -56,7 +55,7 @@ def one(request):
zeros = [
box_cls([0] * 5, dtype=dtype)
- for box_cls in [pd.Index, np.array]
+ for box_cls in [pd.Index, np.array, pd.array]
for dtype in [np.int64, np.uint64, np.float64]
]
zeros.extend(
@@ -231,7 +230,7 @@ def box(request):
return request.param
-@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, tm.to_array], ids=id_func)
+@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, pd.array], ids=id_func)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 0dd389ed516c7..626dd4f748e0b 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -48,7 +48,9 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
@@ -135,7 +137,7 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
- xbox = box if box is not pd.Index else np.ndarray
+ xbox = box if box not in [pd.Index, pd.array] else np.ndarray
ts = pd.Timestamp.now(tz)
ser = pd.Series([ts, pd.NaT])
@@ -203,6 +205,8 @@ def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
# invalid date/int comparisons
+ if box_with_array is pd.array:
+ pytest.xfail("assert_invalid_comparison doesnt handle BooleanArray yet")
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
@@ -226,8 +230,12 @@ def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
# dont bother testing ndarray comparison methods as this fails
# on older numpys (since they check object identity)
return
+ if box_with_array is pd.array and dtype is object:
+ pytest.xfail("reversed comparisons give BooleanArray, not ndarray")
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box_with_array)
@@ -299,7 +307,9 @@ def test_timestamp_compare_series(self, left, right):
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
ser = tm.box_expected(ser, box_with_array)
@@ -388,7 +398,9 @@ def test_dti_cmp_nat(self, dtype, box_with_array):
# on older numpys (since they check object identity)
return
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
left = pd.DatetimeIndex(
[pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 139401bdf5806..df98b43e11f4a 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -89,8 +89,9 @@ def test_compare_invalid(self):
b.name = pd.Timestamp("2000-01-01")
tm.assert_series_equal(a / b, 1 / (b / a))
- def test_numeric_cmp_string_numexpr_path(self, box):
+ def test_numeric_cmp_string_numexpr_path(self, box_with_array):
# GH#36377, GH#35700
+ box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
obj = pd.Series(np.random.randn(10 ** 5))
@@ -183,10 +184,14 @@ def test_ops_series(self):
],
ids=lambda x: type(x).__name__,
)
- def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
+ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array):
# GH#19333
+ box = box_with_array
+ if box is pd.array:
+ pytest.xfail(
+ "we get a PandasArray[timedelta64[ns]] instead of TimedeltaArray"
+ )
index = numeric_idx
-
expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(5)])
index = tm.box_expected(index, box)
@@ -207,7 +212,11 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
],
ids=lambda x: type(x).__name__,
)
- def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box):
+ def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box_with_array):
+ box = box_with_array
+ if box is pd.array:
+ pytest.xfail("IntegerArray.__mul__ doesnt handle timedeltas")
+
arr = np.arange(2 * 10 ** 4).astype(np.int64)
obj = tm.box_expected(arr, box, transpose=False)
@@ -220,7 +229,11 @@ def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box):
result = scalar_td * obj
tm.assert_equal(result, expected)
- def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
+ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array):
+ box = box_with_array
+ if box is pd.array:
+ pytest.xfail("We get PandasArray[td64] instead of TimedeltaArray")
+
index = numeric_idx[1:3]
expected = TimedeltaIndex(["3 Days", "36 Hours"])
@@ -248,7 +261,11 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
pd.offsets.Second(0),
],
)
- def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
+ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box_with_array):
+ box = box_with_array
+ if box is pd.array:
+ pytest.xfail("PandasArray[int].__add__ doesnt raise on td64")
+
left = tm.box_expected(numeric_idx, box)
msg = (
"unsupported operand type|"
@@ -276,16 +293,21 @@ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
],
)
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
- def test_add_sub_datetimelike_invalid(self, numeric_idx, other, box):
+ def test_add_sub_datetimelike_invalid(self, numeric_idx, other, box_with_array):
# GH#28080 numeric+datetime64 should raise; Timestamp raises
# NullFrequencyError instead of TypeError so is excluded.
+ box = box_with_array
left = tm.box_expected(numeric_idx, box)
- msg = (
- "unsupported operand type|"
- "Cannot (add|subtract) NaT (to|from) ndarray|"
- "Addition/subtraction of integers and integer-arrays|"
- "Concatenation operation is not implemented for NumPy arrays"
+ msg = "|".join(
+ [
+ "unsupported operand type",
+ "Cannot (add|subtract) NaT (to|from) ndarray",
+ "Addition/subtraction of integers and integer-arrays",
+ "Concatenation operation is not implemented for NumPy arrays",
+ # pd.array vs np.datetime64 case
+ r"operand type\(s\) all returned NotImplemented from __array_ufunc__",
+ ]
)
with pytest.raises(TypeError, match=msg):
left + other
@@ -568,8 +590,9 @@ class TestMultiplicationDivision:
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# for non-timestamp/timedelta/period dtypes
- def test_divide_decimal(self, box):
+ def test_divide_decimal(self, box_with_array):
# resolves issue GH#9787
+ box = box_with_array
ser = Series([Decimal(10)])
expected = Series([Decimal(5)])
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index c0cb522b516ab..02cb4f4d7a606 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -104,22 +104,22 @@ def test_add_extension_scalar(self, other, box_with_array, op):
result = op(arr, other)
tm.assert_equal(result, expected)
- def test_objarr_add_str(self, box):
+ def test_objarr_add_str(self, box_with_array):
ser = pd.Series(["x", np.nan, "x"])
expected = pd.Series(["xa", np.nan, "xa"])
- ser = tm.box_expected(ser, box)
- expected = tm.box_expected(expected, box)
+ ser = tm.box_expected(ser, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
result = ser + "a"
tm.assert_equal(result, expected)
- def test_objarr_radd_str(self, box):
+ def test_objarr_radd_str(self, box_with_array):
ser = pd.Series(["x", np.nan, "x"])
expected = pd.Series(["ax", np.nan, "ax"])
- ser = tm.box_expected(ser, box)
- expected = tm.box_expected(expected, box)
+ ser = tm.box_expected(ser, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
result = "a" + ser
tm.assert_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index 930435074efc1..e78e696d00398 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -28,7 +28,9 @@ class TestPeriodArrayLikeComparisons:
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
@@ -68,7 +70,7 @@ def test_compare_object_dtype(self, box_with_array, other_box):
pi = pd.period_range("2000", periods=5)
parr = tm.box_expected(pi, box_with_array)
- xbox = np.ndarray if box_with_array is pd.Index else box_with_array
+ xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
other = other_box(pi)
@@ -175,7 +177,9 @@ def test_pi_cmp_period(self):
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
pi = pd.period_range("2000-01-01", periods=10, freq="D")
@@ -196,7 +200,7 @@ def test_parr_cmp_period_scalar2(self, box_with_array):
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
- xbox = np.ndarray if box_with_array is pd.Index else box_with_array
+ xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
@@ -235,7 +239,7 @@ def test_parr_cmp_period_scalar(self, freq, box_with_array):
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
- xbox = np.ndarray if box_with_array is pd.Index else box_with_array
+ xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
@@ -284,7 +288,7 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=A-DEC\)"
- idx_msg = rev_msg if box_with_array is tm.to_array else msg
+ idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@@ -298,7 +302,7 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=4M\)"
- idx_msg = rev_msg if box_with_array is tm.to_array else msg
+ idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@@ -779,7 +783,7 @@ def test_pi_add_sub_td64_array_tick(self):
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):
box = box_with_array
- xbox = box if box is not tm.to_array else pd.Index
+ xbox = box if box not in [pd.array, tm.to_array] else pd.Index
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 68bedcc099a91..b3dfb5d015ab4 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -50,7 +50,9 @@ class TestTimedelta64ArrayLikeComparisons:
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
- xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
+ xbox = (
+ box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
+ )
tdi = pd.timedelta_range("2H", periods=4)
other = np.array(tdi.to_numpy()[0])
@@ -73,7 +75,8 @@ def test_compare_timedelta64_zerodim(self, box_with_array):
def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
# regression test for GH#5963
box = box_with_array
- xbox = box if box is not pd.Index else np.ndarray
+ xbox = box if box not in [pd.Index, pd.array] else np.ndarray
+
ser = pd.Series([timedelta(days=1), timedelta(days=2)])
ser = tm.box_expected(ser, box)
actual = ser > td_scalar
@@ -85,6 +88,7 @@ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
def test_td64_comparisons_invalid(self, box_with_array, invalid):
# GH#13624 for str
box = box_with_array
+
rng = timedelta_range("1 days", periods=10)
obj = tm.box_expected(rng, box)
@@ -1142,19 +1146,24 @@ def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
# GH#22696 for DataFrame case, check that we don't dispatch to numpy
# implementation, which treats int64 as m8[ns]
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
- tdarr = tm.box_expected(rng, box_with_array)
- other = tm.box_expected([4, 3, 2], box_with_array)
+ tdarr = tm.box_expected(rng, box)
+ other = tm.box_expected([4, 3, 2], xbox)
msg = "Addition/subtraction of integers and integer-arrays"
assert_invalid_addsub_type(tdarr, other, msg)
def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
- tdarr = tm.box_expected(tdi, box_with_array)
- other = tm.box_expected([14, -1, 16], box_with_array)
+ tdarr = tm.box_expected(tdi, box)
+ other = tm.box_expected([14, -1, 16], xbox)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, other, msg)
@@ -1204,7 +1213,7 @@ def test_td64arr_add_sub_tdi(self, box_with_array, names):
)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
- tdi = np.array(tdi) if box is tm.to_array else tdi
+ tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series(
[Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
@@ -1311,7 +1320,7 @@ def test_td64arr_add_offset_index(self, names, box_with_array):
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
- other = np.array(other) if box is tm.to_array else other
+ other = np.array(other) if box in [tm.to_array, pd.array] else other
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
@@ -1353,8 +1362,8 @@ def test_td64arr_add_offset_array(self, box_with_array):
def test_td64arr_sub_offset_index(self, names, box_with_array):
# GH#18824, GH#19744
box = box_with_array
- xbox = box if box is not tm.to_array else pd.Index
- exname = names[2] if box is not tm.to_array else names[1]
+ xbox = box if box not in [tm.to_array, pd.array] else pd.Index
+ exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
@@ -1395,13 +1404,13 @@ def test_td64arr_sub_offset_array(self, box_with_array):
def test_td64arr_with_offset_series(self, names, box_with_array):
# GH#18849
box = box_with_array
- box2 = Series if box in [pd.Index, tm.to_array] else box
+ box2 = Series if box in [pd.Index, tm.to_array, pd.array] else box
if box is pd.DataFrame:
# Since we are operating with a DataFrame and a non-DataFrame,
# the non-DataFrame is cast to Series and its name ignored.
exname = names[0]
- elif box is tm.to_array:
+ elif box in [tm.to_array, pd.array]:
exname = names[1]
else:
exname = names[2]
@@ -1456,8 +1465,11 @@ def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# Unsorted
def test_td64arr_add_sub_object_array(self, box_with_array):
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
tdi = pd.timedelta_range("1 day", periods=3, freq="D")
- tdarr = tm.box_expected(tdi, box_with_array)
+ tdarr = tm.box_expected(tdi, box)
other = np.array(
[pd.Timedelta(days=1), pd.offsets.Day(2), pd.Timestamp("2000-01-04")]
@@ -1469,7 +1481,7 @@ def test_td64arr_add_sub_object_array(self, box_with_array):
expected = pd.Index(
[pd.Timedelta(days=2), pd.Timedelta(days=4), pd.Timestamp("2000-01-07")]
)
- expected = tm.box_expected(expected, box_with_array)
+ expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
@@ -1483,7 +1495,7 @@ def test_td64arr_add_sub_object_array(self, box_with_array):
expected = pd.Index(
[pd.Timedelta(0), pd.Timedelta(0), pd.Timestamp("2000-01-01")]
)
- expected = tm.box_expected(expected, box_with_array)
+ expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@@ -1536,7 +1548,7 @@ def test_tdi_mul_int_array(self, box_with_array):
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
- xbox = pd.Series if box in [pd.Index, tm.to_array] else box
+ xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
@@ -1549,7 +1561,7 @@ def test_tdi_mul_int_series(self, box_with_array):
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
- xbox = pd.Series if box in [pd.Index, tm.to_array] else box
+ xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
@@ -1604,13 +1616,16 @@ def test_td64arr_div_nat_invalid(self, box_with_array):
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
rng = timedelta_range("1 days", "10 days")
- rng = tm.box_expected(rng, box_with_array)
+ rng = tm.box_expected(rng, box)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
- expected = tm.box_expected(expected, box_with_array)
+ expected = tm.box_expected(expected, xbox)
result = rng / other
tm.assert_equal(result, expected)
@@ -1631,11 +1646,14 @@ def test_td64arr_div_int(self, box_with_array):
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
rng = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
- rng = tm.box_expected(rng, box_with_array)
- expected = tm.box_expected(expected, box_with_array)
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
@@ -1647,32 +1665,38 @@ def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
flat = ser
- ser = tm.box_expected(ser, box_with_array)
+ ser = tm.box_expected(ser, box)
# op
expected = Series([x / np.timedelta64(m, unit) for x in flat])
- expected = tm.box_expected(expected, box_with_array)
+ expected = tm.box_expected(expected, xbox)
result = ser / np.timedelta64(m, unit)
tm.assert_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
- expected = tm.box_expected(expected, box_with_array)
+ expected = tm.box_expected(expected, xbox)
result = np.timedelta64(m, unit) / ser
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
expected = pd.Float64Index([12, np.nan, 24], name="foo")
- rng = tm.box_expected(rng, box_with_array)
- expected = tm.box_expected(expected, box_with_array)
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
@@ -1683,17 +1707,20 @@ def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
expected = pd.Float64Index([12, np.nan, 24])
- rng = tm.box_expected(rng, box_with_array)
- expected = tm.box_expected(expected, box_with_array)
+ rng = tm.box_expected(rng, box)
+ expected = tm.box_expected(expected, xbox)
other = np.array([2, 4, 2], dtype="m8[h]")
result = rng / other
tm.assert_equal(result, expected)
- result = rng / tm.box_expected(other, box_with_array)
+ result = rng / tm.box_expected(other, box)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
@@ -1707,7 +1734,7 @@ def test_td64arr_div_td64_ndarray(self, box_with_array):
result = other / rng
tm.assert_equal(result, expected)
- result = tm.box_expected(other, box_with_array) / rng
+ result = tm.box_expected(other, box) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
@@ -1736,6 +1763,7 @@ def test_tdarr_div_length_mismatch(self, box_with_array):
def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
# GH#35529
box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]")
right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]")
@@ -1744,7 +1772,7 @@ def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
right = tm.box_expected(right, box)
expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
- expected = tm.box_expected(expected, box)
+ expected = tm.box_expected(expected, xbox)
result = left // right
@@ -1756,39 +1784,48 @@ def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
- td1 = tm.box_expected(td1, box_with_array, transpose=False)
- expected = tm.box_expected(expected, box_with_array, transpose=False)
+ td1 = tm.box_expected(td1, box, transpose=False)
+ expected = tm.box_expected(expected, xbox, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
- td1 = tm.box_expected(td1, box_with_array, transpose=False)
- expected = tm.box_expected(expected, box_with_array, transpose=False)
+ td1 = tm.box_expected(td1, box, transpose=False)
+ expected = tm.box_expected(expected, xbox, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
- td1 = tm.box_expected(td1, box_with_array, transpose=False)
- expected = tm.box_expected(expected, box_with_array, transpose=False)
+ td1 = tm.box_expected(td1, box, transpose=False)
+ expected = tm.box_expected(expected, xbox, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
@@ -1806,11 +1843,14 @@ def test_td64arr_floordiv_int(self, box_with_array):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
+ box = box_with_array
+ xbox = np.ndarray if box is pd.array else box
+
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
- tdi = tm.box_expected(tdi, box_with_array)
- expected = tm.box_expected(expected, box_with_array)
+ tdi = tm.box_expected(tdi, box)
+ expected = tm.box_expected(expected, xbox)
result = tdi // two_hours
tm.assert_equal(result, expected)
@@ -1827,17 +1867,20 @@ def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
+ box = box_with_array
+ xbox = np.ndarray if box_with_array is pd.array else box_with_array
+
tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
- tdi = tm.box_expected(tdi, box_with_array, transpose=False)
- expected = tm.box_expected(expected, box_with_array, transpose=False)
+ tdi = tm.box_expected(tdi, box, transpose=False)
+ expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
- expected = tm.box_expected(expected, box_with_array, transpose=False)
+ expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
@@ -2059,7 +2102,7 @@ def test_td64arr_mul_int_series(self, box_with_array, names, request):
reason = "broadcasts along wrong axis, but doesn't raise"
request.node.add_marker(pytest.mark.xfail(reason=reason))
- exname = names[2] if box is not tm.to_array else names[1]
+ exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
@@ -2074,8 +2117,12 @@ def test_td64arr_mul_int_series(self, box_with_array, names, request):
)
tdi = tm.box_expected(tdi, box)
- box = Series if (box is pd.Index or box is tm.to_array) else box
- expected = tm.box_expected(expected, box)
+ xbox = (
+ Series
+ if (box is pd.Index or box is tm.to_array or box is pd.array)
+ else box
+ )
+ expected = tm.box_expected(expected, xbox)
result = ser * tdi
tm.assert_equal(result, expected)
@@ -2098,7 +2145,7 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
)
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
- xname = names[2] if box is not tm.to_array else names[1]
+ xname = names[2] if box not in [tm.to_array, pd.array] else names[1]
expected = Series(
[tdi[n] / ser[n] for n in range(len(ser))],
dtype="timedelta64[ns]",
@@ -2106,7 +2153,7 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
)
xbox = box
- if box in [pd.Index, tm.to_array] and type(ser) is Series:
+ if box in [pd.Index, tm.to_array, pd.array] and type(ser) is Series:
xbox = Series
tdi = tm.box_expected(tdi, box)
Needs tests; hopefully I can tweak fixture usage in `tests.arithmetic`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36484 | 2020-09-19T22:10:39Z | 2020-10-02T21:51:24Z | 2020-10-02T21:51:24Z | 2020-10-02T23:06:30Z |
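A standalone sketch of the "wrap only plain ndarray results" rule from the diff above; `Wrapper` is a hypothetical stand-in for `PandasArray`, and the real dispatch goes through `ops.get_array_op` rather than a raw `*`.

```python
import numpy as np

class Wrapper:
    # hypothetical stand-in for PandasArray
    def __init__(self, values):
        self._ndarray = np.asarray(values)

    def __mul__(self, other):
        result = self._ndarray * other
        if isinstance(result, np.ndarray):
            # plain numpy result: re-box it
            return Wrapper(result)
        # e.g. multiplication by a timedelta may already produce an
        # ExtensionArray, in which case we do not wrap
        return result

print(type(Wrapper([1, 2, 3]) * 2))  # <class '__main__.Wrapper'>
```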
REF: share IntervalArray._validate_foo | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index f9f68004bcc23..ebabc7edcbf43 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -20,7 +20,6 @@
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
- is_interval,
is_interval_dtype,
is_list_like,
is_object_dtype,
@@ -813,7 +812,9 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
fill_left = fill_right = fill_value
if allow_fill:
- fill_left, fill_right = self._validate_fill_value(fill_value)
+ if (np.asarray(indices) == -1).any():
+ # We have excel tests that pass fill_value=True, xref GH#36466
+ fill_left, fill_right = self._validate_fill_value(fill_value)
left_take = take(
self.left, indices, allow_fill=allow_fill, fill_value=fill_left
@@ -824,20 +825,33 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
return self._shallow_copy(left_take, right_take)
- def _validate_fill_value(self, value):
- if is_interval(value):
- self._check_closed_matches(value, name="fill_value")
- fill_left, fill_right = value.left, value.right
- elif not is_scalar(value) and notna(value):
- msg = (
- "'IntervalArray.fillna' only supports filling with a "
- "'scalar pandas.Interval or NA'. "
- f"Got a '{type(value).__name__}' instead."
- )
- raise ValueError(msg)
+ def _validate_listlike(self, value):
+ # list-like of intervals
+ try:
+ array = IntervalArray(value)
+ # TODO: self._check_closed_matches(array, name="value")
+ value_left, value_right = array.left, array.right
+ except TypeError as err:
+ # wrong type: not interval or NA
+ msg = f"'value' should be an interval type, got {type(value)} instead."
+ raise TypeError(msg) from err
+ return value_left, value_right
+
+ def _validate_scalar(self, value):
+ if isinstance(value, Interval):
+ self._check_closed_matches(value, name="value")
+ left, right = value.left, value.right
+ elif is_valid_nat_for_dtype(value, self.left.dtype):
+ # GH#18295
+ left = right = value
else:
- fill_left = fill_right = self.left._na_value
- return fill_left, fill_right
+ raise ValueError(
+ "can only insert Interval objects and NA into an IntervalArray"
+ )
+ return left, right
+
+ def _validate_fill_value(self, value):
+ return self._validate_scalar(value)
def _validate_fillna_value(self, value):
if not isinstance(value, Interval):
@@ -851,26 +865,12 @@ def _validate_fillna_value(self, value):
return value.left, value.right
def _validate_insert_value(self, value):
- if isinstance(value, Interval):
- if value.closed != self.closed:
- raise ValueError(
- "inserted item must be closed on the same side as the index"
- )
- left_insert = value.left
- right_insert = value.right
- elif is_valid_nat_for_dtype(value, self.left.dtype):
- # GH#18295
- left_insert = right_insert = value
- else:
- raise ValueError(
- "can only insert Interval objects and NA into an IntervalIndex"
- )
- return left_insert, right_insert
+ return self._validate_scalar(value)
def _validate_setitem_value(self, value):
needs_float_conversion = False
- if is_scalar(value) and isna(value):
+ if is_valid_nat_for_dtype(value, self.left.dtype):
# na value: need special casing to set directly on numpy arrays
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 0176755b54dd1..e5ccb51ce36f5 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -105,6 +105,10 @@ def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right)
+ if result.dtype.subtype.kind not in ["m", "M"]:
+ msg = "'value' should be an interval type, got <.*NaTType'> instead."
+ with pytest.raises(TypeError, match=msg):
+ result[0] = pd.NaT
if result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 734c98af3d058..b81f0f27e60ad 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -191,13 +191,14 @@ def test_insert(self, data):
tm.assert_index_equal(result, expected)
# invalid type
- msg = "can only insert Interval objects and NA into an IntervalIndex"
+ msg = "can only insert Interval objects and NA into an IntervalArray"
with pytest.raises(ValueError, match=msg):
data.insert(1, "foo")
# invalid closed
- msg = "inserted item must be closed on the same side as the index"
+ msg = "'value.closed' is 'left', expected 'right'."
for closed in {"left", "right", "both", "neither"} - {item.closed}:
+ msg = f"'value.closed' is '{closed}', expected '{item.closed}'."
with pytest.raises(ValueError, match=msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
@@ -211,7 +212,7 @@ def test_insert(self, data):
if data.left.dtype.kind not in ["m", "M"]:
# trying to insert pd.NaT into a numeric-dtyped Index should cast/raise
- msg = "can only insert Interval objects and NA into an IntervalIndex"
+ msg = "can only insert Interval objects and NA into an IntervalArray"
with pytest.raises(ValueError, match=msg):
result = data.insert(1, pd.NaT)
else:
| https://api.github.com/repos/pandas-dev/pandas/pulls/36483 | 2020-09-19T20:56:14Z | 2020-09-19T22:53:29Z | 2020-09-19T22:53:29Z | 2020-09-19T22:56:37Z | |
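For illustration (not from the PR itself): the shared `_validate_scalar` means `insert` enforces the same closed-side check the updated tests assert. This reflects behavior as of this change; later pandas versions may fall back to object dtype instead of raising.

```python
import pandas as pd

idx = pd.interval_range(0, 3)  # closed="right"
idx.insert(1, pd.Interval(0, 1, closed="right"))  # ok: sides match

try:
    idx.insert(1, pd.Interval(0, 1, closed="left"))  # mismatched side
except ValueError as err:
    print(err)  # "'value.closed' is 'left', expected 'right'."
```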
Complex Dtype Support for Hashmap Algos | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 2f8cb346935a9..684ab0fa38d22 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -101,6 +101,7 @@ Other enhancements
- :meth:`Series.ewm`, :meth:`DataFrame.ewm`, now support a ``method`` argument with a ``'table'`` option that performs the windowing operation over an entire :class:`DataFrame`. See :ref:`Window Overview <window.overview>` for performance and functional benefits (:issue:`42273`)
- :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` now support the argument ``skipna`` (:issue:`34047`)
- :meth:`read_table` now supports the argument ``storage_options`` (:issue:`39167`)
+- Methods that relied on hashmap based algos such as :meth:`DataFrameGroupBy.value_counts`, :meth:`DataFrameGroupBy.count` and :func:`factorize` ignored imaginary component for complex numbers (:issue:`17927`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 60279395724ff..fdec60a84a708 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -150,6 +150,8 @@ def diff_2d(
) -> None: ...
def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
+def ensure_complex64(arr: object, copy=True) -> npt.NDArray[np.complex64]: ...
+def ensure_complex128(arr: object, copy=True) -> npt.NDArray[np.complex128]: ...
def ensure_float64(arr: object, copy=True) -> npt.NDArray[np.float64]: ...
def ensure_float32(arr: object, copy=True) -> npt.NDArray[np.float32]: ...
def ensure_int8(arr: object, copy=True) -> npt.NDArray[np.int8]: ...
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 167fac257075c..b4200456e2c3d 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -15,6 +15,8 @@ import numpy as np
cimport numpy as cnp
from numpy cimport (
+ NPY_COMPLEX64,
+ NPY_COMPLEX128,
NPY_FLOAT32,
NPY_FLOAT64,
NPY_INT8,
diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index 64e8bdea4672c..87130906ef28b 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -47,6 +47,8 @@ dtypes = [('float64', 'FLOAT64', 'float64'),
('uint16', 'UINT16', 'uint16'),
('uint32', 'UINT32', 'uint32'),
('uint64', 'UINT64', 'uint64'),
+ ('complex64', 'COMPLEX64', 'complex64'),
+ ('complex128', 'COMPLEX128', 'complex128')
# ('platform_int', 'INT', 'int_'),
# ('object', 'OBJECT', 'object_'),
]
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 393fe08f7277c..a9c2b31849425 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -97,6 +97,8 @@ def ensure_float(arr):
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
+ensure_complex64 = algos.ensure_complex64
+ensure_complex128 = algos.ensure_complex128
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index a714abd461461..f26f18c9c20a0 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1009,6 +1009,30 @@ def test_groupby_complex():
tm.assert_series_equal(result, expected)
+def test_groupby_complex_numbers():
+ # GH 17927
+ df = DataFrame(
+ [
+ {"a": 1, "b": 1 + 1j},
+ {"a": 1, "b": 1 + 2j},
+ {"a": 4, "b": 1},
+ ]
+ )
+ expected = DataFrame(
+ np.array([1, 1, 1], dtype=np.int64),
+ index=Index([(1 + 1j), (1 + 2j), (1 + 0j)], dtype="object", name="b"),
+ columns=Index(["a"], dtype="object"),
+ )
+ result = df.groupby("b", sort=False).count()
+ tm.assert_frame_equal(result, expected)
+
+ # Sorted by the magnitude of the complex numbers
+ # Complex Index dtype is cast to object
+ expected.index = Index([(1 + 0j), (1 + 1j), (1 + 2j)], dtype="object", name="b")
+ result = df.groupby("b", sort=True).count()
+ tm.assert_frame_equal(result, expected)
+
+
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index 1fd8b0f8b837a..ee517a667d832 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -8,6 +8,7 @@
from pandas import (
DatetimeIndex,
MultiIndex,
+ Series,
)
import pandas._testing as tm
@@ -299,6 +300,37 @@ def test_duplicated_drop_duplicates():
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ np.complex64,
+ np.complex128,
+ ],
+)
+def test_duplicated_series_complex_numbers(dtype):
+ # GH 17927
+ expected = Series(
+ [False, False, False, True, False, False, False, True, False, True],
+ dtype=bool,
+ )
+ result = Series(
+ [
+ np.nan + np.nan * 1j,
+ 0,
+ 1j,
+ 1j,
+ 1,
+ 1 + 1j,
+ 1 + 2j,
+ 1 + 1j,
+ np.nan,
+ np.nan + np.nan * 1j,
+ ],
+ dtype=dtype,
+ ).duplicated()
+ tm.assert_series_equal(result, expected)
+
+
def test_multi_drop_duplicates_pos_args_deprecation():
# GH#41485
idx = MultiIndex.from_arrays([[1, 2, 3, 1], [1, 2, 3, 1]])
diff --git a/pandas/tests/indexes/period/methods/test_factorize.py b/pandas/tests/indexes/period/methods/test_factorize.py
index 7c9367a1011a2..9e297d6caca27 100644
--- a/pandas/tests/indexes/period/methods/test_factorize.py
+++ b/pandas/tests/indexes/period/methods/test_factorize.py
@@ -1,6 +1,9 @@
import numpy as np
-from pandas import PeriodIndex
+from pandas import (
+ PeriodIndex,
+ factorize,
+)
import pandas._testing as tm
@@ -35,3 +38,15 @@ def test_factorize(self):
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+
+ def test_factorize_complex(self):
+ # GH 17927
+ array = [1, 2, 2 + 1j]
+ labels, uniques = factorize(array)
+
+ expected_labels = np.array([0, 1, 2], dtype=np.intp)
+ tm.assert_numpy_array_equal(labels, expected_labels)
+
+ # Should return a complex dtype in the future
+ expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object)
+ tm.assert_numpy_array_equal(uniques, expected_uniques)
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index c0c1c2f057c96..513b9af18d2b6 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -1487,3 +1487,50 @@ def test_mode_boolean_with_na(self):
result = ser.mode()
expected = Series({0: True}, dtype="boolean")
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "array,expected,dtype",
+ [
+ (
+ [0, 1j, 1, 1, 1 + 1j, 1 + 2j],
+ Series([1], dtype=np.complex128),
+ np.complex128,
+ ),
+ (
+ [0, 1j, 1, 1, 1 + 1j, 1 + 2j],
+ Series([1], dtype=np.complex64),
+ np.complex64,
+ ),
+ (
+ [1 + 1j, 2j, 1 + 1j],
+ Series([1 + 1j], dtype=np.complex128),
+ np.complex128,
+ ),
+ ],
+ )
+ def test_single_mode_value_complex(self, array, expected, dtype):
+ result = Series(array, dtype=dtype).mode()
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "array,expected,dtype",
+ [
+ (
+ # no modes
+ [0, 1j, 1, 1 + 1j, 1 + 2j],
+ Series([0j, 1j, 1 + 0j, 1 + 1j, 1 + 2j], dtype=np.complex128),
+ np.complex128,
+ ),
+ (
+ [1 + 1j, 2j, 1 + 1j, 2j, 3],
+ Series([2j, 1 + 1j], dtype=np.complex64),
+ np.complex64,
+ ),
+ ],
+ )
+ def test_multimode_complex(self, array, expected, dtype):
+ # GH 17927
+ # mode tries to sort multimodal series.
+ # Complex numbers are sorted by their magnitude
+ result = Series(array, dtype=dtype).mode()
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index d3a3434872826..f769c08a512ef 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -186,3 +186,18 @@ def test_isin_large_series_mixed_dtypes_and_nan():
result = ser.isin({"foo", "bar"})
expected = Series([False] * 3 * 1_000_000)
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "array,expected",
+ [
+ (
+ [0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j],
+ Series([False, True, True, False, True, True, True], dtype=bool),
+ )
+ ],
+)
+def test_isin_complex_numbers(array, expected):
+ # GH 17927
+ result = Series(array).isin([1j, 1 + 1j, 1 + 2j])
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index e707c3f4023df..c914dba75dc35 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -207,3 +207,22 @@ def test_value_counts_bool_with_nan(self, ser, dropna, exp):
# GH32146
out = ser.value_counts(dropna=dropna)
tm.assert_series_equal(out, exp)
+
+ @pytest.mark.parametrize(
+ "input_array,expected",
+ [
+ (
+ [1 + 1j, 1 + 1j, 1, 3j, 3j, 3j],
+ Series([3, 2, 1], index=pd.Index([3j, 1 + 1j, 1], dtype=np.complex128)),
+ ),
+ (
+ [1 + 1j, 1 + 1j, 1, 3j, 3j, 3j],
+ Series([3, 2, 1], index=pd.Index([3j, 1 + 1j, 1], dtype=np.complex64)),
+ ),
+ ],
+ )
+ def test_value_counts_complex_numbers(self, input_array, expected):
+ # GH 17927
+ # Complex Index dtype is cast to object
+ result = Series(input_array).value_counts()
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index ba587e28b8c3d..5488c076554fd 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1513,6 +1513,21 @@ def test_unique_tuples(self, arr, uniques):
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize(
+ "array,expected",
+ [
+ (
+ [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j],
+ # Should return a complex dtype in the future
+ np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=object),
+ )
+ ],
+ )
+ def test_unique_complex_numbers(self, array, expected):
+ # GH 17927
+ result = pd.unique(array)
+ tm.assert_numpy_array_equal(result, expected)
+
class TestHashTable:
def test_string_hashtable_set_item_signature(self):
| - [x] fixes #17927 #26475
- [x] tests added / passed
- [x] passes black pandas
- [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff
Ref: https://github.com/pandas-dev/pandas/issues/18009
Based on https://github.com/pandas-dev/pandas/pull/27599
First set of tests for complex-number handling, plus sensible results from functions that rely on hash tables.
Uses generic object hashing for now.
@jbrockmendel, are you interested in reviewing? | https://api.github.com/repos/pandas-dev/pandas/pulls/36482 | 2020-09-19T18:58:54Z | 2021-09-04T14:25:00Z | 2021-09-04T14:25:00Z | 2021-09-04T14:25:00Z |
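The effect the new tests pin down, runnable on a pandas that includes this change: hashmap-based algos now distinguish complex values by both components instead of dropping the imaginary part.

```python
import numpy as np
import pandas as pd

ser = pd.Series([1 + 1j, 1 + 1j, 1, 3j, 3j, 3j])
print(ser.value_counts())  # 3j -> 3, (1+1j) -> 2, (1+0j) -> 1

# previously values differing only in their imaginary part could collapse
print(pd.unique(np.array([1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j])))
```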
CLN: Use https for @tm.network | diff --git a/pandas/_testing.py b/pandas/_testing.py
index 04d36749a3d8c..3fb869c0f7cee 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -2404,7 +2404,7 @@ def can_connect(url, error_classes=None):
@optional_args
def network(
t,
- url="http://www.google.com",
+ url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
@@ -2428,7 +2428,7 @@ def network(
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
- for connectivity. Defaults to 'http://www.google.com'.
+ for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
@@ -2472,7 +2472,7 @@ def network(
You can specify alternative URLs::
- >>> @network("http://www.yahoo.com")
+ >>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
| - [x] ref #36467
AssertionError: Caused unexpected warning(s): [('ResourceWarning', ResourceWarning("unclosed <ssl.SSLSocket fd=18, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=6, laddr=('10.20.0.43', 54722), raddr=('**172.217.204.95**', 443)>"), ),
**This is a Google address**, which leads me to believe the ResourceWarning is coming from our `network` function in `_testing.py`, since it calls `urllib.request.urlopen("https://www.google.com")`.
Note: this doesn't solve the problem, but we should use https anyway (it avoids a redirect). | https://api.github.com/repos/pandas-dev/pandas/pulls/36480 | 2020-09-19T17:41:23Z | 2020-09-19T19:59:11Z | 2020-09-19T19:59:11Z | 2020-09-19T19:59:15Z |
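As a hedged aside, not part of this PR: the unclosed-socket half of the warning is the kind of thing a context manager around `urlopen` avoids. The decorator is used as below; `pandas._testing` is internal, so treat this as illustration rather than supported API.

```python
import urllib.request

import pandas._testing as tm

@tm.network  # probes https://www.google.com for connectivity
def test_something_needing_network():
    # a context manager closes the socket, avoiding the
    # "unclosed <ssl.SSLSocket ...>" ResourceWarning quoted above
    with urllib.request.urlopen("https://www.example.com") as resp:
        assert resp.status == 200
```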
CLN: aggregation.transform | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index a269580bc4453..b77e301547731 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -420,6 +420,7 @@ Reshaping
- Bug in :func:`union_indexes` where input index names are not preserved in some cases. Affects :func:`concat` and :class:`DataFrame` constructor (:issue:`13475`)
- Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`)
- Bug in :meth:`DataFrame.agg` with ``func={'name':<FUNC>}`` incorrectly raising ``TypeError`` when ``DataFrame.columns==['Name']`` (:issue:`36212`)
+- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`)
-
Sparse
@@ -442,7 +443,6 @@ Other
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
- Fixed metadata propagation in the :class:`Series.dt` accessor (:issue:`28283`)
-- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index f2eb282d1e498..ad69e9f31e065 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -17,9 +17,17 @@
Sequence,
Tuple,
Union,
+ cast,
)
-from pandas._typing import AggFuncType, Axis, FrameOrSeries, Label
+from pandas._typing import (
+ AggFuncType,
+ AggFuncTypeBase,
+ Axis,
+ FrameOrSeries,
+ FrameOrSeriesUnion,
+ Label,
+)
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
@@ -391,7 +399,7 @@ def validate_func_kwargs(
def transform(
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
-) -> FrameOrSeries:
+) -> FrameOrSeriesUnion:
"""
Transform a DataFrame or Series
@@ -424,16 +432,20 @@ def transform(
assert not is_series
return transform(obj.T, func, 0, *args, **kwargs).T
- if isinstance(func, list):
+ if is_list_like(func) and not is_dict_like(func):
+ func = cast(List[AggFuncTypeBase], func)
+ # Convert func equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
- if isinstance(func, dict):
+ if is_dict_like(func):
+ func = cast(Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], func)
return transform_dict_like(obj, func, *args, **kwargs)
# func is either str or callable
+ func = cast(AggFuncTypeBase, func)
try:
result = transform_str_or_callable(obj, func, *args, **kwargs)
except Exception:
@@ -451,29 +463,42 @@ def transform(
return result
-def transform_dict_like(obj, func, *args, **kwargs):
+def transform_dict_like(
+ obj: FrameOrSeries,
+ func: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]],
+ *args,
+ **kwargs,
+):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
+ if len(func) == 0:
+ raise ValueError("No transform functions were provided")
+
if obj.ndim != 1:
+ # Check for missing columns on a frame
cols = sorted(set(func.keys()) - set(obj.columns))
if len(cols) > 0:
raise SpecificationError(f"Column(s) {cols} do not exist")
- if any(isinstance(v, dict) for v in func.values()):
+ # Can't use func.values(); wouldn't work for a Series
+ if any(is_dict_like(v) for _, v in func.items()):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
- results = {}
+ results: Dict[Label, FrameOrSeriesUnion] = {}
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = transform(colg, how, 0, *args, **kwargs)
- except Exception as e:
- if str(e) == "Function did not transform":
- raise e
+ except Exception as err:
+ if (
+ str(err) == "Function did not transform"
+ or str(err) == "No transform functions were provided"
+ ):
+ raise err
# combine results
if len(results) == 0:
@@ -481,7 +506,9 @@ def transform_dict_like(obj, func, *args, **kwargs):
return concat(results, axis=1)
-def transform_str_or_callable(obj, func, *args, **kwargs):
+def transform_str_or_callable(
+ obj: FrameOrSeries, func: AggFuncTypeBase, *args, **kwargs
+) -> FrameOrSeriesUnion:
"""
Compute transform in the case of a string or callable func
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1f9987d9d3f5b..28a6f6b8c6621 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7270,7 +7270,7 @@ def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
def _gotitem(
self,
- key: Union[str, List[str]],
+ key: Union[Label, List[Label]],
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 14363dabfcdf3..d595b03a535ed 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -265,16 +265,17 @@
Parameters
----------
-func : function, str, list or dict
+func : function, str, list-like or dict-like
Function to use for transforming the data. If a function, must either
- work when passed a {klass} or when passed to {klass}.apply.
+ work when passed a {klass} or when passed to {klass}.apply. If func
+ is both list-like and dict-like, dict-like behavior takes precedence.
Accepted combinations are:
- function
- string function name
- - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- - dict of axis labels -> functions, function names or list of such.
+ - list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
+ - dict-like of axis labels -> functions, function names or list-like of such.
{axis}
*args
Positional arguments to pass to `func`.
diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py
index 346e60954fc13..01c6fd4ec08f0 100644
--- a/pandas/tests/frame/apply/test_frame_transform.py
+++ b/pandas/tests/frame/apply/test_frame_transform.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex
+from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.base import transformation_kernels
@@ -41,9 +41,15 @@ def test_transform_groupby_kernel(axis, float_frame, op):
@pytest.mark.parametrize(
- "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])]
+ "ops, names",
+ [
+ ([np.sqrt], ["sqrt"]),
+ ([np.abs, np.sqrt], ["absolute", "sqrt"]),
+ (np.array([np.sqrt]), ["sqrt"]),
+ (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
+ ],
)
-def test_transform_list(axis, float_frame, ops, names):
+def test_transform_listlike(axis, float_frame, ops, names):
# GH 35964
other_axis = 1 if axis in {0, "index"} else 0
with np.errstate(all="ignore"):
@@ -56,7 +62,14 @@ def test_transform_list(axis, float_frame, ops, names):
tm.assert_frame_equal(result, expected)
-def test_transform_dict(axis, float_frame):
+@pytest.mark.parametrize("ops", [[], np.array([])])
+def test_transform_empty_listlike(float_frame, ops):
+ with pytest.raises(ValueError, match="No transform functions were provided"):
+ float_frame.transform(ops)
+
+
+@pytest.mark.parametrize("box", [dict, Series])
+def test_transform_dictlike(axis, float_frame, box):
# GH 35964
if axis == 0 or axis == "index":
e = float_frame.columns[0]
@@ -64,10 +77,26 @@ def test_transform_dict(axis, float_frame):
else:
e = float_frame.index[0]
expected = float_frame.iloc[[0]].transform(np.abs)
- result = float_frame.transform({e: np.abs}, axis=axis)
+ result = float_frame.transform(box({e: np.abs}), axis=axis)
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "ops",
+ [
+ {},
+ {"A": []},
+ {"A": [], "B": "cumsum"},
+ {"A": "cumsum", "B": []},
+ {"A": [], "B": ["cumsum"]},
+ {"A": ["cumsum"], "B": []},
+ ],
+)
+def test_transform_empty_dictlike(float_frame, ops):
+ with pytest.raises(ValueError, match="No transform functions were provided"):
+ float_frame.transform(ops)
+
+
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply):
# GH 35964
diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py
index 0e200709f60cf..67b271f757cfb 100644
--- a/pandas/tests/series/apply/test_series_transform.py
+++ b/pandas/tests/series/apply/test_series_transform.py
@@ -34,9 +34,15 @@ def test_transform_groupby_kernel(string_series, op):
@pytest.mark.parametrize(
- "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])]
+ "ops, names",
+ [
+ ([np.sqrt], ["sqrt"]),
+ ([np.abs, np.sqrt], ["absolute", "sqrt"]),
+ (np.array([np.sqrt]), ["sqrt"]),
+ (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
+ ],
)
-def test_transform_list(string_series, ops, names):
+def test_transform_listlike(string_series, ops, names):
# GH 35964
with np.errstate(all="ignore"):
expected = concat([op(string_series) for op in ops], axis=1)
@@ -45,15 +51,38 @@ def test_transform_list(string_series, ops, names):
tm.assert_frame_equal(result, expected)
-def test_transform_dict(string_series):
+@pytest.mark.parametrize("ops", [[], np.array([])])
+def test_transform_empty_listlike(string_series, ops):
+ with pytest.raises(ValueError, match="No transform functions were provided"):
+ string_series.transform(ops)
+
+
+@pytest.mark.parametrize("box", [dict, Series])
+def test_transform_dictlike(string_series, box):
# GH 35964
with np.errstate(all="ignore"):
expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)
expected.columns = ["foo", "bar"]
- result = string_series.transform({"foo": np.sqrt, "bar": np.abs})
+ result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs}))
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "ops",
+ [
+ {},
+ {"A": []},
+ {"A": [], "B": ["cumsum"]},
+ {"A": ["cumsum"], "B": []},
+ {"A": [], "B": "cumsum"},
+ {"A": "cumsum", "B": []},
+ ],
+)
+def test_transform_empty_dictlike(string_series, ops):
+ with pytest.raises(ValueError, match="No transform functions were provided"):
+ string_series.transform(ops)
+
+
def test_transform_udf(axis, string_series):
# GH 35964
# via apply
| - [x] closes #36330
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Followup to #35964
- Moved whatsnew to reshaping
- Use is_list_like/is_dict_like in aggregate.transform (behavior sketched below)
- Broke out dict-like and str/callable computations into their own functions to split up the transform function
- Added/refined typing | https://api.github.com/repos/pandas-dev/pandas/pulls/36478 | 2020-09-19T17:21:59Z | 2020-10-06T22:47:09Z | 2020-10-06T22:47:08Z | 2020-10-11T13:22:02Z |
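For the transform PR above, a minimal sketch of the list-like/dict-like behavior its new tests exercise; the small DataFrame here is a stand-in for the ``float_frame`` fixture:
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1.0, 4.0], "B": [9.0, 16.0]})

# list-likes of functions (not just plain lists) are accepted
df.transform(np.array([np.sqrt]))

# dict-likes (e.g. a Series) mapping labels to functions also work
df.transform(pd.Series({"A": np.abs}))

# empty list-likes/dict-likes now raise instead of misbehaving
try:
    df.transform({})
except ValueError as err:
    assert "No transform functions were provided" in str(err)
```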
Backport PR #36385 on branch 1.1.x (BUG: Always cast to Categorical in lexsort_indexer) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 19ed4d171af13..7d658215d7b76 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -45,6 +45,7 @@ Bug fixes
- Bug in :func:`read_spss` where passing a ``pathlib.Path`` as ``path`` would raise a ``TypeError`` (:issue:`33666`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`)
- Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`)
+- Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`)
- Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`)
- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ee73aa42701b0..c090531de4075 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -9,7 +9,6 @@
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
- is_categorical_dtype,
is_extension_array_dtype,
)
from pandas.core.dtypes.generic import ABCMultiIndex
@@ -227,13 +226,7 @@ def lexsort_indexer(
keys = [ensure_key_mapped(k, key) for k in keys]
for k, order in zip(keys, orders):
- # we are already a Categorical
- if is_categorical_dtype(k):
- cat = k
-
- # create the Categorical
- else:
- cat = Categorical(k, ordered=True)
+ cat = Categorical(k, ordered=True)
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {na_position}")
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index c60e7e3b1bdb6..0ca232ec433e7 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -691,3 +691,23 @@ def test_sort_values_key_dict_axis(self):
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("ordered", [True, False])
+ def test_sort_values_key_casts_to_categorical(self, ordered):
+ # https://github.com/pandas-dev/pandas/issues/36383
+ categories = ["c", "b", "a"]
+ df = pd.DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
+
+ def sorter(key):
+ if key.name == "y":
+ return pd.Series(
+ pd.Categorical(key, categories=categories, ordered=ordered)
+ )
+ return key
+
+ result = df.sort_values(by=["x", "y"], key=sorter)
+ expected = pd.DataFrame(
+ {"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
+ )
+
+ tm.assert_frame_equal(result, expected)
| Backport PR #36385: BUG: Always cast to Categorical in lexsort_indexer | https://api.github.com/repos/pandas-dev/pandas/pulls/36477 | 2020-09-19T14:04:10Z | 2020-09-19T15:07:17Z | 2020-09-19T15:07:17Z | 2020-09-19T15:07:17Z |
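A sketch of the case fixed by the backported change above: a ``sort_values`` key that casts a column to ``Categorical`` no longer raises an ``AttributeError``. Column names and category order follow the new test:
```python
import pandas as pd

df = pd.DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
categories = ["c", "b", "a"]

result = df.sort_values(
    by=["x", "y"],
    key=lambda col: pd.Series(pd.Categorical(col, categories=categories))
    if col.name == "y"
    else col,
)
# rows are ordered by the categorical order of "y": c, b, a
assert list(result["y"]) == ["c", "b", "a"]
```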
Turn on stale PR GitHub action | diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml
index a6aece34478d9..e3b8d9336a5a6 100644
--- a/.github/workflows/stale-pr.yml
+++ b/.github/workflows/stale-pr.yml
@@ -18,4 +18,4 @@ jobs:
days-before-stale: 30
days-before-close: -1
remove-stale-when-updated: true
- debug-only: true
+ debug-only: false
| I think it makes sense to turn this on now (it's only labeling stale PRs for now, and after doing a lot of this manually it seems to have calmed down). Later we can add some sort of friendly reminder to update the PR. Also, rather than closing these stalled PRs, I'm wondering if they could serve as starting points for new contributors who want to pick up where someone else left off.
Some recent workflows here (you can search for the string "Marking" in the logs to find PRs that would have been labeled): https://github.com/pandas-dev/pandas/actions?query=workflow%3A%22Stale+PRs%22
@simonjayhawkins You'd mentioned doing this more frequently in debug mode; would you prefer longer intervals when running live? | https://api.github.com/repos/pandas-dev/pandas/pulls/36476 | 2020-09-19T13:33:28Z | 2020-09-19T19:52:25Z | 2020-09-19T19:52:24Z | 2020-09-19T20:04:59Z |
Backport PR #36266: BUG: fix isin with nans and large arrays | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 7e52e4c53d6ff..19ed4d171af13 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -46,6 +46,7 @@ Bug fixes
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`)
- Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`)
- Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`)
+- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 856b4ead3f3cc..67ab3a8548f21 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -438,7 +438,12 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1_000_000 and not is_object_dtype(comps):
- f = np.in1d
+ # If the values include nan we need to check for nan explicitly
+ # since np.nan is not equal to np.nan
+ if np.isnan(values).any():
+ f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c))
+ else:
+ f = np.in1d
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 326c926238f89..a8a55418a619a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -787,7 +787,6 @@ def test_i8(self):
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
-
s = pd.date_range("20000101", periods=2000000, freq="s").values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
@@ -827,6 +826,23 @@ def test_same_nan_is_in(self):
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
+ def test_same_nan_is_in_large(self):
+ # https://github.com/pandas-dev/pandas/issues/22205
+ s = np.tile(1.0, 1_000_001)
+ s[0] = np.nan
+ result = algos.isin(s, [np.nan, 1])
+ expected = np.ones(len(s), dtype=bool)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_same_nan_is_in_large_series(self):
+ # https://github.com/pandas-dev/pandas/issues/22205
+ s = np.tile(1.0, 1_000_001)
+ series = pd.Series(s)
+ s[0] = np.nan
+ result = series.isin([np.nan, 1])
+ expected = pd.Series(np.ones(len(s), dtype=bool))
+ tm.assert_series_equal(result, expected)
+
def test_same_object_is_in(self):
# GH 22160
# there could be special treatment for nans
| #36266 | https://api.github.com/repos/pandas-dev/pandas/pulls/36474 | 2020-09-19T09:57:09Z | 2020-09-19T11:14:40Z | 2020-09-19T11:14:40Z | 2020-09-19T11:14:46Z |
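A sketch of the fix backported above: ``isin`` now honors ``NaN`` membership on the large-array fast path (``np.in1d``), where ``np.nan != np.nan`` would otherwise make every ``NaN`` fall through. This mirrors the new ``test_same_nan_is_in_large_series`` test:
```python
import numpy as np
import pandas as pd

arr = np.tile(1.0, 1_000_001)  # > 1_000_000 elements triggers the in1d path
arr[0] = np.nan

result = pd.Series(arr).isin([np.nan, 1])
assert result.all()  # the NaN at position 0 is now matched too
```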
Backport PR #36440 on branch 1.1.x (REGR: Series[numeric] comparison with str raising on numexpr path) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index c920e517ea303..9e068745f2f4c 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -34,7 +34,7 @@ Fixed regressions
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`)
--
+- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 32bbdf425acab..a18f7bdccd0d0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -135,10 +135,14 @@ def cmp_method(self, other):
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
- else:
+ elif is_interval_dtype(self.dtype):
with np.errstate(all="ignore"):
result = op(self._values, np.asarray(other))
+ else:
+ with np.errstate(all="ignore"):
+ result = ops.comparison_op(self._values, np.asarray(other), op)
+
if is_bool_dtype(result):
return result
return ops.invalid_comparison(self, other, op)
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index 3379ee56b6ad0..31e8d007cae76 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -23,6 +23,7 @@
is_bool_dtype,
is_integer_dtype,
is_list_like,
+ is_numeric_v_string_like,
is_object_dtype,
is_scalar,
)
@@ -235,6 +236,10 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
else:
res_values = np.zeros(lvalues.shape, dtype=bool)
+ elif is_numeric_v_string_like(lvalues, rvalues):
+ # GH#36377 going through the numexpr path would incorrectly raise
+ return invalid_comparison(lvalues, rvalues, op)
+
elif is_object_dtype(lvalues.dtype):
res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 2155846b271fc..84ff6e6f29bca 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -89,6 +89,26 @@ def test_compare_invalid(self):
b.name = pd.Timestamp("2000-01-01")
tm.assert_series_equal(a / b, 1 / (b / a))
+ def test_numeric_cmp_string_numexpr_path(self, box):
+ # GH#36377, GH#35700
+ xbox = box if box is not pd.Index else np.ndarray
+
+ obj = pd.Series(np.random.randn(10 ** 5))
+ obj = tm.box_expected(obj, box, transpose=False)
+
+ result = obj == "a"
+
+ expected = pd.Series(np.zeros(10 ** 5, dtype=bool))
+ expected = tm.box_expected(expected, xbox, transpose=False)
+ tm.assert_equal(result, expected)
+
+ result = obj != "a"
+ tm.assert_equal(result, ~expected)
+
+ msg = "Invalid comparison between dtype=float64 and str"
+ with pytest.raises(TypeError, match=msg):
+ obj < "a"
+
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index 043539c173427..4df23d43ec1e1 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -114,18 +114,3 @@ def test_numpy_ufuncs_other(index, func):
else:
with pytest.raises(Exception):
func(index)
-
-
-def test_elementwise_comparison_warning():
- # https://github.com/pandas-dev/pandas/issues/22698#issuecomment-458968300
- # np.array([1, 2]) == 'a' returns False, and produces a
- # FutureWarning that it'll be [False, False] in the future.
- # We just want to ensure that comes through.
- # When NumPy dev actually enforces this change, we'll need to skip
- # this test.
- idx = Index([1, 2])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = idx == "a"
-
- expected = np.array([False, False])
- tm.assert_numpy_array_equal(result, expected)
| Backport PR #36440: REGR: Series[numeric] comparison with str raising on numexpr path | https://api.github.com/repos/pandas-dev/pandas/pulls/36473 | 2020-09-19T08:13:15Z | 2020-09-19T10:20:14Z | 2020-09-19T10:20:14Z | 2020-09-19T10:20:14Z |
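A sketch of the regression fixed by the backport above, on a Series large enough to take the numexpr path (adapted from ``test_numeric_cmp_string_numexpr_path``):
```python
import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(10 ** 5))

# equality/inequality against a string now returns all-False/all-True
# instead of raising on the numexpr path
assert not (s == "a").any()
assert (s != "a").all()

# ordered comparisons still raise, as intended
try:
    s < "a"
except TypeError:
    pass  # "Invalid comparison between dtype=float64 and str"
```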
CI: add pre-commit action, include pyupgrade | diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
new file mode 100644
index 0000000000000..723347913ac38
--- /dev/null
+++ b/.github/workflows/pre-commit.yml
@@ -0,0 +1,14 @@
+name: pre-commit
+
+on:
+ pull_request:
+ push:
+ branches: [master]
+
+jobs:
+ pre-commit:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - uses: pre-commit/action@v2.0.0
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 309e22e71a523..2fa5f5ec5d4fa 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,30 +3,30 @@ repos:
rev: 19.10b0
hooks:
- id: black
- language_version: python3
- repo: https://gitlab.com/pycqa/flake8
rev: 3.8.3
hooks:
- id: flake8
- language: python_venv
additional_dependencies: [flake8-comprehensions>=3.1.0]
- id: flake8
name: flake8-pyx
- language: python_venv
files: \.(pyx|pxd)$
types:
- file
args: [--append-config=flake8/cython.cfg]
- id: flake8
name: flake8-pxd
- language: python_venv
files: \.pxi\.in$
types:
- file
args: [--append-config=flake8/cython-template.cfg]
-- repo: https://github.com/pre-commit/mirrors-isort
- rev: v5.2.2
+- repo: https://github.com/PyCQA/isort
+ rev: 5.2.2
hooks:
- id: isort
- language: python_venv
exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$
+- repo: https://github.com/asottile/pyupgrade
+ rev: v2.7.2
+ hooks:
+ - id: pyupgrade
+ args: [--py37-plus]
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index e5c6f77eea3ef..6f29ea8b764bd 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -634,6 +634,10 @@ do not make sudden changes to the code that could have the potential to break
a lot of user code as a result, that is, we need it to be as *backwards compatible*
as possible to avoid mass breakages.
+In addition to ``./ci/code_checks.sh``, some extra checks are run by
+``pre-commit`` - see :ref:`here <contributing.pre-commit>` for how to
+run them.
+
Additional standards are outlined on the :ref:`pandas code style guide <code_style>`
Optional dependencies
@@ -826,6 +830,13 @@ remain up-to-date with our code checks as they change.
Note that if needed, you can skip these checks with ``git commit --no-verify``.
+If you don't want to use ``pre-commit`` as part of your workflow, you can still use it
+to run its checks by running::
+
+ pre-commit run --files <files you have modified>
+
+without having to have done ``pre-commit install`` beforehand.
+
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinxext/announce.py b/doc/sphinxext/announce.py
index 9c175e4e58b45..2ec0b515ea95c 100755
--- a/doc/sphinxext/announce.py
+++ b/doc/sphinxext/announce.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- encoding:utf-8 -*-
"""
Script to generate contributor and pull request lists
diff --git a/environment.yml b/environment.yml
index 36bbd3d307159..1a260b0f7cc56 100644
--- a/environment.yml
+++ b/environment.yml
@@ -22,7 +22,9 @@ dependencies:
- flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files
- isort>=5.2.1 # check that imports are in the right order
- mypy=0.782
+ - pre-commit
- pycodestyle # used by flake8
+ - pyupgrade
# documentation
- gitpython # obtain contributors from git for whatsnew
diff --git a/requirements-dev.txt b/requirements-dev.txt
index fb647c10f72bc..9ee26562e6d4b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -13,7 +13,9 @@ flake8-comprehensions>=3.1.0
flake8-rst>=0.6.0,<=0.7.0
isort>=5.2.1
mypy==0.782
+pre-commit
pycodestyle
+pyupgrade
gitpython
gitdb
sphinx
| xref this comment https://github.com/pandas-dev/pandas/issues/36426#issuecomment-694238629 by @TomAugspurger
> I would recommend running the linting commands on CI through pre-commit. That way the versions in .pre-commit-config.yaml are used everywhere.
So, this PR does that.
It also adds pyupgrade as a pre-commit hook (xref #36450) and removes some unnecessary configurations from the `.pre-commit-config.yaml` file (all of these hooks already specify `language: python`, see e.g. https://github.com/psf/black/blob/master/.pre-commit-hooks.yaml)
TODO
----
update https://pandas.pydata.org/docs/development/contributing.html#code-standards (thanks Ali!) | https://api.github.com/repos/pandas-dev/pandas/pulls/36471 | 2020-09-19T07:15:22Z | 2020-09-22T14:31:08Z | 2020-09-22T14:31:08Z | 2020-09-25T18:27:43Z |
BUG: Fix astype from float32 to string | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 7d658215d7b76..72937141c2870 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -47,6 +47,7 @@ Bug fixes
- Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`)
- Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`)
- Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`)
+- Bug in :meth:`Series.astype` showing too much precision when casting from ``np.float32`` to string dtype (:issue:`36451`)
- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index a57cf3b523985..61a9634b00211 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -659,11 +659,12 @@ cpdef ndarray[object] ensure_string_array(
Py_ssize_t i = 0, n = len(arr)
result = np.asarray(arr, dtype="object")
+
if copy and result is arr:
result = result.copy()
for i in range(n):
- val = result[i]
+ val = arr[i]
if isinstance(val, str):
continue
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index cef35f2b1137c..cb1144c18e49c 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -198,10 +198,9 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
if dtype:
assert dtype == "string"
- result = np.asarray(scalars, dtype="object")
# convert non-na-likes to str, and nan-likes to StringDtype.na_value
result = lib.ensure_string_array(
- result, na_value=StringDtype.na_value, copy=copy
+ scalars, na_value=StringDtype.na_value, copy=copy
)
# Manually creating new array avoids the validation step in the __init__, so is
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index efd5d29ae0717..56a8e21edd004 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -336,3 +336,12 @@ def test_memory_usage():
series = pd.Series(["a", "b", "c"], dtype="string")
assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True)
+
+
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+def test_astype_from_float_dtype(dtype):
+ # https://github.com/pandas-dev/pandas/issues/36451
+ s = pd.Series([0.1], dtype=dtype)
+ result = s.astype("string")
+ expected = pd.Series(["0.1"], dtype="string")
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index b9d90a9fc63dd..7449d8d65ef96 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -1,3 +1,4 @@
+import numpy as np
import pytest
from pandas import Interval, Series, Timestamp, date_range
@@ -46,3 +47,11 @@ def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
values.astype(float, errors=errors)
+
+ @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+ def test_astype_from_float_to_str(self, dtype):
+ # https://github.com/pandas-dev/pandas/issues/36451
+ s = Series([0.1], dtype=dtype)
+ result = s.astype(str)
+ expected = Series(["0.1"])
+ tm.assert_series_equal(result, expected)
| - [x] closes #36451
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
It seems both bugs were caused by the same behavior from numpy:
```python
[ins] In [1]: import numpy as np
[ins] In [2]: arr = np.array([0.1], dtype=np.float32)
[ins] In [3]: arr
Out[3]: array([0.1], dtype=float32)
[ins] In [4]: np.asarray(arr, dtype="object")
Out[4]: array([0.10000000149011612], dtype=object)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36464 | 2020-09-19T00:43:45Z | 2020-09-21T06:47:19Z | 2020-09-21T06:47:19Z | 2020-09-21T12:18:46Z |
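A sketch of the user-facing effect of the fix above, assuming a pandas build that includes this patch (mirroring the new ``test_astype_from_float_to_str`` test):
```python
import pandas as pd

s = pd.Series([0.1], dtype="float32")

# previously: "0.10000000149011612", because the object conversion
# widened the float32 to a full-precision Python float first
assert s.astype(str)[0] == "0.1"
assert s.astype("string")[0] == "0.1"
```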
Remove unnecessary trailing commas | diff --git a/doc/make.py b/doc/make.py
index db729853e5834..94fbfa9382d81 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -291,7 +291,7 @@ def main():
joined = ", ".join(cmds)
argparser.add_argument(
- "command", nargs="?", default="html", help=f"command to run: {joined}",
+ "command", nargs="?", default="html", help=f"command to run: {joined}"
)
argparser.add_argument(
"--num-jobs", type=int, default=0, help="number of jobs used by sphinx-build"
| https://api.github.com/repos/pandas-dev/pandas/pulls/36463 | 2020-09-19T00:22:12Z | 2020-09-19T07:36:12Z | 2020-09-19T07:36:12Z | 2020-09-19T07:36:13Z | |
REF: MultiIndex._validate_insert_value, IntervalArray._validate_setitem_value | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index ff9dd3f2a85bc..f9f68004bcc23 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -547,38 +547,7 @@ def __getitem__(self, value):
return self._shallow_copy(left, right)
def __setitem__(self, key, value):
- # na value: need special casing to set directly on numpy arrays
- needs_float_conversion = False
- if is_scalar(value) and isna(value):
- if is_integer_dtype(self.dtype.subtype):
- # can't set NaN on a numpy integer array
- needs_float_conversion = True
- elif is_datetime64_any_dtype(self.dtype.subtype):
- # need proper NaT to set directly on the numpy array
- value = np.datetime64("NaT")
- elif is_timedelta64_dtype(self.dtype.subtype):
- # need proper NaT to set directly on the numpy array
- value = np.timedelta64("NaT")
- value_left, value_right = value, value
-
- # scalar interval
- elif is_interval_dtype(value) or isinstance(value, Interval):
- self._check_closed_matches(value, name="value")
- value_left, value_right = value.left, value.right
-
- else:
- # list-like of intervals
- try:
- array = IntervalArray(value)
- value_left, value_right = array.left, array.right
- except TypeError as err:
- # wrong type: not interval or NA
- msg = f"'value' should be an interval type, got {type(value)} instead."
- raise TypeError(msg) from err
-
- if needs_float_conversion:
- raise ValueError("Cannot set float NaN to integer-backed IntervalArray")
-
+ value_left, value_right = self._validate_setitem_value(value)
key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
@@ -898,6 +867,41 @@ def _validate_insert_value(self, value):
)
return left_insert, right_insert
+ def _validate_setitem_value(self, value):
+ needs_float_conversion = False
+
+ if is_scalar(value) and isna(value):
+ # na value: need special casing to set directly on numpy arrays
+ if is_integer_dtype(self.dtype.subtype):
+ # can't set NaN on a numpy integer array
+ needs_float_conversion = True
+ elif is_datetime64_any_dtype(self.dtype.subtype):
+ # need proper NaT to set directly on the numpy array
+ value = np.datetime64("NaT")
+ elif is_timedelta64_dtype(self.dtype.subtype):
+ # need proper NaT to set directly on the numpy array
+ value = np.timedelta64("NaT")
+ value_left, value_right = value, value
+
+ elif is_interval_dtype(value) or isinstance(value, Interval):
+ # scalar interval
+ self._check_closed_matches(value, name="value")
+ value_left, value_right = value.left, value.right
+
+ else:
+ try:
+ # list-like of intervals
+ array = IntervalArray(value)
+ value_left, value_right = array.left, array.right
+ except TypeError as err:
+ # wrong type: not interval or NA
+ msg = f"'value' should be an interval type, got {type(value)} instead."
+ raise TypeError(msg) from err
+
+ if needs_float_conversion:
+ raise ValueError("Cannot set float NaN to integer-backed IntervalArray")
+ return value_left, value_right
+
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each interval.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a21a54e4a9be3..cd3e384837280 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3596,6 +3596,15 @@ def astype(self, dtype, copy=True):
return self._shallow_copy()
return self
+ def _validate_insert_value(self, item):
+ if not isinstance(item, tuple):
+ # Pad the key with empty strings if lower levels of the key
+ # aren't specified:
+ item = (item,) + ("",) * (self.nlevels - 1)
+ elif len(item) != self.nlevels:
+ raise ValueError("Item must have length equal to number of levels.")
+ return item
+
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
@@ -3610,12 +3619,7 @@ def insert(self, loc: int, item):
-------
new_index : Index
"""
- # Pad the key with empty strings if lower levels of the key
- # aren't specified:
- if not isinstance(item, tuple):
- item = (item,) + ("",) * (self.nlevels - 1)
- elif len(item) != self.nlevels:
- raise ValueError("Item must have length equal to number of levels.")
+ item = self._validate_insert_value(item)
new_levels = []
new_codes = []
| https://api.github.com/repos/pandas-dev/pandas/pulls/36461 | 2020-09-18T23:27:35Z | 2020-09-19T00:58:25Z | 2020-09-19T00:58:25Z | 2020-09-19T01:03:08Z | |
CI: Revert PR template | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 5e4d3b4ec38e4..7c3870470f074 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,9 +1,3 @@
----
-
-labels: "Needs Review"
-
----
-
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
| ---
labels: "Needs Review"
---
I don't know if this change had the intended effect, so opening this (partially as a test if it actually works)
Edit: Yeah, it didn't. I guess this doesn't work like the issue templates. | https://api.github.com/repos/pandas-dev/pandas/pulls/36460 | 2020-09-18T22:28:42Z | 2020-09-18T22:50:18Z | 2020-09-18T22:50:18Z | 2020-09-18T22:50:25Z |
Align cython and python reduction code paths | diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 8161b5c5c2b11..3a0fda5aed620 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -16,12 +16,12 @@ from pandas._libs cimport util
from pandas._libs.lib import is_scalar, maybe_convert_objects
-cdef _check_result_array(object obj, Py_ssize_t cnt):
+cpdef check_result_array(object obj, Py_ssize_t cnt):
if (util.is_array(obj) or
(isinstance(obj, list) and len(obj) == cnt) or
getattr(obj, 'shape', None) == (cnt,)):
- raise ValueError('Function does not reduce')
+ raise ValueError('Must produce aggregated value')
cdef class _BaseGrouper:
@@ -74,12 +74,14 @@ cdef class _BaseGrouper:
cached_ityp._engine.clear_mapping()
cached_ityp._cache.clear() # e.g. inferred_freq must go
res = self.f(cached_typ)
- res = _extract_result(res)
+ res = extract_result(res)
if not initialized:
# On the first pass, we check the output shape to see
# if this looks like a reduction.
initialized = True
- _check_result_array(res, len(self.dummy_arr))
+ # In all tests other than test_series_grouper and
+ # test_series_bin_grouper, we have len(self.dummy_arr) == 0
+ check_result_array(res, len(self.dummy_arr))
return res, initialized
@@ -278,9 +280,14 @@ cdef class SeriesGrouper(_BaseGrouper):
return result, counts
-cdef inline _extract_result(object res, bint squeeze=True):
+cpdef inline extract_result(object res, bint squeeze=True):
""" extract the result object, it might be a 0-dim ndarray
or a len-1 0-dim, or a scalar """
+ if hasattr(res, "_values"):
+ # Preserve EA
+ res = res._values
+ if squeeze and res.ndim == 1 and len(res) == 1:
+ res = res[0]
if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if util.is_array(res):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index bbccd22f2ae85..0705261d0c516 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -29,7 +29,7 @@
import numpy as np
-from pandas._libs import lib
+from pandas._libs import lib, reduction as libreduction
from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion
from pandas.util._decorators import Appender, Substitution, doc
@@ -471,12 +471,19 @@ def _get_index() -> Index:
def _aggregate_named(self, func, *args, **kwargs):
result = {}
+ initialized = False
for name, group in self:
- group.name = name
+ # Each step of this loop corresponds to
+ # libreduction._BaseGrouper._apply_to_group
+ group.name = name # NB: libreduction does not pin name
+
output = func(group, *args, **kwargs)
- if isinstance(output, (Series, Index, np.ndarray)):
- raise ValueError("Must produce aggregated value")
+ output = libreduction.extract_result(output)
+ if not initialized:
+ # We only do this validation on the first iteration
+ libreduction.check_result_array(output, 0)
+ initialized = True
result[name] = output
return result
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index e9525f03368fa..b3f91d4623c84 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -623,7 +623,7 @@ def agg_series(self, obj: Series, func: F):
try:
return self._aggregate_series_fast(obj, func)
except ValueError as err:
- if "Function does not reduce" in str(err):
+ if "Must produce aggregated value" in str(err):
# raised in libreduction
pass
else:
@@ -653,27 +653,26 @@ def _aggregate_series_pure_python(self, obj: Series, func: F):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
- result = None
+ result = np.empty(ngroups, dtype="O")
+ initialized = False
splitter = get_splitter(obj, group_index, ngroups, axis=0)
for label, group in splitter:
+
+ # Each step of this loop corresponds to
+ # libreduction._BaseGrouper._apply_to_group
res = func(group)
+ res = libreduction.extract_result(res)
- if result is None:
- if isinstance(res, (Series, Index, np.ndarray)):
- if len(res) == 1:
- # e.g. test_agg_lambda_with_timezone lambda e: e.head(1)
- # FIXME: are we potentially losing important res.index info?
- res = res.item()
- else:
- raise ValueError("Function does not reduce")
- result = np.empty(ngroups, dtype="O")
+ if not initialized:
+ # We only do this validation on the first iteration
+ libreduction.check_result_array(res, 0)
+ initialized = True
counts[label] = group.shape[0]
result[label] = res
- assert result is not None
result = lib.maybe_convert_objects(result, try_float=0)
# TODO: maybe_cast_to_extension_array?
| Get rid of a few of the small behavior differences between these two code paths; a sketch of the now-shared validation follows below.
(entirely disabling the cython path leads to 4 currently-xfailed tests passing, so there is still some subtle difference after this) | https://api.github.com/repos/pandas-dev/pandas/pulls/36459 | 2020-09-18T20:57:43Z | 2020-09-19T15:34:45Z | 2020-09-19T15:34:45Z | 2020-09-19T16:46:13Z |
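For the reduction PR above, a minimal sketch of the validation both paths now share: an aggregation function must reduce each group to a scalar, and returning an array raises the same error message on either path:
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 2.0, 3.0]})

df.groupby("key")["val"].agg(np.sum)  # fine: reduces each group to a scalar

try:
    df.groupby("key")["val"].agg(lambda g: g.to_numpy())  # does not reduce
except ValueError as err:
    assert "Must produce aggregated value" in str(err)
```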
[BUG]: Rolling.sum() calculated wrong values when axis is one and dtypes are mixed | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 3992e697db7e4..0aa82fb936a95 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -336,7 +336,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`)
- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`)
- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
--
+- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes were mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 21a7164411fb7..06c3ad23f904f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -243,7 +243,13 @@ def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries:
if self.on is not None and not isinstance(self.on, Index):
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
-
+ if self.axis == 1:
+ # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything
+ # to float to calculate the complete row at once. We exclude all non-numeric
+ # dtypes.
+ obj = obj.select_dtypes(include=["integer", "float"], exclude=["timedelta"])
+ obj = obj.astype("float64", copy=False)
+ obj._mgr = obj._mgr.consolidate()
return obj
def _gotitem(self, key, ndim, subset=None):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 88afcec0f7bf4..4dfa0287bbb03 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -771,3 +771,51 @@ def test_rolling_numerical_too_large_numbers():
index=dates,
)
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("func", "value"),
+ [("sum", 2.0), ("max", 1.0), ("min", 1.0), ("mean", 1.0), ("median", 1.0)],
+)
+def test_rolling_mixed_dtypes_axis_1(func, value):
+ # GH: 20649
+ df = pd.DataFrame(1, index=[1, 2], columns=["a", "b", "c"])
+ df["c"] = 1.0
+ result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()
+ expected = pd.DataFrame(
+ {"a": [1.0, 1.0], "b": [value, value], "c": [value, value]}, index=[1, 2]
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_rolling_axis_one_with_nan():
+ # GH: 35596
+ df = pd.DataFrame(
+ [
+ [0, 1, 2, 4, np.nan, np.nan, np.nan],
+ [0, 1, 2, np.nan, np.nan, np.nan, np.nan],
+ [0, 2, 2, np.nan, 2, np.nan, 1],
+ ]
+ )
+ result = df.rolling(window=7, min_periods=1, axis="columns").sum()
+ expected = pd.DataFrame(
+ [
+ [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],
+ [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],
+ [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],
+ ]
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "value",
+ ["test", pd.to_datetime("2019-12-31"), pd.to_timedelta("1 days 06:05:01.00003")],
+)
+def test_rolling_axis_1_non_numeric_dtypes(value):
+ # GH: 20649
+ df = pd.DataFrame({"a": [1, 2]})
+ df["b"] = value
+ result = df.rolling(window=2, min_periods=1, axis=1).sum()
+ expected = pd.DataFrame({"a": [1.0, 2.0]})
+ tm.assert_frame_equal(result, expected)
| - [x] closes #20649
- [x] closes #35596
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
In case of ``axis=1`` and mixed dtypes, ``_apply_blockwise`` did not calculate the sum for a complete row. By converting the obj to float and consolidating the blocks, we can avoid this; a minimal reproduction follows below. | https://api.github.com/repos/pandas-dev/pandas/pulls/36458 | 2020-09-18T19:39:19Z | 2020-09-19T22:04:01Z | 2020-09-19T22:04:00Z | 2020-09-19T22:17:54Z
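A minimal reproduction of the fixed case, adapted from the tests added in this PR (assuming a pandas version where ``Rolling`` still accepts ``axis=1``):
```python
import pandas as pd

df = pd.DataFrame(1, index=[1, 2], columns=["a", "b", "c"])
df["c"] = 1.0  # mixed int/float dtypes -> separate internal blocks

result = df.rolling(window=2, min_periods=1, axis=1).sum()
expected = pd.DataFrame(
    {"a": [1.0, 1.0], "b": [2.0, 2.0], "c": [2.0, 2.0]}, index=[1, 2]
)
# before the fix, each dtype block was rolled separately, so a row's
# float cell could not see the integer cells next to it
pd.testing.assert_frame_equal(result, expected)
```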
CLN: Update files (as per #36450) to Python 3.7+ syntax | diff --git a/pandas/_config/display.py b/pandas/_config/display.py
index ef319f4447565..e4553a2107f87 100644
--- a/pandas/_config/display.py
+++ b/pandas/_config/display.py
@@ -22,7 +22,7 @@ def detect_console_encoding() -> str:
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
- except (AttributeError, IOError):
+ except (AttributeError, OSError):
pass
# try again for something better
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 9db0c3496e290..cd34bec52daef 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1960,8 +1960,7 @@ def index_subclass_makers_generator():
makeCategoricalIndex,
makeMultiIndex,
]
- for make_index_func in make_index_funcs:
- yield make_index_func
+ yield from make_index_funcs
def all_timeseries_index_generator(k=10):
diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py
index 53df8da175a56..129d8998faccc 100644
--- a/pandas/_vendored/typing_extensions.py
+++ b/pandas/_vendored/typing_extensions.py
@@ -409,7 +409,7 @@ def __repr__(self):
def __getitem__(self, parameters):
item = typing._type_check(
- parameters, "{} accepts only single type".format(self._name)
+ parameters, f"{self._name} accepts only single type"
)
return _GenericAlias(self, (item,))
@@ -1671,7 +1671,7 @@ def __class_getitem__(cls, params):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
- "Parameter list to {}[...] cannot be empty".format(cls.__qualname__)
+ f"Parameter list to {cls.__qualname__}[...] cannot be empty"
)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
@@ -2113,7 +2113,7 @@ def __class_getitem__(cls, params):
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
- raise TypeError("Cannot subclass {}.Annotated".format(cls.__module__))
+ raise TypeError(f"Cannot subclass {cls.__module__}.Annotated")
def _strip_annotations(t):
"""Strips the annotations from a given type.
@@ -2195,7 +2195,7 @@ def _tree_repr(self, tree):
else:
tp_repr = origin[0]._tree_repr(origin)
metadata_reprs = ", ".join(repr(arg) for arg in metadata)
- return "%s[%s, %s]" % (cls, tp_repr, metadata_reprs)
+ return f"{cls}[{tp_repr}, {metadata_reprs}]"
def _subs_tree(self, tvars=None, args=None): # noqa
if self is Annotated:
@@ -2382,7 +2382,7 @@ def TypeAlias(self, parameters):
It's invalid when used anywhere except as in the example above.
"""
- raise TypeError("{} is not subscriptable".format(self))
+ raise TypeError(f"{self} is not subscriptable")
elif sys.version_info[:2] >= (3, 7):
diff --git a/pandas/_version.py b/pandas/_version.py
index 66e756a4744c8..b3fa8530d09eb 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -74,7 +74,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
- except EnvironmentError:
+ except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
@@ -121,7 +121,7 @@ def git_get_keywords(versionfile_abs):
# _version.py.
keywords = {}
try:
- f = open(versionfile_abs, "r")
+ f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -132,7 +132,7 @@ def git_get_keywords(versionfile_abs):
if mo:
keywords["full"] = mo.group(1)
f.close()
- except EnvironmentError:
+ except OSError:
pass
return keywords
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index d16955a98b62f..a8020f4bb4e4f 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -274,7 +274,7 @@ def copy_dev_clipboard(text):
fo.write(text)
def paste_dev_clipboard() -> str:
- with open("/dev/clipboard", "rt") as fo:
+ with open("/dev/clipboard") as fo:
content = fo.read()
return content
@@ -521,7 +521,7 @@ def determine_clipboard():
return init_windows_clipboard()
if platform.system() == "Linux":
- with open("/proc/version", "r") as f:
+ with open("/proc/version") as f:
if "Microsoft" in f.read():
return init_wsl_clipboard()
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index bf4586a4b5b96..cc7b6b0bfea97 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -587,8 +587,7 @@ def _format_regular_rows(self):
else:
coloffset = 0
- for cell in self._generate_body(coloffset):
- yield cell
+ yield from self._generate_body(coloffset)
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
@@ -664,8 +663,7 @@ def _format_hierarchical_rows(self):
)
gcolidx += 1
- for cell in self._generate_body(gcolidx):
- yield cell
+ yield from self._generate_body(gcolidx)
def _generate_body(self, coloffset: int):
if self.styler is None:
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 40fde224a7ae9..9a91b16e52723 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -719,7 +719,7 @@ def _build_doc(self):
r = r.getroot()
except AttributeError:
pass
- except (UnicodeDecodeError, IOError) as e:
+ except (UnicodeDecodeError, OSError) as e:
# if the input is a blob of html goop
if not is_url(self.io):
r = fromstring(self.io, parser=parser)
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index c3977f89ac42f..a0ceb18c8bd20 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -821,7 +821,7 @@ def close(self):
if self.should_close:
try:
self.open_stream.close()
- except (IOError, AttributeError):
+ except (OSError, AttributeError):
pass
for file_handle in self.file_handles:
file_handle.close()
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index e850a101a0a63..5e5a89d96f0e5 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -364,7 +364,7 @@ def read_hdf(
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
- raise IOError("The HDFStore must be open for reading.")
+ raise OSError("The HDFStore must be open for reading.")
store = path_or_buf
auto_close = False
@@ -693,7 +693,7 @@ def open(self, mode: str = "a", **kwargs):
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
- except IOError as err: # pragma: no cover
+ except OSError as err: # pragma: no cover
if "can not be written" in str(err):
print(f"Opening {self._path} in read-only mode")
self._handle = tables.open_file(self._path, "r", **kwargs)
@@ -724,7 +724,7 @@ def open(self, mode: str = "a", **kwargs):
# trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == "r" and "Unable to open/create file" in str(err):
- raise IOError(str(err)) from err
+ raise OSError(str(err)) from err
raise
def close(self):
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index df5f6c3d53d30..a8af84e42918d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1077,7 +1077,7 @@ def close(self) -> None:
""" close the handle if its open """
try:
self.path_or_buf.close()
- except IOError:
+ except OSError:
pass
def _set_encoding(self) -> None:
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 602b42022f561..0c64ea824996f 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -342,7 +342,7 @@ def _setup_subplots(self):
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
- invalid_log = next(iter((input_log - valid_log)))
+ invalid_log = next(iter(input_log - valid_log))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 89fbfbd5b8324..e200f13652a84 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -277,7 +277,7 @@ def test_constructor_with_generator(self):
# returned a scalar for a generator
exp = Categorical([0, 1, 2])
- cat = Categorical((x for x in [0, 1, 2]))
+ cat = Categorical(x for x in [0, 1, 2])
tm.assert_categorical_equal(cat, exp)
cat = Categorical(range(3))
tm.assert_categorical_equal(cat, exp)
diff --git a/pandas/tests/arrays/integer/test_construction.py b/pandas/tests/arrays/integer/test_construction.py
index 1893c4554bfbf..e0a4877da6c7e 100644
--- a/pandas/tests/arrays/integer/test_construction.py
+++ b/pandas/tests/arrays/integer/test_construction.py
@@ -29,7 +29,7 @@ def test_from_dtype_from_float(data):
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
- dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
+ dropped = np.array(data.dropna()).astype(np.dtype(dtype.type))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 2fbeec8dd8378..9147360e71c73 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -167,7 +167,7 @@ def _na_value(self):
def _formatter(self, boxed=False):
if boxed:
- return "Decimal: {0}".format
+ return "Decimal: {}".format
return repr
@classmethod
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index f7b572a70073a..7d03dadb20dd9 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -137,7 +137,7 @@ def test_combine_add(self, data_repeated):
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
expected = pd.Series(
- ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])
+ [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 63a2160e128ed..b5e211895672a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -71,7 +71,7 @@ def test_series_with_name_not_matching_column(self):
lambda: DataFrame({}),
lambda: DataFrame(()),
lambda: DataFrame([]),
- lambda: DataFrame((_ for _ in [])),
+ lambda: DataFrame(_ for _ in []),
lambda: DataFrame(range(0)),
lambda: DataFrame(data=None),
lambda: DataFrame(data={}),
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 1bb40b322cd48..6783fc5b66433 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -249,8 +249,8 @@ def test_len():
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
- assert len(df.groupby(("a"))) == 0
- assert len(df.groupby(("b"))) == 3
+ assert len(df.groupby("a")) == 0
+ assert len(df.groupby("b")) == 3
assert len(df.groupby(["a", "b"])) == 3
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 40b4ce46e550b..18ef95c05f291 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -739,7 +739,7 @@ def test_get_group(self):
with pytest.raises(ValueError, match=msg):
g.get_group("foo")
with pytest.raises(ValueError, match=msg):
- g.get_group(("foo"))
+ g.get_group("foo")
msg = "must supply a same-length tuple to get_group with multiple grouping keys"
with pytest.raises(ValueError, match=msg):
g.get_group(("foo", "bar", "baz"))
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7720db9d98ebf..f811bd579aaaa 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1360,7 +1360,7 @@ def test_get_indexer_strings_raises(self):
def test_get_indexer_numeric_index_boolean_target(self, idx_class):
# GH 16877
- numeric_index = idx_class(RangeIndex((4)))
+ numeric_index = idx_class(RangeIndex(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index ca8a3ddc95575..0cc61cd7df389 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -745,7 +745,7 @@ def run_tests(df, rhs, right):
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame["joe"] = frame["joe"].astype("float64")
- frame["jolie"] = frame["jolie"].map("@{0}".format)
+ frame["jolie"] = frame["jolie"].map("@{}".format)
run_tests(df, rhs, right)
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index f00fa6274fca2..cce0783a3c867 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -648,7 +648,7 @@ def test_to_string_unicode_columns(self, float_frame):
assert isinstance(result, str)
def test_to_string_utf8_columns(self):
- n = "\u05d0".encode("utf-8")
+ n = "\u05d0".encode()
with option_context("display.max_rows", 1):
df = DataFrame([1, 2], columns=[n])
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index c40935b2cc5dd..e2ceb95d77053 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -26,7 +26,7 @@ def test_to_csv_with_single_column(self):
"""
with tm.ensure_clean("test.csv") as path:
df1.to_csv(path, header=None, index=None)
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
@@ -36,7 +36,7 @@ def test_to_csv_with_single_column(self):
"""
with tm.ensure_clean("test.csv") as path:
df2.to_csv(path, header=None, index=None)
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected2
def test_to_csv_defualt_encoding(self):
@@ -58,7 +58,7 @@ def test_to_csv_quotechar(self):
with tm.ensure_clean("test.csv") as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
expected = """\
@@ -69,7 +69,7 @@ def test_to_csv_quotechar(self):
with tm.ensure_clean("test.csv") as path:
df.to_csv(path, quoting=1, quotechar="$")
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
with tm.ensure_clean("test.csv") as path:
@@ -86,7 +86,7 @@ def test_to_csv_doublequote(self):
with tm.ensure_clean("test.csv") as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
from _csv import Error
@@ -105,7 +105,7 @@ def test_to_csv_escapechar(self):
with tm.ensure_clean("test.csv") as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar="\\")
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
df = DataFrame({"col": ["a,a", ",bb,"]})
@@ -117,7 +117,7 @@ def test_to_csv_escapechar(self):
with tm.ensure_clean("test.csv") as path:
df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
def test_csv_to_string(self):
@@ -342,7 +342,7 @@ def test_to_csv_string_array_ascii(self):
"""
with tm.ensure_clean("str_test.csv") as path:
df.to_csv(path, encoding="ascii")
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected_ascii
def test_to_csv_string_array_utf8(self):
@@ -356,7 +356,7 @@ def test_to_csv_string_array_utf8(self):
"""
with tm.ensure_clean("unicode_test.csv") as path:
df.to_csv(path, encoding="utf-8")
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected_utf8
def test_to_csv_string_with_lf(self):
@@ -467,7 +467,7 @@ def test_to_csv_write_to_open_file(self):
with open(path, "w") as f:
f.write("manual header\n")
df.to_csv(f, header=None, index=None)
- with open(path, "r") as f:
+ with open(path) as f:
assert f.read() == expected
def test_to_csv_write_to_open_file_with_newline_py3(self):
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index e85fd398964d0..7acdbfd462874 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -137,7 +137,7 @@ def test_to_html_encoding(float_frame, tmp_path):
# GH 28663
path = tmp_path / "test.html"
float_frame.to_html(path, encoding="gbk")
- with open(str(path), "r", encoding="gbk") as f:
+ with open(str(path), encoding="gbk") as f:
assert float_frame.to_html() == f.read()
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index a98644250b328..a93ab6f9cc7aa 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -21,7 +21,7 @@ def test_to_latex_filename(self, float_frame):
with tm.ensure_clean("test.tex") as path:
float_frame.to_latex(path)
- with open(path, "r") as f:
+ with open(path) as f:
assert float_frame.to_latex() == f.read()
# test with utf-8 and encoding option (GH 7061)
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index e2007e07c572a..086c0b7ba08b2 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -591,14 +591,14 @@ def test_decode_number_with_32bit_sign_bit(self, val):
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
- base = "\u00e5".encode("utf-8")
+ base = "\u00e5".encode()
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
- base = "\u00e5".encode("utf-8")
+ base = "\u00e5".encode()
quote = b'"'
escape_input = quote + (base * 1024 * 1024 * 2) + quote
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 7c58afe867440..ae63b6af3a8b6 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -577,7 +577,7 @@ def test_file_handles_mmap(c_parser_only, csv1):
# Don't close user provided file handles.
parser = c_parser_only
- with open(csv1, "r") as f:
+ with open(csv1) as f:
m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
parser.read_csv(m)
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 1d8d5a29686a4..49358fe2ecfe4 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -1726,7 +1726,7 @@ def test_iteration_open_handle(all_parsers):
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
- with open(path, "r") as f:
+ with open(path) as f:
for line in f:
if "CCC" in line:
break
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index de7b3bed034c7..f23b498c7388a 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -27,7 +27,7 @@ def test_bytes_io_input(all_parsers):
def test_read_csv_unicode(all_parsers):
parser = all_parsers
- data = BytesIO("\u0141aski, Jan;1".encode("utf-8"))
+ data = BytesIO("\u0141aski, Jan;1".encode())
result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
expected = DataFrame([["\u0141aski, Jan", 1]])
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index e982667f06f31..127d0dc4c9829 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -173,9 +173,7 @@ def test_read_csv_compat():
def test_bytes_io_input():
- result = read_fwf(
- BytesIO("שלום\nשלום".encode("utf8")), widths=[2, 2], encoding="utf8"
- )
+ result = read_fwf(BytesIO("שלום\nשלום".encode()), widths=[2, 2], encoding="utf8")
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py
index aad18890de3ad..7e7a76e287d32 100644
--- a/pandas/tests/io/pytables/common.py
+++ b/pandas/tests/io/pytables/common.py
@@ -25,7 +25,7 @@ def safe_close(store):
try:
if store is not None:
store.close()
- except IOError:
+ except OSError:
pass
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 85a12a13d19fb..ede8d61490778 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -339,7 +339,7 @@ def test_constructor_bad_file(self, mmap_file):
with pytest.raises(err, match=msg):
icom._MMapWrapper(non_file)
- target = open(mmap_file, "r")
+ target = open(mmap_file)
target.close()
msg = "I/O operation on closed file"
@@ -347,7 +347,7 @@ def test_constructor_bad_file(self, mmap_file):
icom._MMapWrapper(target)
def test_get_attr(self, mmap_file):
- with open(mmap_file, "r") as target:
+ with open(mmap_file) as target:
wrapper = icom._MMapWrapper(target)
attrs = dir(wrapper.mmap)
@@ -360,7 +360,7 @@ def test_get_attr(self, mmap_file):
assert not hasattr(wrapper, "foo")
def test_next(self, mmap_file):
- with open(mmap_file, "r") as target:
+ with open(mmap_file) as target:
wrapper = icom._MMapWrapper(target)
lines = target.readlines()
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 2c93dbb5b6b83..59034e9f3d807 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -114,7 +114,7 @@ def test_to_html_compat(self):
c_idx_names=False,
r_idx_names=False,
)
- .applymap("{0:.3f}".format)
+ .applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
@@ -616,7 +616,7 @@ def try_remove_ws(x):
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
- with open(self.banklist_data, "r") as f:
+ with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 1edcc937f72c3..32a15e6201037 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -281,7 +281,6 @@ def _get_exec(self):
@pytest.fixture(params=[("io", "data", "csv", "iris.csv")])
def load_iris_data(self, datapath, request):
- import io
iris_csv_file = datapath(*request.param)
@@ -291,7 +290,7 @@ def load_iris_data(self, datapath, request):
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
- with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
+ with open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py
index ce13762ea8f86..82e0e52c089a2 100644
--- a/pandas/tests/reshape/test_get_dummies.py
+++ b/pandas/tests/reshape/test_get_dummies.py
@@ -386,7 +386,7 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
"get_dummies_kwargs,expected",
[
(
- {"data": DataFrame(({"ä": ["a"]}))},
+ {"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index 0a7dfbee4e672..5c4d7e191d1bb 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -28,9 +28,9 @@ def test_format():
assert format(NA, ">10") == " <NA>"
assert format(NA, "xxx") == "<NA>" # NA is flexible, accept any format spec
- assert "{}".format(NA) == "<NA>"
- assert "{:>10}".format(NA) == " <NA>"
- assert "{:xxx}".format(NA) == "<NA>"
+ assert f"{NA}" == "<NA>"
+ assert f"{NA:>10}" == " <NA>"
+ assert f"{NA:xxx}" == "<NA>"
def test_truthiness():
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index e39083b709f38..6ba55ce3c74b9 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -180,7 +180,7 @@ def test_td64_summation_overflow(self):
# mean
result = (s - s.min()).mean()
- expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum())
+ expected = pd.Timedelta((pd.TimedeltaIndex(s - s.min()).asi8 / len(s)).sum())
# the computation is converted to float so
# might be some loss of precision
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 8ac0a55e63cd1..1b5fddaf14335 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -49,7 +49,7 @@ class TestSeriesConstructors:
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
- (lambda: Series((_ for _ in [])), False), # creates a RangeIndex
+ (lambda: Series(_ for _ in []), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
@@ -222,8 +222,7 @@ def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
- for i in range(10):
- yield i
+ yield from range(10)
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index bcc0b18134dad..ae89e16ca7667 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -137,13 +137,13 @@ def test_astype_str_cast_dt64(self):
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
- expected = Series([str("2010-01-04")])
+ expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
- expected = Series([str("2010-01-04 00:00:00-05:00")])
+ expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
@@ -152,7 +152,7 @@ def test_astype_str_cast_td64(self):
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
- expected = Series([str("1 days")])
+ expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
@@ -167,7 +167,7 @@ def test_astype_unicode(self):
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
- test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
+ test_series.append(Series(["野菜食べないとやばい".encode()]))
for s in test_series:
res = s.astype("unicode")
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 708118e950686..b12ebd58e6a7b 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -66,12 +66,11 @@ def test_from_csv(self, datetime_series, string_series):
tm.assert_series_equal(check_series, series)
def test_to_csv(self, datetime_series):
- import io
with tm.ensure_clean() as path:
datetime_series.to_csv(path, header=False)
- with io.open(path, newline=None) as f:
+ with open(path, newline=None) as f:
lines = f.readlines()
assert lines[1] != "\n"
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 62ec6b9ef07af..b654e27737359 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -212,7 +212,7 @@ def find_titles(rst_file: str) -> Iterable[Tuple[str, int]]:
The corresponding line number of the heading.
"""
- with open(rst_file, "r") as fd:
+ with open(rst_file) as fd:
previous_line = ""
for i, line in enumerate(fd):
line = line[:-1]
@@ -250,10 +250,9 @@ def find_rst_files(source_paths: List[str]) -> Iterable[str]:
elif directory_address.endswith(".rst"):
yield directory_address
else:
- for filename in glob.glob(
+ yield from glob.glob(
pathname=f"{directory_address}/**/*.rst", recursive=True
- ):
- yield filename
+ )
def main(source_paths: List[str], output_format: str) -> int:
diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py
index 4a0e859535215..b6ffab1482bbc 100755
--- a/scripts/validate_unwanted_patterns.py
+++ b/scripts/validate_unwanted_patterns.py
@@ -447,7 +447,7 @@ def main(
if os.path.isfile(source_path):
file_path = source_path
- with open(file_path, "r") as file_obj:
+ with open(file_path) as file_obj:
for line_number, msg in function(file_obj):
is_failed = True
print(
@@ -466,7 +466,7 @@ def main(
continue
file_path = os.path.join(subdir, file_name)
- with open(file_path, "r") as file_obj:
+ with open(file_path) as file_obj:
for line_number, msg in function(file_obj):
is_failed = True
print(
diff --git a/setup.py b/setup.py
index a8dfeb0974195..8f447d5c38169 100755
--- a/setup.py
+++ b/setup.py
@@ -99,7 +99,7 @@ def render_templates(cls, pxifiles):
# if .pxi.in is not updated, no need to output .pxi
continue
- with open(pxifile, "r") as f:
+ with open(pxifile) as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
diff --git a/versioneer.py b/versioneer.py
index 5882349f65f0b..65c9523ba5573 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -349,7 +349,7 @@
import sys
-class VersioneerConfig(object):
+class VersioneerConfig:
pass
@@ -398,7 +398,7 @@ def get_config_from_root(root):
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
- with open(setup_cfg, "r") as f:
+ with open(setup_cfg) as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
@@ -451,7 +451,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
- except EnvironmentError:
+ except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
@@ -461,7 +461,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
return None
else:
if verbose:
- print("unable to find command, tried %s" % (commands,))
+ print(f"unable to find command, tried {commands}")
return None
stdout = p.communicate()[0].strip().decode()
@@ -946,7 +946,7 @@ def git_get_keywords(versionfile_abs):
# _version.py.
keywords = {}
try:
- f = open(versionfile_abs, "r")
+ f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -957,7 +957,7 @@ def git_get_keywords(versionfile_abs):
if mo:
keywords["full"] = mo.group(1)
f.close()
- except EnvironmentError:
+ except OSError:
pass
return keywords
@@ -1072,9 +1072,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
- full_tag,
- tag_prefix,
+ pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
+ full_tag, tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
@@ -1111,13 +1110,13 @@ def do_vcs_install(manifest_in, versionfile_source, ipy):
files.append(versioneer_file)
present = False
try:
- f = open(".gitattributes", "r")
+ f = open(".gitattributes")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
- except EnvironmentError:
+ except OSError:
pass
if not present:
f = open(".gitattributes", "a+")
@@ -1171,7 +1170,7 @@ def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
- except EnvironmentError:
+ except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
@@ -1187,7 +1186,7 @@ def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
- print("set %s to '%s'" % (filename, versions["version"]))
+ print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
@@ -1399,7 +1398,7 @@ def get_versions(verbose=False):
try:
ver = versions_from_file(versionfile_abs)
if verbose:
- print("got version from file %s %s" % (versionfile_abs, ver))
+ print(f"got version from file {versionfile_abs} {ver}")
return ver
except NotThisMethod:
pass
@@ -1619,11 +1618,7 @@ def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
- except (
- EnvironmentError,
- configparser.NoSectionError,
- configparser.NoOptionError,
- ) as e:
+ except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
@@ -1648,9 +1643,9 @@ def do_setup():
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
- with open(ipy, "r") as f:
+ with open(ipy) as f:
old = f.read()
- except EnvironmentError:
+ except OSError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
@@ -1669,12 +1664,12 @@ def do_setup():
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
- with open(manifest_in, "r") as f:
+ with open(manifest_in) as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
- except EnvironmentError:
+ except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
# (https://docs.python.org/2/distutils/sourcedist.html#commands), so
@@ -1707,7 +1702,7 @@ def scan_setup_py():
found = set()
setters = False
errors = 0
- with open("setup.py", "r") as f:
+ with open("setup.py") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
| Ran `pyupgrade` on all files unchecked in #36450. The formatter pre-commit
hook had no complaints; removed unnecessary imports from the following files
to satisfy the `flake8` hook:
```
pandas/tests/io/test_sql.py
pandas/tests/series/test_io.py
```
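For reference, the classes of rewrites `pyupgrade` applied here, distilled from the diff (illustrative sketch):
```python
# mode "r" is the default positional argument, so pyupgrade drops it
# before: open(path, "r")        after: open(path)

# "utf-8" is the default codec for str.encode, so the argument is dropped
assert "\u00e5".encode() == "\u00e5".encode("utf-8")

# EnvironmentError / IOError are aliases of OSError on Python 3
assert EnvironmentError is OSError and IOError is OSError

# %-formatting and str.format become f-strings where possible
commands = ["git"]
assert f"unable to find command, tried {commands}" == (
    "unable to find command, tried %s" % (commands,)
)
```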
- [x] closes #36450
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36457 | 2020-09-18T18:52:33Z | 2020-09-18T22:20:32Z | 2020-09-18T22:20:32Z | 2020-09-19T16:48:19Z |
[TST]: Wrong Corr with Timedelta index | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 05996efb6d332..ff25c4f2551f2 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -499,6 +499,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.groupby.rolling` returning wrong values with partial centered window (:issue:`36040`).
- Bug in :meth:`DataFrameGroupBy.rolling` returned wrong values with timeaware window containing ``NaN``. Raises ``ValueError`` because windows are not monotonic now (:issue:`34617`)
- Bug in :meth:`Rolling.__iter__` where a ``ValueError`` was not raised when ``min_periods`` was larger than ``window`` (:issue:`37156`)
+- Using :meth:`Rolling.var()` instead of :meth:`Rolling.std()` avoids numerical issues for :meth:`Rolling.corr()` when :meth:`Rolling.var()` is still within floating point precision while :meth:`Rolling.std()` is not (:issue:`31286`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 9136f9398799b..bfc31021a8f87 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1906,8 +1906,10 @@ def _get_corr(a, b):
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
-
- return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
+ # GH 31286: By using var instead of std we can avoid numerical
+ # issues when the result of var is within floating point precision
+ # while std is not.
+ return a.cov(b, **kwargs) / (a.var(**kwargs) * b.var(**kwargs)) ** 0.5
return flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 02365906c55bb..2c8439aae75e5 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1073,3 +1073,17 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
result = getattr(df.rolling(indexer), method)()
expected = DataFrame({"values": expected})
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("index", "window"),
+ [([0, 1, 2, 3, 4], 2), (pd.date_range("2001-01-01", freq="D", periods=5), "2D")],
+)
+def test_rolling_corr_timedelta_index(index, window):
+ # GH: 31286
+ x = Series([1, 2, 3, 4, 5], index=index)
+ y = x.copy()
+ x[0:2] = 0.0
+ result = x.rolling(window).corr(y)
+ expected = Series([np.nan, np.nan, 1, 1, 1], index=index)
+ tm.assert_almost_equal(result, expected)
| - [x] xref #31286 (closes the issue, if there is no way around the numerical issues)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
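For context, a minimal reproduction along the lines of the new test (sketch):
```python
import numpy as np
import pandas as pd
import pandas._testing as tm

x = pd.Series([1, 2, 3, 4, 5], index=pd.date_range("2001-01-01", freq="D", periods=5))
y = x.copy()
x[0:2] = 0.0

# With the var-based formula the time-based window gives the exact result;
# the previous std-based formula produced wrong values here (GH 31286).
result = x.rolling("2D").corr(y)
tm.assert_almost_equal(result, pd.Series([np.nan, np.nan, 1, 1, 1], index=x.index))
```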
| https://api.github.com/repos/pandas-dev/pandas/pulls/36454 | 2020-09-18T16:40:00Z | 2020-10-26T14:49:46Z | 2020-10-26T14:49:46Z | 2020-10-26T16:50:56Z |
CLN Upgrade pandas/core syntax | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 2073f110d536f..b1f98199f9fba 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -570,8 +570,7 @@ def __iter__(self):
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
- for v in converted:
- yield v
+ yield from converted
def astype(self, dtype, copy=True):
# We handle
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 853f7bb0b0d81..c88af77ea6189 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1427,7 +1427,7 @@ def sparse_arithmetic_method(self, other):
# TODO: look into _wrap_result
if len(self) != len(other):
raise AssertionError(
- (f"length mismatch: {len(self)} vs. {len(other)}")
+ f"length mismatch: {len(self)} vs. {len(other)}"
)
if not isinstance(other, SparseArray):
dtype = getattr(other, "dtype", None)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 968fb180abcd0..b860c83f89cbc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -62,8 +62,7 @@ def flatten(l):
"""
for el in l:
if iterable_not_string(el):
- for s in flatten(el):
- yield s
+ yield from flatten(el)
else:
yield el
@@ -434,10 +433,8 @@ def random_state(state=None):
return np.random
else:
raise ValueError(
- (
- "random_state must be an integer, array-like, a BitGenerator, "
- "a numpy RandomState, or None"
- )
+ "random_state must be an integer, array-like, a BitGenerator, "
+ "a numpy RandomState, or None"
)
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 09fc53716dda9..8c56f02c8d3cc 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -153,7 +153,7 @@ def _preparse(
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), "f must be callable"
- return tokenize.untokenize((f(x) for x in tokenize_string(source)))
+ return tokenize.untokenize(f(x) for x in tokenize_string(source))
def _is_type(t):
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 8dd7c1a22d0ae..d876c655421ef 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -554,7 +554,7 @@ def __init__(
else:
w = _validate_where(w)
where[idx] = w
- _where = " & ".join((f"({w})" for w in com.flatten(where)))
+ _where = " & ".join(f"({w})" for w in com.flatten(where))
else:
_where = where
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cc18b8681200f..0b9021b094cd7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1681,10 +1681,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
- (
- f"The {label_axis_name} label '{key}' "
- f"is not unique.{multi_message}"
- )
+ f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
)
return values
@@ -1725,10 +1722,8 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):
if invalid_keys:
raise ValueError(
- (
- "The following keys are not valid labels or "
- f"levels for axis {axis}: {invalid_keys}"
- )
+ "The following keys are not valid labels or "
+ f"levels for axis {axis}: {invalid_keys}"
)
# Compute levels and labels to drop
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 9cfd13f95ca0e..2387427d15670 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -93,15 +93,8 @@ def _gotitem(self, key, ndim, subset=None):
)
series_apply_allowlist = (
- (
- common_apply_allowlist
- | {
- "nlargest",
- "nsmallest",
- "is_monotonic_increasing",
- "is_monotonic_decreasing",
- }
- )
+ common_apply_allowlist
+ | {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
) | frozenset(["dtype", "unique"])
dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"])
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index a931221ef3ce1..bbccd22f2ae85 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1212,7 +1212,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
return result
else:
- all_indexed_same = all_indexes_same((x.index for x in values))
+ all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 222ae589ea7fc..525caab7564a3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -504,7 +504,7 @@ def _maybe_check_unique(self):
if not self.is_unique:
msg = """Index has duplicates."""
duplicates = self._format_duplicate_message()
- msg += "\n{}".format(duplicates)
+ msg += f"\n{duplicates}"
raise DuplicateLabelError(msg)
@@ -4315,10 +4315,8 @@ def identical(self, other) -> bool:
return (
self.equals(other)
and all(
- (
- getattr(self, c, None) == getattr(other, c, None)
- for c in self._comparables
- )
+ getattr(self, c, None) == getattr(other, c, None)
+ for c in self._comparables
)
and type(self) == type(other)
)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 1ab40a76b30ff..5aa72bb838756 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -398,7 +398,7 @@ def _partial_date_slice(
if len(self) and (
(use_lhs and t1 < self[0] and t2 < self[0])
- or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
+ or (use_rhs and t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d95355589fd0c..5a6518995c554 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1837,7 +1837,7 @@ def _get_single_indexer(join_key, index, sort: bool = False):
def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool = False):
if len(join_keys) > 1:
if not (
- (isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels)
+ isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels
):
raise AssertionError(
"If more than one join key is given then "
| xref #36450 | https://api.github.com/repos/pandas-dev/pandas/pulls/36453 | 2020-09-18T16:32:11Z | 2020-09-18T19:33:01Z | 2020-09-18T19:33:01Z | 2020-10-10T14:14:44Z |
ASV: added benchmark tests for DataFrame.to_numpy() and .values | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 44f71b392c0eb..70d90ded84545 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -219,6 +219,46 @@ def time_to_html_mixed(self):
self.df2.to_html()
+class ToNumpy:
+ def setup(self):
+ N = 10000
+ M = 10
+ self.df_tall = DataFrame(np.random.randn(N, M))
+ self.df_wide = DataFrame(np.random.randn(M, N))
+ self.df_mixed_tall = self.df_tall.copy()
+ self.df_mixed_tall["foo"] = "bar"
+ self.df_mixed_tall[0] = period_range("2000", periods=N)
+ self.df_mixed_tall[1] = range(N)
+ self.df_mixed_wide = self.df_wide.copy()
+ self.df_mixed_wide["foo"] = "bar"
+ self.df_mixed_wide[0] = period_range("2000", periods=M)
+ self.df_mixed_wide[1] = range(M)
+
+ def time_to_numpy_tall(self):
+ self.df_tall.to_numpy()
+
+ def time_to_numpy_wide(self):
+ self.df_wide.to_numpy()
+
+ def time_to_numpy_mixed_tall(self):
+ self.df_mixed_tall.to_numpy()
+
+ def time_to_numpy_mixed_wide(self):
+ self.df_mixed_wide.to_numpy()
+
+ def time_values_tall(self):
+ self.df_tall.values
+
+ def time_values_wide(self):
+ self.df_wide.values
+
+ def time_values_mixed_tall(self):
+ self.df_mixed_tall.values
+
+ def time_values_mixed_wide(self):
+ self.df_mixed_wide.values
+
+
class Repr:
def setup(self):
nrows = 10000
| - [x] closes #35023
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Adds benchmarks with single dtype and mixed frames of varying sizes.
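To compare these against another commit locally, something like ``asv continuous -f 1.1 upstream/master HEAD -b frame_methods.ToNumpy`` (run from ``asv_bench/``) should pick up just the new class; command sketched from the usual pandas benchmark workflow, so adjust to your setup.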
xref #34999 | https://api.github.com/repos/pandas-dev/pandas/pulls/36452 | 2020-09-18T15:08:47Z | 2020-09-19T19:55:02Z | 2020-09-19T19:55:01Z | 2020-09-20T03:48:42Z |
CLN: 35925 rm trailing commas | diff --git a/pandas/tests/indexes/base_class/test_indexing.py b/pandas/tests/indexes/base_class/test_indexing.py
index 196c0401a72be..b2fa8f31ee5ec 100644
--- a/pandas/tests/indexes/base_class/test_indexing.py
+++ b/pandas/tests/indexes/base_class/test_indexing.py
@@ -14,7 +14,7 @@ def test_get_slice_bounds_within(self, kind, side, expected):
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize(
- "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)],
+ "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)]
)
def test_get_slice_bounds_outside(self, kind, side, expected, data, bound):
index = Index(data)
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index aa6b395176b06..675ae388a28a4 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -411,9 +411,7 @@ def test_sort_values_invalid_na_position(index_with_missing, na_position):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "last"]:
- with pytest.raises(
- ValueError, match=f"invalid na_position: {na_position}",
- ):
+ with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index e1623a14c333e..7fa7a571d2571 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -699,9 +699,7 @@ def test_get_slice_bounds_within(self, kind, side, expected):
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side", ["left", "right"])
- @pytest.mark.parametrize(
- "bound, expected", [(-1, 0), (10, 6)],
- )
+ @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)])
def test_get_slice_bounds_outside(self, kind, side, expected, bound):
index = Index(range(6))
result = index.get_slice_bound(bound, kind=kind, side=side)
| #35925
Checked the following files to make sure they are ready for `black`:
- /pandas/tests/indexes/base_class/test_indexing.py
- /pandas/tests/indexes/test_common.py
- /pandas/tests/indexes/test_numeric.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/36446 | 2020-09-18T09:58:06Z | 2020-09-18T13:00:16Z | 2020-09-18T13:00:16Z | 2020-09-18T13:00:20Z |
BUG: inconsistent replace | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index c63a78c76572f..03a6df95f9f1c 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -34,6 +34,7 @@ Fixed regressions
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
+- Fixed regression in :meth:`DataFrame.replace` inconsistent replace when using a float in the replace method (:issue:`35376`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
- Fixed regression in :meth:`DataFrame.apply` with ``raw=True`` and user-function returning string (:issue:`35940`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index f18bc4d0bcf85..8c1348b51589e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -36,6 +36,7 @@
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
+ is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
@@ -2067,7 +2068,9 @@ def _can_hold_element(self, element: Any) -> bool:
and not issubclass(tipo.type, (np.datetime64, np.timedelta64))
and self.dtype.itemsize >= tipo.itemsize
)
- return is_integer(element)
+ # We have not inferred an integer from the dtype
+ # check if we have a builtin int or a float equal to an int
+ return is_integer(element) or (is_float(element) and element.is_integer())
class DatetimeLikeBlockMixin:
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index a77753ed9f9d0..a9cf840470ae0 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -974,6 +974,31 @@ def test_replace_for_new_dtypes(self, datetime_frame):
}
),
),
+ # GH 35376
+ (
+ DataFrame([[1, 1.0], [2, 2.0]]),
+ 1.0,
+ 5,
+ DataFrame([[5, 5.0], [2, 2.0]]),
+ ),
+ (
+ DataFrame([[1, 1.0], [2, 2.0]]),
+ 1,
+ 5,
+ DataFrame([[5, 5.0], [2, 2.0]]),
+ ),
+ (
+ DataFrame([[1, 1.0], [2, 2.0]]),
+ 1.0,
+ 5.0,
+ DataFrame([[5, 5.0], [2, 2.0]]),
+ ),
+ (
+ DataFrame([[1, 1.0], [2, 2.0]]),
+ 1,
+ 5.0,
+ DataFrame([[5, 5.0], [2, 2.0]]),
+ ),
],
)
def test_replace_dtypes(self, frame, to_replace, value, expected):
| - [x] tests added
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] release note in `doc/source/whatsnew/v1.1.3.rst`
Fixes `replace` on IntBlock when it is called with an integer-valued float (such as 1.0).
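A reproduction distilled from the new test cases (sketch):
```python
import pandas as pd

df = pd.DataFrame([[1, 1.0], [2, 2.0]])  # int column 0, float column 1

# Previously the int column was skipped when to_replace was the float 1.0;
# with this fix both columns are replaced consistently.
print(df.replace(1.0, 5))
#    0    1
# 0  5  5.0
# 1  2  2.0
```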
closes #35376 | https://api.github.com/repos/pandas-dev/pandas/pulls/36444 | 2020-09-18T06:50:27Z | 2020-09-26T01:25:20Z | 2020-09-26T01:25:19Z | 2020-09-26T08:26:17Z |
CI: fix gbq test #36436 | diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml
index d031dc1cc062f..7d5104a58ce83 100644
--- a/ci/deps/travis-37-cov.yaml
+++ b/ci/deps/travis-37-cov.yaml
@@ -1,6 +1,5 @@
name: pandas-dev
channels:
- - defaults
- conda-forge
dependencies:
- python=3.7.*
@@ -15,7 +14,6 @@ dependencies:
# pandas dependencies
- beautifulsoup4
- botocore>=1.11
- - cython>=0.29.16
- dask
- fastparquet>=0.3.2
- fsspec>=0.7.4
@@ -31,16 +29,18 @@ dependencies:
- odfpy
- openpyxl
- pandas-gbq
+ - google-cloud-bigquery>=1.27.2 # GH 36436
- psycopg2
- pyarrow>=0.15.0
- - pymysql
+ - pymysql=0.7.11
- pytables
- python-snappy
+ - python-dateutil
- pytz
- s3fs>=0.4.0
- scikit-learn
- scipy
- - sqlalchemy
+ - sqlalchemy=1.3.0
- statsmodels
- xarray
- xlrd
@@ -51,5 +51,4 @@ dependencies:
- brotlipy
- coverage
- pandas-datareader
- - python-dateutil
- pyxlsb
diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml
index 8a0b5b043ceca..cd6341e80be24 100644
--- a/ci/deps/travis-37-locale.yaml
+++ b/ci/deps/travis-37-locale.yaml
@@ -25,10 +25,10 @@ dependencies:
- numexpr
- numpy
- openpyxl
- - pandas-gbq=0.12.0
+ - pandas-gbq
+ - google-cloud-bigquery>=1.27.2 # GH 36436
- pyarrow>=0.17
- psycopg2=2.7
- - pyarrow>=0.15.0 # GH #35813
- pymysql=0.7.11
- pytables
- python-dateutil
| - [x] closes #36436
| https://api.github.com/repos/pandas-dev/pandas/pulls/36443 | 2020-09-18T06:06:52Z | 2020-09-18T21:30:42Z | 2020-09-18T21:30:42Z | 2020-09-23T19:39:08Z |
API: membership checks on ExtensionArray containing NA values | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6f046d3a9379d..825c5367a8b37 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -752,6 +752,7 @@ ExtensionArray
- Fixed bug when applying a NumPy ufunc with multiple outputs to an :class:`.IntegerArray` returning None (:issue:`36913`)
- Fixed an inconsistency in :class:`.PeriodArray`'s ``__init__`` signature to those of :class:`.DatetimeArray` and :class:`.TimedeltaArray` (:issue:`37289`)
- Reductions for :class:`.BooleanArray`, :class:`.Categorical`, :class:`.DatetimeArray`, :class:`.FloatingArray`, :class:`.IntegerArray`, :class:`.PeriodArray`, :class:`.TimedeltaArray`, and :class:`.PandasArray` are now keyword-only methods (:issue:`37541`)
+- Fixed a bug where a ``TypeError`` was wrongly raised if a membership check was made on an ``ExtensionArray`` containing nan-like values (:issue:`37867`)
Other
^^^^^
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 448025e05422d..76b7877b0ac70 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -37,6 +37,7 @@
is_array_like,
is_dtype_equal,
is_list_like,
+ is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -354,6 +355,23 @@ def __iter__(self):
for i in range(len(self)):
yield self[i]
+ def __contains__(self, item) -> bool:
+ """
+ Return for `item in self`.
+ """
+ # GH37867
+ # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA]
+ # would raise a TypeError. The implementation below works around that.
+ if is_scalar(item) and isna(item):
+ if not self._can_hold_na:
+ return False
+ elif item is self.dtype.na_value or isinstance(item, self.dtype.type):
+ return self.isna().any()
+ else:
+ return False
+ else:
+ return (item == self).any()
+
def __eq__(self, other: Any) -> ArrayLike:
"""
Return for `self == other` (element-wise equality).
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index 12426a0c92c55..922b3b94c16c1 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -50,6 +50,10 @@ def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
+ @pytest.mark.xfail(raises=AssertionError, reason="Not implemented yet")
+ def test_contains(self, data, data_missing, nulls_fixture):
+ super().test_contains(data, data_missing, nulls_fixture)
+
class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
def test_from_dtype(self, data):
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 9ae4b01508d79..d7997310dde3d 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -29,6 +29,29 @@ def test_can_hold_na_valid(self, data):
# GH-20761
assert data._can_hold_na is True
+ def test_contains(self, data, data_missing, nulls_fixture):
+ # GH-37867
+ # Tests for membership checks. Membership checks for nan-likes is tricky and
+ # the settled on rule is: `nan_like in arr` is True if nan_like is
+ # arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.
+
+ na_value = data.dtype.na_value
+ # ensure data without missing values
+ data = data[~data.isna()]
+
+ # first elements are non-missing
+ assert data[0] in data
+ assert data_missing[0] in data_missing
+
+ # check the presence of na_value
+ assert na_value in data_missing
+ assert na_value not in data
+
+ if nulls_fixture is not na_value:
+ # the data can never contain other nan-likes than na_value
+ assert nulls_fixture not in data
+ assert nulls_fixture not in data_missing
+
def test_memory_usage(self, data):
s = pd.Series(data)
result = s.memory_usage(index=False)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 9ede9c7fbd0fd..a713550dafa5c 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -155,6 +155,14 @@ def __setitem__(self, key, value):
def __len__(self) -> int:
return len(self._data)
+ def __contains__(self, item) -> bool:
+ if not isinstance(item, decimal.Decimal):
+ return False
+ elif item.is_nan():
+ return self.isna().any()
+ else:
+ return super().__contains__(item)
+
@property
def nbytes(self) -> int:
n = len(self)
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 74ca341e27bf8..3a5e49796c53b 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -143,6 +143,13 @@ def test_custom_asserts(self):
with pytest.raises(AssertionError, match=msg):
self.assert_frame_equal(a.to_frame(), b.to_frame())
+ @pytest.mark.xfail(
+ reason="comparison method not implemented for JSONArray (GH-37867)"
+ )
+ def test_contains(self, data):
+ # GH-37867
+ super().test_contains(data)
+
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 95f338cbc3240..d03a9ab6b2588 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -87,6 +87,28 @@ def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
+ def test_contains(self, data, data_missing, nulls_fixture):
+ # GH-37867
+ # na value handling in Categorical.__contains__ is deprecated.
+ # See base.BaseInterfaceTests.test_contains for more details.
+
+ na_value = data.dtype.na_value
+ # ensure data without missing values
+ data = data[~data.isna()]
+
+ # first elements are non-missing
+ assert data[0] in data
+ assert data_missing[0] in data_missing
+
+ # check the presence of na_value
+ assert na_value in data_missing
+ assert na_value not in data
+
+ # Categoricals can contain other nan-likes than na_value
+ if nulls_fixture is not na_value:
+ assert nulls_fixture not in data
+ assert nulls_fixture in data_missing # this line differs from super method
+
class TestConstructors(base.BaseConstructorsTests):
pass
| Membership checks on ExtensionArrays containing `NA` values raise a `TypeError` in some circumstances (but not in others):
```python
>>> arr1 = pd.array(["a", pd.NA])
>>> arr2 = pd.array([pd.NA, "a"])
>>> "a" in arr1
True # ok
>>> "a" in arr2
TypeError: boolean value of NA is ambiguous # not ok
>>> pd.NA in arr1
TypeError: boolean value of NA is ambiguous # not ok
>>> pd.NA in arr2
True # ok
```
Overall, the failures look quite arbitrary. This PR fixes the problem by adding a custom `__contains__` method on `ExtensionArray`.
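Sketch of the behaviour with the custom `__contains__`, per the rule in the new tests:
```python
>>> arr = pd.array(["a", pd.NA])
>>> pd.NA in arr          # the dtype's na_value, and missing values present
True
>>> "a" in arr
True
>>> float("nan") in arr   # a nan-like that is not the dtype's na_value
False
```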
I assume that we want `pd.NA in arr1` to keep returning True. Note however that `np.nan in np.array([np.nan])` returns False, so pandas' behaviour is different. | https://api.github.com/repos/pandas-dev/pandas/pulls/37867 | 2020-11-15T17:22:18Z | 2020-11-29T19:11:30Z | 2020-11-29T19:11:30Z | 2020-12-03T19:11:39Z
CLN: share some more datetimelike index methods | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 0ce32fcd822e0..ec36e53b97b3c 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1554,6 +1554,9 @@ def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
# --------------------------------------------------------------
# Frequency Methods
+ def _maybe_clear_freq(self):
+ self._freq = None
+
def _with_freq(self, freq):
"""
Helper to get a view on the same data, with a new freq.
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a05dc717f83c1..b9a6df10d3630 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -474,9 +474,6 @@ def _check_compatible_with(self, other, setitem: bool = False):
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
- def _maybe_clear_freq(self):
- self._freq = None
-
# -----------------------------------------------------------------
# Descriptive Properties
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index d9ecbc874cd59..035e6e84c6ec8 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -313,9 +313,6 @@ def _check_compatible_with(self, other, setitem: bool = False):
# we don't have anything to validate.
pass
- def _maybe_clear_freq(self):
- self._freq = None
-
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 40a6086f69f85..176fef13094fd 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -104,6 +104,16 @@ class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
def _is_all_dates(self) -> bool:
return True
+ def _shallow_copy(self, values=None, name: Label = lib.no_default):
+ name = self.name if name is lib.no_default else name
+
+ if values is not None:
+ return self._simple_new(values, name=name)
+
+ result = self._simple_new(self._data, name=name)
+ result._cache = self._cache
+ return result
+
# ------------------------------------------------------------------------
# Abstract data attributes
@@ -662,16 +672,6 @@ def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self.name)
- def _shallow_copy(self, values=None, name: Label = lib.no_default):
- name = self.name if name is lib.no_default else name
-
- if values is not None:
- return self._simple_new(values, name=name)
-
- result = self._simple_new(self._data, name=name)
- result._cache = self._cache
- return result
-
# --------------------------------------------------------------------
# Set Operation Methods
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 44c20ad0de848..cff50bf0f1eab 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -4,7 +4,6 @@
import numpy as np
from pandas._libs import index as libindex
-from pandas._libs.lib import no_default
from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick
from pandas._libs.tslibs.parsing import DateParseError, parse_time_string
from pandas._typing import DtypeObj, Label
@@ -278,16 +277,6 @@ def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
- def _shallow_copy(self, values=None, name: Label = no_default):
- name = name if name is not no_default else self.name
-
- if values is not None:
- return self._simple_new(values, name=name)
-
- result = self._simple_new(self._data, name=name)
- result._cache = self._cache
- return result
-
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
@@ -341,7 +330,7 @@ def _mpl_repr(self):
@property
def _formatter_func(self):
- return self.array._formatter(boxed=False)
+ return self._data._formatter(boxed=False)
# ------------------------------------------------------------------------
# Indexing
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index cf5fa4bbb3d75..84390822813cd 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -175,9 +175,7 @@ def _simple_new(cls, values: TimedeltaArray, name: Label = None):
@property
def _formatter_func(self):
- from pandas.io.formats.format import get_format_timedelta64
-
- return get_format_timedelta64(self, box=True)
+ return self._data._formatter()
# -------------------------------------------------------------------
| https://api.github.com/repos/pandas-dev/pandas/pulls/37865 | 2020-11-15T16:37:01Z | 2020-11-18T13:40:38Z | 2020-11-18T13:40:38Z | 2020-11-18T16:21:25Z | |
ENH: Implement cross method for Merge Operations | diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 1333b3a0f0560..a572b8a70a680 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -132,6 +132,9 @@ def time_join_dataframe_index_single_key_small(self, sort):
def time_join_dataframe_index_shuffle_key_bigger_sort(self, sort):
self.df_shuf.join(self.df_key2, on="key2", sort=sort)
+ def time_join_dataframes_cross(self, sort):
+ self.df.loc[:2000].join(self.df_key1, how="cross", sort=sort)
+
class JoinIndex:
def setup(self):
@@ -205,6 +208,9 @@ def time_merge_dataframe_integer_2key(self, sort):
def time_merge_dataframe_integer_key(self, sort):
merge(self.df, self.df2, on="key1", sort=sort)
+ def time_merge_dataframes_cross(self, sort):
+ merge(self.left.loc[:2000], self.right.loc[:2000], how="cross", sort=sort)
+
class I8Merge:
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0c7cd31a10acb..dec0122e12a98 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -255,6 +255,7 @@ Other enhancements
- Improve error reporting for :meth:`DataFrame.merge` when invalid merge column definitions were given (:issue:`16228`)
- Improve numerical stability for :meth:`.Rolling.skew`, :meth:`.Rolling.kurt`, :meth:`Expanding.skew` and :meth:`Expanding.kurt` through implementation of Kahan summation (:issue:`6929`)
- Improved error reporting for subsetting columns of a :class:`.DataFrameGroupBy` with ``axis=1`` (:issue:`37725`)
+- Implement method ``cross`` for :meth:`DataFrame.merge` and :meth:`DataFrame.join` (:issue:`5401`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a5ba803897fc6..bca6e255d7a2b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -205,12 +205,14 @@
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
+When performing a cross merge, no column specifications to merge on are
+allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
-how : {'left', 'right', 'outer', 'inner'}, default 'inner'
+how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
@@ -221,6 +223,11 @@
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
+ * cross: creates the cartesian product from both frames, preserves the order
+ of the left keys.
+
+ .. versionadded:: 1.2.0
+
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
@@ -341,6 +348,44 @@
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
+
+>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
+>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
+>>> df1
+ a b
+0 foo 1
+1 bar 2
+>>> df2
+ a c
+0 foo 3
+1 baz 4
+
+>>> df1.merge(df2, how='inner', on='a')
+ a b c
+0 foo 1 3
+
+>>> df1.merge(df2, how='left', on='a')
+ a b c
+0 foo 1 3.0
+1 bar 2 NaN
+
+>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
+>>> df2 = pd.DataFrame({'right': [7, 8]})
+>>> df1
+ left
+0 foo
+1 bar
+>>> df2
+ right
+0 7
+1 8
+
+>>> df1.merge(df2, how='cross')
+ left right
+0 foo 7
+1 foo 8
+2 bar 7
+3 bar 8
"""
@@ -8083,6 +8128,15 @@ def _join_compat(
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
+ if how == "cross":
+ return merge(
+ self,
+ other,
+ how=how,
+ on=on,
+ suffixes=(lsuffix, rsuffix),
+ sort=sort,
+ )
return merge(
self,
other,
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index cdcd6b19704c4..3b755c40721fb 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -5,6 +5,7 @@
import copy
import datetime
from functools import partial
+import hashlib
import string
from typing import TYPE_CHECKING, Optional, Tuple, cast
import warnings
@@ -643,6 +644,17 @@ def __init__(
self._validate_specification()
+ cross_col = None
+ if self.how == "cross":
+ (
+ self.left,
+ self.right,
+ self.how,
+ cross_col,
+ ) = self._create_cross_configuration(self.left, self.right)
+ self.left_on = self.right_on = [cross_col]
+ self._cross = cross_col
+
# note this function has side effects
(
self.left_join_keys,
@@ -690,8 +702,14 @@ def get_result(self):
self._maybe_restore_index_levels(result)
+ self._maybe_drop_cross_column(result, self._cross)
+
return result.__finalize__(self, method="merge")
+ def _maybe_drop_cross_column(self, result: "DataFrame", cross_col: Optional[str]):
+ if cross_col is not None:
+ result.drop(columns=cross_col, inplace=True)
+
def _indicator_pre_merge(
self, left: "DataFrame", right: "DataFrame"
) -> Tuple["DataFrame", "DataFrame"]:
@@ -1200,9 +1218,50 @@ def _maybe_coerce_merge_keys(self):
typ = rk.categories.dtype if rk_is_cat else object
self.right = self.right.assign(**{name: self.right[name].astype(typ)})
+ def _create_cross_configuration(
+ self, left, right
+ ) -> Tuple["DataFrame", "DataFrame", str, str]:
+ """
+ Creates the configuration to dispatch the cross operation to inner join,
+ e.g. adding a join column and resetting parameters. Join column is added
+ to a new object, no inplace modification
+
+ Parameters
+ ----------
+ left: DataFrame
+ right: DataFrame
+
+ Returns
+ -------
+ a tuple (left, right, how, cross_col) representing the adjusted
+ DataFrames with cross_col, the merge operation set to inner and the column
+ to join over.
+ """
+ cross_col = f"_cross_{hashlib.md5().hexdigest()}"
+ how = "inner"
+ return (
+ left.assign(**{cross_col: 1}),
+ right.assign(**{cross_col: 1}),
+ how,
+ cross_col,
+ )
+
def _validate_specification(self):
+ if self.how == "cross":
+ if (
+ self.left_index
+ or self.right_index
+ or self.right_on is not None
+ or self.left_on is not None
+ or self.on is not None
+ ):
+ raise MergeError(
+ "Can not pass on, right_on, left_on or set right_index=True or "
+ "left_index=True"
+ )
+ return
# Hm, any way to make this logic less complicated??
- if self.on is None and self.left_on is None and self.right_on is None:
+ elif self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
@@ -1266,7 +1325,7 @@ def _validate_specification(self):
'of levels in the index of "left"'
)
self.left_on = [None] * n
- if len(self.right_on) != len(self.left_on):
+ if self.how != "cross" and len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _validate(self, validate: str):
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 7db92eb55fa0b..00ef7a05f5902 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -803,3 +803,15 @@ def test_join_inner_multiindex_deterministic_order():
index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")),
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
+)
+def test_join_cross(input_col, output_cols):
+ # GH#5401
+ left = DataFrame({"a": [1, 3]})
+ right = DataFrame({input_col: [3, 4]})
+ result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
+ expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge_cross.py b/pandas/tests/reshape/merge/test_merge_cross.py
new file mode 100644
index 0000000000000..d6c29ea129027
--- /dev/null
+++ b/pandas/tests/reshape/merge/test_merge_cross.py
@@ -0,0 +1,95 @@
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.core.reshape.merge import MergeError, merge
+
+
+@pytest.mark.parametrize(
+ ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
+)
+def test_merge_cross(input_col, output_cols):
+ # GH#5401
+ left = DataFrame({"a": [1, 3]})
+ right = DataFrame({input_col: [3, 4]})
+ left_copy = left.copy()
+ right_copy = right.copy()
+ result = merge(left, right, how="cross")
+ expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(left, left_copy)
+ tm.assert_frame_equal(right, right_copy)
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"left_index": True},
+ {"right_index": True},
+ {"on": "a"},
+ {"left_on": "a"},
+ {"right_on": "b"},
+ ],
+)
+def test_merge_cross_error_reporting(kwargs):
+ # GH#5401
+ left = DataFrame({"a": [1, 3]})
+ right = DataFrame({"b": [3, 4]})
+ msg = (
+ "Can not pass on, right_on, left_on or set right_index=True or "
+ "left_index=True"
+ )
+ with pytest.raises(MergeError, match=msg):
+ merge(left, right, how="cross", **kwargs)
+
+
+def test_merge_cross_mixed_dtypes():
+ # GH#5401
+ left = DataFrame(["a", "b", "c"], columns=["A"])
+ right = DataFrame(range(2), columns=["B"])
+ result = merge(left, right, how="cross")
+ expected = DataFrame({"A": ["a", "a", "b", "b", "c", "c"], "B": [0, 1, 0, 1, 0, 1]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_merge_cross_more_than_one_column():
+ # GH#5401
+ left = DataFrame({"A": list("ab"), "B": [2, 1]})
+ right = DataFrame({"C": range(2), "D": range(4, 6)})
+ result = merge(left, right, how="cross")
+ expected = DataFrame(
+ {
+ "A": ["a", "a", "b", "b"],
+ "B": [2, 2, 1, 1],
+ "C": [0, 1, 0, 1],
+ "D": [4, 5, 4, 5],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_merge_cross_null_values(nulls_fixture):
+ # GH#5401
+ left = DataFrame({"a": [1, nulls_fixture]})
+ right = DataFrame({"b": ["a", "b"], "c": [1.0, 2.0]})
+ result = merge(left, right, how="cross")
+ expected = DataFrame(
+ {
+ "a": [1, 1, nulls_fixture, nulls_fixture],
+ "b": ["a", "b", "a", "b"],
+ "c": [1.0, 2.0, 1.0, 2.0],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_join_cross_error_reporting():
+ # GH#5401
+ left = DataFrame({"a": [1, 3]})
+ right = DataFrame({"a": [3, 4]})
+ msg = (
+ "Can not pass on, right_on, left_on or set right_index=True or "
+ "left_index=True"
+ )
+ with pytest.raises(MergeError, match=msg):
+ left.join(right, how="cross", on="a")
| - [x] closes #5401
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| This is a first draft of a ``cross`` method for merge operations. As suggested by @jreback, I added a new column and dispatched to ``inner`` as a first naive implementation (see the sketch below). I am currently wondering if there would be a better place to adjust the arguments than the places I selected. I wanted to avoid modifying ``self`` in some method called within the constructor, so I modified the inputs before adding the parameters to ``self``. This currently does not support the ``cross`` method for ``join``. If we get a consensus on where to put the modifications, I will add support for ``join``. I also still have to add more tests and something to the user guide.
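A minimal sketch of that dispatch trick (the real implementation generates a collision-safe key via ``hashlib`` and drops it in ``get_result``; the key name below is a hypothetical placeholder):
```python
import pandas as pd

left = pd.DataFrame({"a": [1, 3]})
right = pd.DataFrame({"b": [3, 4]})

key = "_cross_key"  # hypothetical placeholder name
result = (
    left.assign(**{key: 1})                    # constant join column on a copy
    .merge(right.assign(**{key: 1}), on=key)   # plain inner merge
    .drop(columns=key)                         # remove the helper column
)
# equivalent to: left.merge(right, how="cross")
```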
Have to look into the performance afterwards. Copies of the left and right frames may be a big performance hit for larger frames. | https://api.github.com/repos/pandas-dev/pandas/pulls/37864 | 2020-11-15T16:35:06Z | 2020-11-26T00:31:47Z | 2020-11-26T00:31:47Z | 2020-11-26T21:12:43Z
CLN/TYP: pandas/io/formats/excel.py | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index fe471c6f6f9ac..25885552239d6 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -5,16 +5,16 @@
from functools import reduce
import itertools
import re
-from typing import Callable, Dict, Mapping, Optional, Sequence, Union
+from typing import Callable, Dict, Iterable, Mapping, Optional, Sequence, Union, cast
import warnings
import numpy as np
+from pandas._libs.lib import is_list_like
from pandas._typing import Label, StorageOptions
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import is_float, is_scalar
-from pandas.core.dtypes.generic import ABCIndex
from pandas import DataFrame, Index, MultiIndex, PeriodIndex
import pandas.core.common as com
@@ -29,7 +29,13 @@ class ExcelCell:
__slots__ = __fields__
def __init__(
- self, row: int, col: int, val, style=None, mergestart=None, mergeend=None
+ self,
+ row: int,
+ col: int,
+ val,
+ style=None,
+ mergestart: Optional[int] = None,
+ mergeend: Optional[int] = None,
):
self.row = row
self.col = col
@@ -423,7 +429,7 @@ class ExcelFormatter:
Format string for floating point numbers
cols : sequence, optional
Columns to write
- header : boolean or list of string, default True
+ header : boolean or sequence of str, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
@@ -522,7 +528,7 @@ def _format_value(self, val):
)
return val
- def _format_header_mi(self):
+ def _format_header_mi(self) -> Iterable[ExcelCell]:
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError(
@@ -530,8 +536,7 @@ def _format_header_mi(self):
"index ('index'=False) is not yet implemented."
)
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
- if not (has_aliases or self.header):
+ if not (self._has_aliases or self.header):
return
columns = self.columns
@@ -547,28 +552,30 @@ def _format_header_mi(self):
if self.merge_cells:
# Format multi-index as a merged cells.
- for lnum in range(len(level_lengths)):
- name = columns.names[lnum]
- yield ExcelCell(lnum, coloffset, name, self.header_style)
+ for lnum, name in enumerate(columns.names):
+ yield ExcelCell(
+ row=lnum,
+ col=coloffset,
+ val=name,
+ style=self.header_style,
+ )
for lnum, (spans, levels, level_codes) in enumerate(
zip(level_lengths, columns.levels, columns.codes)
):
values = levels.take(level_codes)
- for i in spans:
- if spans[i] > 1:
- yield ExcelCell(
- lnum,
- coloffset + i + 1,
- values[i],
- self.header_style,
- lnum,
- coloffset + i + spans[i],
- )
- else:
- yield ExcelCell(
- lnum, coloffset + i + 1, values[i], self.header_style
- )
+ for i, span_val in spans.items():
+ spans_multiple_cells = span_val > 1
+ yield ExcelCell(
+ row=lnum,
+ col=coloffset + i + 1,
+ val=values[i],
+ style=self.header_style,
+ mergestart=lnum if spans_multiple_cells else None,
+ mergeend=(
+ coloffset + i + span_val if spans_multiple_cells else None
+ ),
+ )
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
@@ -577,9 +584,8 @@ def _format_header_mi(self):
self.rowcounter = lnum
- def _format_header_regular(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
- if has_aliases or self.header:
+ def _format_header_regular(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
coloffset = 0
if self.index:
@@ -588,17 +594,11 @@ def _format_header_regular(self):
coloffset = len(self.df.index[0])
colnames = self.columns
- if has_aliases:
- # pandas\io\formats\excel.py:593: error: Argument 1 to "len"
- # has incompatible type "Union[Sequence[Optional[Hashable]],
- # bool]"; expected "Sized" [arg-type]
- if len(self.header) != len(self.columns): # type: ignore[arg-type]
- # pandas\io\formats\excel.py:602: error: Argument 1 to
- # "len" has incompatible type
- # "Union[Sequence[Optional[Hashable]], bool]"; expected
- # "Sized" [arg-type]
+ if self._has_aliases:
+ self.header = cast(Sequence, self.header)
+ if len(self.header) != len(self.columns):
raise ValueError(
- f"Writing {len(self.columns)} cols " # type: ignore[arg-type]
+ f"Writing {len(self.columns)} cols "
f"but got {len(self.header)} aliases"
)
else:
@@ -609,7 +609,7 @@ def _format_header_regular(self):
self.rowcounter, colindex + coloffset, colname, self.header_style
)
- def _format_header(self):
+ def _format_header(self) -> Iterable[ExcelCell]:
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
@@ -631,15 +631,14 @@ def _format_header(self):
self.rowcounter += 1
return itertools.chain(gen, gen2)
- def _format_body(self):
+ def _format_body(self) -> Iterable[ExcelCell]:
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
- def _format_regular_rows(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
- if has_aliases or self.header:
+ def _format_regular_rows(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
self.rowcounter += 1
# output index and index_label?
@@ -676,9 +675,8 @@ def _format_regular_rows(self):
yield from self._generate_body(coloffset)
- def _format_hierarchical_rows(self):
- has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
- if has_aliases or self.header:
+ def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
@@ -721,23 +719,20 @@ def _format_hierarchical_rows(self):
fill_value=levels._na_value,
)
- for i in spans:
- if spans[i] > 1:
- yield ExcelCell(
- self.rowcounter + i,
- gcolidx,
- values[i],
- self.header_style,
- self.rowcounter + i + spans[i] - 1,
- gcolidx,
- )
- else:
- yield ExcelCell(
- self.rowcounter + i,
- gcolidx,
- values[i],
- self.header_style,
- )
+ for i, span_val in spans.items():
+ spans_multiple_cells = span_val > 1
+ yield ExcelCell(
+ row=self.rowcounter + i,
+ col=gcolidx,
+ val=values[i],
+ style=self.header_style,
+ mergestart=(
+ self.rowcounter + i + span_val - 1
+ if spans_multiple_cells
+ else None
+ ),
+ mergeend=gcolidx if spans_multiple_cells else None,
+ )
gcolidx += 1
else:
@@ -745,16 +740,21 @@ def _format_hierarchical_rows(self):
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(
- self.rowcounter + idx,
- gcolidx,
- indexcolval,
- self.header_style,
+ row=self.rowcounter + idx,
+ col=gcolidx,
+ val=indexcolval,
+ style=self.header_style,
)
gcolidx += 1
yield from self._generate_body(gcolidx)
- def _generate_body(self, coloffset: int):
+ @property
+ def _has_aliases(self) -> bool:
+ """Whether the aliases for column names are present."""
+ return is_list_like(self.header)
+
+ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
if self.styler is None:
styles = None
else:
@@ -771,7 +771,7 @@ def _generate_body(self, coloffset: int):
xlstyle = self.style_converter(";".join(styles[i, colidx]))
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val, xlstyle)
- def get_formatted_cells(self):
+ def get_formatted_cells(self) -> Iterable[ExcelCell]:
for cell in itertools.chain(self._format_header(), self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
| - [ ] xref #37715
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
- Handle two ``type: ignore`` comments in ``pandas/io/formats/excel.py``
- De-duplicate the ``has_aliases`` variable by extracting a ``_has_aliases`` property
- Add typing for the iterators yielding ``ExcelCell``
- Use named arguments for ``ExcelCell`` in some places (in two of them, de-duplicate by updating the kwargs in place) | https://api.github.com/repos/pandas-dev/pandas/pulls/37862 | 2020-11-15T15:30:45Z | 2020-11-24T13:38:08Z | 2020-11-24T13:38:08Z | 2020-12-03T07:54:23Z
CLN: use putmask_simple instead of putmask | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 967e218078a28..f81e91d4166b7 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -32,6 +32,7 @@
TD64NS_DTYPE,
is_bool_dtype,
is_categorical_dtype,
+ is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
@@ -791,10 +792,9 @@ def replace(
regex=regex,
)
- blocks = self.putmask(mask, value, inplace=inplace)
- blocks = extend_blocks(
- [b.convert(numeric=False, copy=not inplace) for b in blocks]
- )
+ blk = self if inplace else self.copy()
+ blk._putmask_simple(mask, value)
+ blocks = blk.convert(numeric=False, copy=not inplace)
return blocks
def _replace_regex(
@@ -1184,39 +1184,15 @@ def coerce_to_target_dtype(self, other):
# don't coerce float/complex to int
return self
- elif (
- self.is_datetime
- or is_datetime64_dtype(dtype)
- or is_datetime64tz_dtype(dtype)
- ):
-
- # not a datetime
- if not (
- (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))
- and self.is_datetime
- ):
- return self.astype(object)
-
- # don't upcast timezone with different timezone or no timezone
- mytz = getattr(self.dtype, "tz", None)
- othertz = getattr(dtype, "tz", None)
-
- if not tz_compare(mytz, othertz):
- return self.astype(object)
-
- raise AssertionError(
- f"possible recursion in coerce_to_target_dtype: {self} {other}"
- )
+ elif self.is_datetime or is_datetime64_any_dtype(dtype):
+ # The is_dtype_equal check above ensures that at most one of
+ # these two conditions hold, so we must cast to object.
+ return self.astype(object)
elif self.is_timedelta or is_timedelta64_dtype(dtype):
-
- # not a timedelta
- if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
- return self.astype(object)
-
- raise AssertionError(
- f"possible recursion in coerce_to_target_dtype: {self} {other}"
- )
+ # The is_dtype_equal check above ensures that at most one of
+ # these two conditions hold, so we must cast to object.
+ return self.astype(object)
try:
return self.astype(dtype)
@@ -2578,7 +2554,7 @@ def _replace_list(
regex: bool = False,
) -> List["Block"]:
if len(algos.unique(dest_list)) == 1:
- # We got likely here by tiling value inside NDFrame.replace,
+ # We likely got here by tiling value inside NDFrame.replace,
# so un-tile here
return self.replace(src_list, dest_list[0], inplace, regex)
return super()._replace_list(src_list, dest_list, inplace, regex)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37860 | 2020-11-15T04:59:22Z | 2020-11-17T01:18:49Z | 2020-11-17T01:18:49Z | 2020-11-17T01:34:21Z | |
CI: xfail a windows test | diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index c524b21f1be9e..7a367ccab6d52 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -7,6 +7,7 @@
import pandas._config.config as cf
+from pandas.compat import is_platform_windows
from pandas.compat.numpy import np_datetime64_compat
import pandas.util._test_decorators as td
@@ -72,6 +73,11 @@ def test_registering_no_warning(self):
ax.plot(s.index, s.values)
plt.close()
+ @pytest.mark.xfail(
+ is_platform_windows(),
+ reason="Getting two warnings intermittently, see GH#37746",
+ strict=False,
+ )
def test_pandas_plots_register(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
@@ -79,8 +85,10 @@ def test_pandas_plots_register(self):
with tm.assert_produces_warning(None) as w:
s.plot()
- assert len(w) == 0
- plt.close()
+ try:
+ assert len(w) == 0
+ finally:
+ plt.close()
def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")
At least we can fix a subset of the Windows builds. | https://api.github.com/repos/pandas-dev/pandas/pulls/37858 | 2020-11-15T02:41:45Z | 2020-11-15T10:22:41Z | 2020-11-15T10:22:41Z | 2020-11-15T15:15:14Z
CLN: remove something.xlsx | diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 00e41a19a7980..8da9c79160e91 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1353,12 +1353,15 @@ def check_called(func):
del called_write_cells[:]
with pd.option_context("io.excel.xlsx.writer", "dummy"):
- register_writer(DummyClass)
- writer = ExcelWriter("something.xlsx")
- assert isinstance(writer, DummyClass)
- df = tm.makeCustomDataframe(1, 1)
- check_called(lambda: df.to_excel("something.xlsx"))
- check_called(lambda: df.to_excel("something.xls", engine="dummy"))
+ path = "something.xlsx"
+ with tm.ensure_clean(path) as filepath:
+ register_writer(DummyClass)
+ writer = ExcelWriter(filepath)
+ assert isinstance(writer, DummyClass)
+ df = tm.makeCustomDataframe(1, 1)
+ check_called(lambda: df.to_excel(filepath))
+ with tm.ensure_clean("something.xls") as filepath:
+ check_called(lambda: df.to_excel(filepath, engine="dummy"))
@td.skip_if_no("xlrd")
`test_register_writer` in `pandas/tests/io/excel/test_writers.py` didn't expect `something.xls` and `something.xlsx` to ever be created, so it never cleaned them up; the fix wraps each path in `tm.ensure_clean`, as in the sketch below.
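For context, a minimal sketch of that pattern (assuming pandas' internal `tm.ensure_clean` helper and an installed xlsx engine such as `openpyxl`):

```python
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": [1, 2]})

# ensure_clean yields a temporary file path and removes the file on
# exit, so the test no longer leaves something.xlsx lying around.
with tm.ensure_clean("something.xlsx") as path:
    df.to_excel(path)
```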
Since #37639, `ExcelWriter` opens the file on construction, which creates it. | https://api.github.com/repos/pandas-dev/pandas/pulls/37857 | 2020-11-15T01:31:32Z | 2020-11-15T04:14:12Z | 2020-11-15T04:14:12Z | 2020-11-15T04:17:10Z
DEP: update Versioneer | diff --git a/pandas/_version.py b/pandas/_version.py
index d2df063ff3acf..14c2b5c6e7603 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -5,31 +5,36 @@
# that just contains the computed version number.
# This file is released into the public domain. Generated by
-# versioneer-0.15 (https://github.com/warner/python-versioneer)
+# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
+
+"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
-from typing import Callable, Dict
def get_keywords():
+ """Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
- return {"refnames": git_refnames, "full": git_full}
+ git_date = "$Format:%ci$"
+ keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
+ return keywords
class VersioneerConfig:
- pass
+ """Container for Versioneer configuration parameters."""
def get_config():
+ """Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
@@ -43,14 +48,17 @@ def get_config():
class NotThisMethod(Exception):
- pass
+ """Exception raised if a method is not valid for the current scenario."""
+
+HANDLERS = {}
-HANDLERS: Dict[str, Dict[str, Callable]] = {}
+def register_vcs_handler(vcs, method): # decorator
+ """Create decorator to mark a method as the handler of a VCS."""
-def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
- def decorate(f: Callable) -> Callable:
+ def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
@@ -59,7 +67,8 @@ def decorate(f: Callable) -> Callable:
return decorate
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
+ """Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
@@ -69,6 +78,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
p = subprocess.Popen(
[c] + args,
cwd=cwd,
+ env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
@@ -78,58 +88,77 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if e.errno == errno.ENOENT:
continue
if verbose:
- print(f"unable to run {dispcmd}")
+ print("unable to run %s" % dispcmd)
print(e)
- return None
+ return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
- return None
+ return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
- print(f"unable to run {dispcmd} (error)")
- return None
- return stdout
+ print("unable to run %s (error)" % dispcmd)
+ print("stdout was %s" % stdout)
+ return None, p.returncode
+ return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
- # Source tarballs conventionally unpack into a directory that includes
- # both the project name and a version string.
- dirname = os.path.basename(root)
- if not dirname.startswith(parentdir_prefix):
- if verbose:
- print(
- f"guessing rootdir is '{root}', but '{dirname}' "
- f"doesn't start with prefix '{parentdir_prefix}'"
- )
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
- return {
- "version": dirname[len(parentdir_prefix) :],
- "full-revisionid": None,
- "dirty": False,
- "error": None,
- }
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {
+ "version": dirname[len(parentdir_prefix) :],
+ "full-revisionid": None,
+ "dirty": False,
+ "error": None,
+ "date": None,
+ }
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+
+ if verbose:
+ print(
+ "Tried directories %s but none started with prefix %s"
+ % (str(rootdirs), parentdir_prefix)
+ )
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
- with open(versionfile_abs) as fd:
- for line in fd.readlines():
- if line.strip().startswith("git_refnames ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["refnames"] = mo.group(1)
- if line.strip().startswith("git_full ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["full"] = mo.group(1)
+ f = open(versionfile_abs)
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
+ f.close()
except OSError:
pass
return keywords
@@ -137,8 +166,22 @@ def git_get_keywords(versionfile_abs):
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+
+ # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
@@ -159,20 +202,21 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
- print(f"discarding '{','.join(refs - tags)}', no digits")
+ print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
- print(f"likely tags: {','.join(sorted(tags))}")
+ print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
- print(f"picking {r}")
+ print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
+ "date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
@@ -182,34 +226,48 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
+ "date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- # this runs 'git' from the root of the source tree. This only gets called
- # if the git-archive 'subst' keywords were *not* expanded, and
- # _version.py hasn't already been rewritten with a short version string,
- # meaning we're inside a checked out source tree.
-
- if not os.path.exists(os.path.join(root, ".git")):
- if verbose:
- print(f"no .git in {root}")
- raise NotThisMethod("no .git directory")
+ """Get version from 'git describe' in the root of the source tree.
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
- # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
- # if there are no tags, this yields HEX[-dirty] (no NUM)
- describe_out = run_command(
- GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
+ if rc != 0:
+ if verbose:
+ print("Directory %s not under git control" % root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(
+ GITS,
+ [
+ "describe",
+ "--tags",
+ "--dirty",
+ "--always",
+ "--long",
+ "--match",
+ "%s*" % tag_prefix,
+ ],
+ cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
- full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
@@ -236,18 +294,20 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = f"unable to parse git-describe output: '{describe_out}'"
+ pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
- msg = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
if verbose:
- print(msg)
- pieces["error"] = msg
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
+ full_tag,
+ tag_prefix,
+ )
return pieces
-
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
@@ -259,110 +319,129 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
- count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
+ 0
+ ].strip()
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
return pieces
def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
- # now build up version string, with post-release "local version
- # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ """Build up version string, with post-release "local version identifier".
- # exceptions:
- # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
- rendered += f"{pieces['distance']:d}.g{pieces['short']}"
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
else:
# exception #1
- rendered = f"0+untagged.{pieces['distance']:d}.g{pieces['short']}"
- if pieces["dirty"]:
- rendered += ".dirty"
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
- # TAG[.post.devDISTANCE] . No -dirty
-
- # exceptions:
- # 1: no tags. 0.post.devDISTANCE
+ """TAG[.post0.devDISTANCE] -- No -dirty.
+ Exceptions:
+ 1: no tags. 0.post0.devDISTANCE
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
- rendered += f".post.dev{pieces['distance']:d}"
+ rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
- rendered = f"0.post.dev{pieces['distance']:d}"
+ rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
- # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
- # .dev0 sorts backwards (a dirty tree will appear "older" than the
- # corresponding clean one), but you shouldn't be releasing software with
- # -dirty anyways.
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
- rendered += f".post{pieces['distance']:d}"
+ rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
- rendered += f"g{pieces['short']}"
+ rendered += "g%s" % pieces["short"]
else:
# exception #1
- rendered = f"0.pos{pieces['distance']:d}"
+ rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
- rendered += f"+g{pieces['short']}"
+ rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
- # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+ """TAG[.postDISTANCE[.dev0]] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
- rendered += f".post{pieces['distance']:d}"
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
else:
# exception #1
- rendered = f"0.post{pieces['distance']:d}"
- if pieces["dirty"]:
- rendered += ".dev0"
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
return rendered
def render_git_describe(pieces):
- # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
- # --always'
+ """TAG[-DISTANCE-gHEX][-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always'.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
- rendered += f"-{pieces['distance']:d}-g{pieces['short']}"
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
@@ -372,15 +451,17 @@ def render_git_describe(pieces):
def render_git_describe_long(pieces):
- # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
- # --always -long'. The distance/hash is unconditional.
+ """TAG-DISTANCE-gHEX[-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always -long'.
+ The distance/hash is unconditional.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
- rendered += f"-{pieces['distance']:d}-g{pieces['short']}"
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
@@ -390,12 +471,14 @@ def render_git_describe_long(pieces):
def render(pieces, style):
+ """Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
+ "date": None,
}
if not style or style == "default":
@@ -414,17 +497,19 @@ def render(pieces, style):
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
- raise ValueError(f"unknown style '{style}'")
+ raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
+ "date": pieces.get("date"),
}
def get_versions():
+ """Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
@@ -451,6 +536,7 @@ def get_versions():
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
+ "date": None,
}
try:
@@ -470,4 +556,5 @@ def get_versions():
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
+ "date": None,
}
diff --git a/versioneer.py b/versioneer.py
index 171156c2c5315..288464f1efa44 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,19 +1,17 @@
-# Version: 0.15
+# Version: 0.19
+
+"""The Versioneer - like a rocketeer, but for versions.
-"""
The Versioneer
==============
* like a rocketeer, but for versions!
-* https://github.com/warner/python-versioneer
+* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
-* [![Latest Version]
-(https://pypip.in/version/versioneer/badge.svg?style=flat)
-](https://pypi.org/project/versioneer/)
-* [![Build Status]
-(https://travis-ci.org/warner/python-versioneer.png?branch=master)
-](https://travis-ci.org/warner/python-versioneer)
+* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
+* [![Latest Version][pypi-image]][pypi-url]
+* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
@@ -24,9 +22,10 @@
## Quick Install
-* `pip install versioneer` to somewhere to your $PATH
-* add a `[versioneer]` section to your setup.cfg (see below)
+* `pip install versioneer` to somewhere in your $PATH
+* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
+* Verify version information with `python setup.py version`
## Version Identifiers
@@ -58,7 +57,7 @@
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
-uncommitted changes.
+uncommitted changes).
The version identifier is used for multiple purposes:
@@ -85,125 +84,7 @@
## Installation
-First, decide on values for the following configuration variables:
-
-* `VCS`: the version control system you use. Currently accepts "git".
-
-* `style`: the style of version string to be produced. See "Styles" below for
- details. Defaults to "pep440", which looks like
- `TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
-
-* `versionfile_source`:
-
- A project-relative pathname into which the generated version strings should
- be written. This is usually a `_version.py` next to your project's main
- `__init__.py` file, so it can be imported at runtime. If your project uses
- `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
- This file should be checked in to your VCS as usual: the copy created below
- by `setup.py setup_versioneer` will include code that parses expanded VCS
- keywords in generated tarballs. The 'build' and 'sdist' commands will
- replace it with a copy that has just the calculated version string.
-
- This must be set even if your project does not have any modules (and will
- therefore never import `_version.py`), since "setup.py sdist" -based trees
- still need somewhere to record the pre-calculated version strings. Anywhere
- in the source tree should do. If there is a `__init__.py` next to your
- `_version.py`, the `setup.py setup_versioneer` command (described below)
- will append some `__version__`-setting assignments, if they aren't already
- present.
-
-* `versionfile_build`:
-
- Like `versionfile_source`, but relative to the build directory instead of
- the source directory. These will differ when your setup.py uses
- 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
- then you will probably have `versionfile_build='myproject/_version.py'` and
- `versionfile_source='src/myproject/_version.py'`.
-
- If this is set to None, then `setup.py build` will not attempt to rewrite
- any `_version.py` in the built tree. If your project does not have any
- libraries (e.g. if it only builds a script), then you should use
- `versionfile_build = None` and override `distutils.command.build_scripts`
- to explicitly insert a copy of `versioneer.get_version()` into your
- generated script.
-
-* `tag_prefix`:
-
- a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
- If your tags look like 'myproject-1.2.0', then you should use
- tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
- should be an empty string.
-
-* `parentdir_prefix`:
-
- a optional string, frequently the same as tag_prefix, which appears at the
- start of all unpacked tarball filenames. If your tarball unpacks into
- 'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
- just omit the field from your `setup.cfg`.
-
-This tool provides one script, named `versioneer`. That script has one mode,
-"install", which writes a copy of `versioneer.py` into the current directory
-and runs `versioneer.py setup` to finish the installation.
-
-To versioneer-enable your project:
-
-* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
- populating it with the configuration values you decided earlier (note that
- the option names are not case-sensitive):
-
- ````
- [versioneer]
- VCS = git
- style = pep440
- versionfile_source = src/myproject/_version.py
- versionfile_build = myproject/_version.py
- tag_prefix = ""
- parentdir_prefix = myproject-
- ````
-
-* 2: Run `versioneer install`. This will do the following:
-
- * copy `versioneer.py` into the top of your source tree
- * create `_version.py` in the right place (`versionfile_source`)
- * modify your `__init__.py` (if one exists next to `_version.py`) to define
- `__version__` (by calling a function from `_version.py`)
- * modify your `MANIFEST.in` to include both `versioneer.py` and the
- generated `_version.py` in sdist tarballs
-
- `versioneer install` will complain about any problems it finds with your
- `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
- the problems.
-
-* 3: add a `import versioneer` to your setup.py, and add the following
- arguments to the setup() call:
-
- version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
-
-* 4: commit these changes to your VCS. To make sure you won't forget,
- `versioneer install` will mark everything it touched for addition using
- `git add`. Don't forget to add `setup.py` and `setup.cfg` too.
-
-## Post-Installation Usage
-
-Once established, all uses of your tree from a VCS checkout should get the
-current version string. All generated tarballs should include an embedded
-version string (so users who unpack them will not need a VCS tool installed).
-
-If you distribute your project through PyPI, then the release process should
-boil down to two steps:
-
-* 1: git tag 1.0
-* 2: python setup.py register sdist upload
-
-If you distribute it through github (i.e. users use github to generate
-tarballs with `git archive`), the process is:
-
-* 1: git tag 1.0
-* 2: git push; git push --tags
-
-Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
-least one tag in its history.
+See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
@@ -224,6 +105,10 @@
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
+* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
+ commit date in ISO 8601 format. This will be None if the date is not
+ available.
+
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
@@ -262,8 +147,8 @@
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
-Other styles are available. See details.md in the Versioneer source tree for
-descriptions.
+Other styles are available. See [details.md](details.md) in the Versioneer
+source tree for descriptions.
## Debugging
@@ -273,47 +158,83 @@
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
-## Updating Versioneer
+## Known Limitations
-To upgrade your project to a new release of Versioneer, do the following:
+Some situations are known to cause problems for Versioneer. This details the
+most significant ones. More can be found on Github
+[issues page](https://github.com/python-versioneer/python-versioneer/issues).
-* install the new Versioneer (`pip install -U versioneer` or equivalent)
-* edit `setup.cfg`, if necessary, to include any new configuration settings
- indicated by the release notes
-* re-run `versioneer install` in your source tree, to replace
- `SRC/_version.py`
-* commit any changed files
+### Subprojects
+
+Versioneer has limited support for source trees in which `setup.py` is not in
+the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
+two common reasons why `setup.py` might not be in the root:
+
+* Source trees which contain multiple subprojects, such as
+ [Buildbot](https://github.com/buildbot/buildbot), which contains both
+ "master" and "slave" subprojects, each with their own `setup.py`,
+ `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
+ distributions (and upload multiple independently-installable tarballs).
+* Source trees whose main purpose is to contain a C library, but which also
+ provide bindings to Python (and perhaps other languages) in subdirectories.
-### Upgrading to 0.15
+Versioneer will look for `.git` in parent directories, and most operations
+should get the right version string. However `pip` and `setuptools` have bugs
+and implementation details which frequently cause `pip install .` from a
+subproject directory to fail to find a correct version string (so it usually
+defaults to `0+unknown`).
-Starting with this version, Versioneer is configured with a `[versioneer]`
-section in your `setup.cfg` file. Earlier versions required the `setup.py` to
-set attributes on the `versioneer` module immediately after import. The new
-version will refuse to run (raising an exception during import) until you
-have provided the necessary `setup.cfg` section.
+`pip install --editable .` should work correctly. `setup.py install` might
+work too.
-In addition, the Versioneer package provides an executable named
-`versioneer`, and the installation process is driven by running `versioneer
-install`. In 0.14 and earlier, the executable was named
-`versioneer-installer` and was run without an argument.
+Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
+some later version.
-### Upgrading to 0.14
+[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
+this issue. The discussion in
+[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
+issue from the Versioneer side in more detail.
+[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
+[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
+pip to let Versioneer work correctly.
-0.14 changes the format of the version string. 0.13 and earlier used
-hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
-plus-separated "local version" section strings, with dot-separated
-components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
-format, but should be ok with the new one.
+Versioneer-0.16 and earlier only looked for a `.git` directory next to the
+`setup.cfg`, so subprojects were completely unsupported with those releases.
-### Upgrading from 0.11 to 0.12
+### Editable installs with setuptools <= 18.5
-Nothing special.
+`setup.py develop` and `pip install --editable .` allow you to install a
+project into a virtualenv once, then continue editing the source code (and
+test) without re-installing after every change.
-### Upgrading from 0.10 to 0.11
+"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
+convenient way to specify executable scripts that should be installed along
+with the python package.
-You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
-`setup.py setup_versioneer`. This will enable the use of additional
-version-control systems (SVN, etc) in the future.
+These both work as expected when using modern setuptools. When using
+setuptools-18.5 or earlier, however, certain operations will cause
+`pkg_resources.DistributionNotFound` errors when running the entrypoint
+script, which must be resolved by re-installing the package. This happens
+when the install happens with one version, then the egg_info data is
+regenerated while a different version is checked out. Many setup.py commands
+cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
+a different virtualenv), so this can be surprising.
+
+[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
+this one, but upgrading to a newer version of setuptools should probably
+resolve it.
+
+
+## Updating Versioneer
+
+To upgrade your project to a new release of Versioneer, do the following:
+
+* install the new Versioneer (`pip install -U versioneer` or equivalent)
+* edit `setup.cfg`, if necessary, to include any new configuration settings
+ indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
+* re-run `versioneer install` in your source tree, to replace
+ `SRC/_version.py`
+* commit any changed files
## Future Directions
@@ -328,19 +249,30 @@
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
+## Similar projects
+
+* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
+ dependency
+* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
+ versioneer
## License
-To make Versioneer easier to embed, all its code is hereby released into the
-public domain. The `_version.py` that it creates is also in the public
-domain.
+To make Versioneer easier to embed, all its code is dedicated to the public
+domain. The `_version.py` that it creates is also in the public domain.
+Specifically, both are released under the Creative Commons "Public Domain
+Dedication" license (CC0-1.0), as described in
+https://creativecommons.org/publicdomain/zero/1.0/ .
+
+[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
+[pypi-url]: https://pypi.python.org/pypi/versioneer/
+[travis-image]:
+https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
+[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
+import configparser
import errno
import json
import os
@@ -350,12 +282,15 @@
class VersioneerConfig:
- pass
+ """Container for Versioneer configuration parameters."""
def get_root():
- # we require that all commands are run from the project root, i.e. the
- # directory that contains setup.py, setup.cfg, and versioneer.py .
+ """Get the project root directory.
+
+ We require that all commands are run from the project root, i.e. the
+ directory that contains setup.py, setup.cfg, and versioneer.py .
+ """
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
@@ -381,7 +316,9 @@ def get_root():
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
- if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
+ me_dir = os.path.normcase(os.path.splitext(me)[0])
+ vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
+ if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
@@ -392,14 +329,15 @@ def get_root():
def get_config_from_root(root):
+ """Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
- parser = configparser.SafeConfigParser()
+ parser = configparser.ConfigParser()
with open(setup_cfg) as f:
- parser.readfp(f)
+ parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
@@ -413,13 +351,15 @@ def get(parser, name):
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
+ if cfg.tag_prefix in ("''", '""'):
+ cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
- pass
+ """Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
@@ -428,7 +368,10 @@ class NotThisMethod(Exception):
def register_vcs_handler(vcs, method): # decorator
+ """Create decorator to mark a method as the handler of a VCS."""
+
def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
@@ -437,7 +380,8 @@ def decorate(f):
return decorate
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
+ """Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
@@ -447,6 +391,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
p = subprocess.Popen(
[c] + args,
cwd=cwd,
+ env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
@@ -458,24 +403,23 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if verbose:
print("unable to run %s" % dispcmd)
print(e)
- return None
+ return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
- return None
-
+ return None, None
stdout = p.communicate()[0].strip().decode()
-
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
- return None
- return stdout
+ print("stdout was %s" % stdout)
+ return None, p.returncode
+ return stdout, p.returncode
LONG_VERSION_PY[
"git"
-] = r"""
+] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@@ -483,7 +427,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
# that just contains the computed version number.
# This file is released into the public domain. Generated by
-# versioneer-0.15 (https://github.com/warner/python-versioneer)
+# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
+
+"""Git implementation of _version.py."""
import errno
import os
@@ -493,21 +439,24 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
def get_keywords():
+ """Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
- keywords = {"refnames": git_refnames, "full": git_full}
+ git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
+ keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
- pass
+ """Container for Versioneer configuration parameters."""
def get_config():
+ """Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
@@ -521,7 +470,7 @@ def get_config():
class NotThisMethod(Exception):
- pass
+ """Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
@@ -529,7 +478,9 @@ class NotThisMethod(Exception):
def register_vcs_handler(vcs, method): # decorator
+ """Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
@@ -537,14 +488,17 @@ def decorate(f):
return decorate
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
+ env=None):
+ """Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ p = subprocess.Popen([c] + args, cwd=cwd, env=env,
+ stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
@@ -555,37 +509,48 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
- return None
+ return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
- return None
-
+ return None, None
stdout = p.communicate()[0].strip().decode()
-
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
- return None
- return stdout
+ print("stdout was %%s" %% stdout)
+ return None, p.returncode
+ return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
- # Source tarballs conventionally unpack into a directory that includes
- # both the project name and a version string.
- dirname = os.path.basename(root)
- if not dirname.startswith(parentdir_prefix):
- if verbose:
- print("guessing rootdir is '%%s', but '%%s' doesn't start with "
- "prefix '%%s'" %% (root, dirname, parentdir_prefix))
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None}
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None, "date": None}
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+
+ if verbose:
+ print("Tried directories %%s but none started with prefix %%s" %%
+ (str(rootdirs), parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
@@ -602,6 +567,10 @@ def git_get_keywords(versionfile_abs):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
@@ -610,18 +579,32 @@ def git_get_keywords(versionfile_abs):
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+
+ # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
- refs = {r.strip() for r in refnames.strip("()").split(",")}
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
- tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
@@ -630,9 +613,9 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
- tags = {r for r in refs if re.search(r'\d', r)}
+ tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
- print("discarding '%%s', no digits" %% ",".join(refs-tags))
+ print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
@@ -643,41 +626,46 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None
- }
+ "dirty": False, "error": None,
+ "date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags"}
+ "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- # this runs 'git' from the root of the source tree. This only gets called
- # if the git-archive 'subst' keywords were *not* expanded, and
- # _version.py hasn't already been rewritten with a short version string,
- # meaning we're inside a checked out source tree.
-
- if not os.path.exists(os.path.join(root, ".git")):
- if verbose:
- print("no .git in %%s" %% root)
- raise NotThisMethod("no .git directory")
+ """Get version from 'git describe' in the root of the source tree.
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
- # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
- # if there are no tags, this yields HEX[-dirty] (no NUM)
- describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long"],
- cwd=root)
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
+ hide_stderr=True)
+ if rc != 0:
+ if verbose:
+ print("Directory %%s not under git control" %% root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long",
+ "--match", "%%s*" %% tag_prefix],
+ cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
- full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
@@ -728,27 +716,37 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
- count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
pieces["distance"] = int(count_out) # total number of commits
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
+ cwd=root)[0].strip()
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
return pieces
def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
- # now build up version string, with post-release "local version
- # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ """Build up version string, with post-release "local version identifier".
- # exceptions:
- # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -766,30 +764,31 @@ def render_pep440(pieces):
def render_pep440_pre(pieces):
- # TAG[.post.devDISTANCE] . No -dirty
-
- # exceptions:
- # 1: no tags. 0.post.devDISTANCE
+ """TAG[.post0.devDISTANCE] -- No -dirty.
+ Exceptions:
+ 1: no tags. 0.post0.devDISTANCE
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
- rendered += ".post.dev%%d" %% pieces["distance"]
+ rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
- rendered = "0.post.dev%%d" %% pieces["distance"]
+ rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
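(Aside: the `.post.devN` to `.post0.devN` change just above is one of the few behavioral differences in this bump — PEP 440 expects a numeral after `post`. A quick illustration with a hypothetical tag and commit distance:)

```
tag, distance = "v1.2.0", 1185  # hypothetical tag and commit distance

old = "%s.post.dev%d" % (tag, distance)   # versioneer 0.15: v1.2.0.post.dev1185
new = "%s.post0.dev%d" % (tag, distance)  # versioneer 0.19: v1.2.0.post0.dev1185
print(old, "->", new)
```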
def render_pep440_post(pieces):
- # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
- # .dev0 sorts backwards (a dirty tree will appear "older" than the
- # corresponding clean one), but you shouldn't be releasing software with
- # -dirty anyways.
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -808,11 +807,13 @@ def render_pep440_post(pieces):
def render_pep440_old(pieces):
- # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+ """TAG[.postDISTANCE[.dev0]] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -828,12 +829,13 @@ def render_pep440_old(pieces):
def render_git_describe(pieces):
- # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
- # --always'
+ """TAG[-DISTANCE-gHEX][-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always'.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
@@ -847,12 +849,14 @@ def render_git_describe(pieces):
def render_git_describe_long(pieces):
- # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
- # --always -long'. The distance/hash is unconditional.
+ """TAG-DISTANCE-gHEX[-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always --long'.

+ The distance/hash is unconditional.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
@@ -865,11 +869,13 @@ def render_git_describe_long(pieces):
def render(pieces, style):
+ """Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
- "error": pieces["error"]}
+ "error": pieces["error"],
+ "date": None}
if not style or style == "default":
style = "pep440" # the default
@@ -890,10 +896,12 @@ def render(pieces, style):
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None}
+ "dirty": pieces["dirty"], "error": None,
+ "date": pieces.get("date")}
def get_versions():
+ """Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
@@ -918,7 +926,8 @@ def get_versions():
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
- "error": "unable to find root of source tree"}
+ "error": "unable to find root of source tree",
+ "date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@@ -934,12 +943,13 @@ def get_versions():
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
- "error": "unable to compute version"}
-"""
+ "error": "unable to compute version", "date": None}
+'''
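(Aside: everything between the triple quotes that just closed is the LONG_VERSION_PY template that versioneer later writes into `_version.py`, which is why every literal percent sign in it is doubled — the template is filled in with old-style `%` formatting, as the py2exe hunk further down shows. A one-line sketch of that expansion, with a placeholder template and config value:)

```
# Placeholder one-line template; the real LONG_VERSION_PY is the whole
# string above. "%%" survives interpolation as a single "%".
template = 'date = run_git("--format=%%ci")  # tag prefix: %(TAG_PREFIX)s'
print(template % {"TAG_PREFIX": "v"})
# date = run_git("--format=%ci")  # tag prefix: v
```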
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
@@ -956,6 +966,10 @@ def git_get_keywords(versionfile_abs):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
@@ -964,8 +978,22 @@ def git_get_keywords(versionfile_abs):
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+
+ # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
@@ -1000,6 +1028,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
+ "date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
@@ -1009,34 +1038,48 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
+ "date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- # this runs 'git' from the root of the source tree. This only gets called
- # if the git-archive 'subst' keywords were *not* expanded, and
- # _version.py hasn't already been rewritten with a short version string,
- # meaning we're inside a checked out source tree.
-
- if not os.path.exists(os.path.join(root, ".git")):
- if verbose:
- print("no .git in %s" % root)
- raise NotThisMethod("no .git directory")
+ """Get version from 'git describe' in the root of the source tree.
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
- # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
- # if there are no tags, this yields HEX[-dirty] (no NUM)
- describe_out = run_command(
- GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
+ if rc != 0:
+ if verbose:
+ print("Directory %s not under git control" % root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(
+ GITS,
+ [
+ "describe",
+ "--tags",
+ "--dirty",
+ "--always",
+ "--long",
+ "--match",
+ "%s*" % tag_prefix,
+ ],
+ cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
- full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
@@ -1073,7 +1116,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
- full_tag, tag_prefix
+ full_tag,
+ tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
@@ -1087,13 +1131,27 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
- count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
+ 0
+ ].strip()
+ # Use only the last line. Previous lines may contain GPG signature
+ # information.
+ date = date.splitlines()[-1]
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
return pieces
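(Aside, for context on what git_pieces_from_vcs parses: with `--long`, a tagged ancestor makes `git describe` print `TAG-NUM-gHEX[-dirty]`. A hedged sketch of the split performed by the elided middle of the function, using a made-up describe string — the real code also strips `tag_prefix` and handles the untagged case:)

```
import re

describe_out = "v1.2.0-1185-g82894a4-dirty"  # made-up describe output

dirty = describe_out.endswith("-dirty")
if dirty:
    describe_out = describe_out[:-len("-dirty")]
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", describe_out)
if mo:
    closest_tag, distance, short = mo.group(1), int(mo.group(2)), mo.group(3)
    print(closest_tag, distance, short, dirty)  # v1.2.0 1185 82894a4 True
```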
def do_vcs_install(manifest_in, versionfile_source, ipy):
+ """Git-specific installation logic for Versioneer.
+
+ For Git, this means creating/changing .gitattributes to mark _version.py
+ for export-subst keyword substitution.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
@@ -1127,34 +1185,43 @@ def do_vcs_install(manifest_in, versionfile_source, ipy):
def versions_from_parentdir(parentdir_prefix, root, verbose):
- # Source tarballs conventionally unpack into a directory that includes
- # both the project name and a version string.
- dirname = os.path.basename(root)
- if not dirname.startswith(parentdir_prefix):
- if verbose:
- print(
- "guessing rootdir is '%s', but '%s' doesn't start with "
- "prefix '%s'" % (root, dirname, parentdir_prefix)
- )
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
- return {
- "version": dirname[len(parentdir_prefix) :],
- "full-revisionid": None,
- "dirty": False,
- "error": None,
- }
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {
+ "version": dirname[len(parentdir_prefix) :],
+ "full-revisionid": None,
+ "dirty": False,
+ "error": None,
+ "date": None,
+ }
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+
+ if verbose:
+ print(
+ "Tried directories %s but none started with prefix %s"
+ % (str(rootdirs), parentdir_prefix)
+ )
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
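(Aside: the rewritten versions_from_parentdir above now also checks up to two parent directories, which helps when a tarball unpacks into something like `pandas-1.2.0/sub/dir`. A quick illustration of the walk with a hypothetical path and prefix:)

```
import os

root = "/tmp/pandas-1.2.0/build"  # hypothetical unpack location
prefix = "pandas-"                # would come from cfg.parentdir_prefix

for _ in range(3):  # the starting dir plus two parents, as in the loop above
    dirname = os.path.basename(root)
    if dirname.startswith(prefix):
        print("version:", dirname[len(prefix):])  # version: 1.2.0
        break
    root = os.path.dirname(root)  # up a level
```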
SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.15) from
+# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
-from warnings import catch_warnings
-with catch_warnings(record=True):
- import json
-import sys
+import json
version_json = '''
%s
@@ -1167,6 +1234,7 @@ def get_versions():
def versions_from_file(filename):
+ """Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
@@ -1175,12 +1243,17 @@ def versions_from_file(filename):
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
+ if not mo:
+ mo = re.search(
+ r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
+ )
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
+ """Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
@@ -1190,19 +1263,21 @@ def write_to_version_file(filename, versions):
def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
- # now build up version string, with post-release "local version
- # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ """Build up version string, with post-release "local version identifier".
- # exceptions:
- # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -1219,30 +1294,31 @@ def render_pep440(pieces):
def render_pep440_pre(pieces):
- # TAG[.post.devDISTANCE] . No -dirty
-
- # exceptions:
- # 1: no tags. 0.post.devDISTANCE
+ """TAG[.post0.devDISTANCE] -- No -dirty.
+ Exceptions:
+ 1: no tags. 0.post0.devDISTANCE
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
- rendered += ".post.dev%d" % pieces["distance"]
+ rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
- rendered = "0.post.dev%d" % pieces["distance"]
+ rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
- # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
- # .dev0 sorts backwards (a dirty tree will appear "older" than the
- # corresponding clean one), but you shouldn't be releasing software with
- # -dirty anyways.
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -1261,11 +1337,13 @@ def render_pep440_post(pieces):
def render_pep440_old(pieces):
- # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+ """TAG[.postDISTANCE[.dev0]] .
- # exceptions:
- # 1: no tags. 0.postDISTANCE[.dev0]
+ The ".dev0" means dirty.
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
@@ -1281,12 +1359,13 @@ def render_pep440_old(pieces):
def render_git_describe(pieces):
- # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
- # --always'
+ """TAG[-DISTANCE-gHEX][-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always'.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
@@ -1300,12 +1379,14 @@ def render_git_describe(pieces):
def render_git_describe_long(pieces):
- # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
- # --always -long'. The distance/hash is unconditional.
+ """TAG-DISTANCE-gHEX[-dirty].
- # exceptions:
- # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ Like 'git describe --tags --dirty --always --long'.
+ The distance/hash is unconditional.
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
@@ -1318,12 +1399,14 @@ def render_git_describe_long(pieces):
def render(pieces, style):
+ """Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
+ "date": None,
}
if not style or style == "default":
@@ -1349,16 +1432,19 @@ def render(pieces, style):
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
+ "date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
- pass
+ """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
- # returns dict with two keys: 'version' and 'full'
+ """Get the project version from whatever source is available.
+ Returns dict with two keys: 'version' and 'full'.
+ """
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
@@ -1431,14 +1517,21 @@ def get_versions(verbose=False):
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
+ "date": None,
}
def get_version():
+ """Get the short version string for this project."""
return get_versions()["version"]
-def get_cmdclass():
+def get_cmdclass(cmdclass=None):
+ """Get the custom setuptools/distutils subclasses used by Versioneer.
+
+ If the package uses a different cmdclass (e.g. one from numpy), it
+ should be provided as an argument.
+ """
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
@@ -1452,9 +1545,9 @@ def get_cmdclass():
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
- # Also see https://github.com/warner/python-versioneer/issues/52
+ # Also see https://github.com/python-versioneer/python-versioneer/issues/52
- cmds = {}
+ cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
@@ -1475,6 +1568,7 @@ def run(self):
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
+ print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
@@ -1489,8 +1583,19 @@ def run(self):
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
-
- from distutils.command.build_py import build_py as _build_py
+ # pip install:
+ # copies source tree to a tempdir before running egg_info/etc
+ # if .git isn't copied too, 'git describe' will fail
+ # then does setup.py bdist_wheel, or sometimes setup.py install
+ # setup.py egg_info -> ?
+
+ # we override different "build_py" commands for both environments
+ if "build_py" in cmds:
+ _build_py = cmds["build_py"]
+ elif "setuptools" in sys.modules:
+ from setuptools.command.build_py import build_py as _build_py
+ else:
+ from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
@@ -1507,9 +1612,41 @@ def run(self):
cmds["build_py"] = cmd_build_py
+ if "setuptools" in sys.modules:
+ from setuptools.command.build_ext import build_ext as _build_ext
+ else:
+ from distutils.command.build_ext import build_ext as _build_ext
+
+ class cmd_build_ext(_build_ext):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ _build_ext.run(self)
+ if self.inplace:
+ # build_ext --inplace will only build extensions in
+ # build/lib<..> dir with no _version.py to write to.
+ # As in place builds will already have a _version.py
+ # in the module dir, we do not need to write one.
+ return
+ # now locate _version.py in the new build/ directory and replace
+ # it with an updated value
+ target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+
+ cmds["build_ext"] = cmd_build_ext
+
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
+ # nczeczulin reports that py2exe won't like the pep440-style string
+ # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
+ # setup(console=[{
+ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
+ # "product_version": versioneer.get_version(),
+ # ...
+
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
@@ -1537,8 +1674,39 @@ def run(self):
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
+ if "py2exe" in sys.modules: # py2exe enabled?
+ from py2exe.distutils_buildexe import py2exe as _py2exe
+
+ class cmd_py2exe(_py2exe):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ target_versionfile = cfg.versionfile_source
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+
+ _py2exe.run(self)
+ os.unlink(target_versionfile)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(
+ LONG
+ % {
+ "DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ }
+ )
+
+ cmds["py2exe"] = cmd_py2exe
+
# we override different "sdist" commands for both environments
- if "setuptools" in sys.modules:
+ if "sdist" in cmds:
+ _sdist = cmds["sdist"]
+ elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
@@ -1579,7 +1747,7 @@ def make_release_tree(self, base_dir, files):
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
- tag_prefix = ""
+ tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
@@ -1615,6 +1783,7 @@ def make_release_tree(self, base_dir, files):
def do_setup():
+ """Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
@@ -1672,7 +1841,7 @@ def do_setup():
except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
- # (https://docs.python.org/2/distutils/sourcedist.html#commands), so
+ # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
@@ -1692,13 +1861,14 @@ def do_setup():
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
- # .gitattributes to mark _version.py for export-time keyword
+ # .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
+ """Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
| bump versioneer from 0.15 to 0.19
```
(pandas-dev) fangchenli@Fangchens-MacBook-Pro pandas-fangchenli % python setup.py version
running version
keywords are unexpanded, not using
got version from VCS {'version': '1.2.0.dev0+1185.g82894a4b6', 'full-revisionid': '82894a4b6329b8af74bb9c593858453a01b91bf6', 'dirty': False, 'error': None, 'date': '2020-11-14T16:24:02-0600'}
Version: 1.2.0.dev0+1185.g82894a4b6
full-revisionid: 82894a4b6329b8af74bb9c593858453a01b91bf6
dirty: False
date: 2020-11-14T16:24:02-0600
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/37856 | 2020-11-14T22:25:58Z | 2020-11-17T12:55:16Z | 2020-11-17T12:55:16Z | 2020-12-05T18:50:45Z |
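(Editorial note on the diff above: 0.19's `get_cmdclass` now accepts an existing cmdclass dict — see the signature change and the `if "build_py" in cmds` / `if "sdist" in cmds` hunks — so projects that already customize those commands can layer versioneer on top. A hedged sketch of how a downstream setup.py might use it; `ProjectSdist` is a stand-in for whatever custom sdist a project already carries:)

```
import versioneer
from setuptools import setup
from setuptools.command.sdist import sdist as ProjectSdist  # stand-in custom sdist

# build_py and sdist entries in the passed dict are honored and subclassed.
cmdclass = versioneer.get_cmdclass({"sdist": ProjectSdist})

setup(
    version=versioneer.get_version(),
    cmdclass=cmdclass,
)
```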