Dataset schema (minimum and maximum string lengths per column):

| column | type | min length | max length |
| --- | --- | --- | --- |
| title | string | 1 | 185 |
| diff | string | 0 | 32.2M |
| body | string | 0 | 123k |
| url | string | 57 | 58 |
| created_at | string | 20 | 20 |
| closed_at | string | 20 | 20 |
| merged_at | string | 20 | 20 |
| updated_at | string | 20 | 20 |
TYP: use TypedDict to define CSSDict
diff --git a/pandas/_typing.py b/pandas/_typing.py index 9f23fcc56597f..a58dc0dba1bf1 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -36,7 +36,10 @@ # and use a string literal forward reference to it in subsequent types # https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles if TYPE_CHECKING: - from typing import final + from typing import ( + TypedDict, + final, + ) from pandas._libs import ( Period, @@ -70,6 +73,8 @@ else: # typing.final does not exist until py38 final = lambda x: x + # typing.TypedDict does not exist until py38 + TypedDict = dict # array-like diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 15557c993eab4..776cedcf11592 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -12,7 +12,6 @@ Sequence, Tuple, Union, - cast, ) from uuid import uuid4 @@ -21,7 +20,10 @@ from pandas._config import get_option from pandas._libs import lib -from pandas._typing import FrameOrSeriesUnion +from pandas._typing import ( + FrameOrSeriesUnion, + TypedDict, +) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.generic import ABCSeries @@ -45,10 +47,14 @@ CSSPair = Tuple[str, Union[str, int, float]] CSSList = List[CSSPair] CSSProperties = Union[str, CSSList] -CSSStyles = List[Dict[str, CSSProperties]] # = List[CSSDict] -# class CSSDict(TypedDict): # available when TypedDict is valid in pandas -# selector: str -# props: CSSProperties + + +class CSSDict(TypedDict): + selector: str + props: CSSProperties + + +CSSStyles = List[CSSDict] class StylerRenderer: @@ -615,15 +621,9 @@ def _format_table_styles(styles: CSSStyles) -> CSSStyles: {'selector': 'th', 'props': 'a:v;'}] """ return [ - item - for sublist in [ - [ # this is a CSSDict when TypedDict is available to avoid cast. - {"selector": x, "props": style["props"]} - for x in cast(str, style["selector"]).split(",") - ] - for style in styles - ] - for item in sublist + {"selector": selector, "props": css_dict["props"]} + for css_dict in styles + for selector in css_dict["selector"].split(",") ]
xref https://github.com/pandas-dev/pandas/pull/39942#issuecomment-783363832 - here's a solution; the [docs](https://docs.python.org/3/library/typing.html#typing.TypedDict) say that

> At runtime it is a plain dict
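For context, a minimal sketch of the point being quoted (the `CSSDict` fields mirror the diff above, with `props` simplified to `str`): on py3.8+ `TypedDict` only adds static structure, and instances are ordinary dicts at runtime, which is why the diff can alias `TypedDict = dict` on older versions.

```python
from typing import TypedDict  # py3.8+; the diff falls back to `TypedDict = dict` earlier


class CSSDict(TypedDict):
    selector: str
    props: str  # simplified; the real diff uses CSSProperties


style: CSSDict = {"selector": "th,td", "props": "color: red;"}

# "At runtime it is a plain dict", so ordinary dict operations work:
assert type(style) is dict
print(style["selector"].split(","))  # ['th', 'td']
```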
https://api.github.com/repos/pandas-dev/pandas/pulls/40947
2021-04-14T15:45:09Z
2021-04-27T08:54:41Z
2021-04-27T08:54:41Z
2021-04-27T09:00:22Z
TYP: timestamps.pyi
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi new file mode 100644 index 0000000000000..8728b700a1f6d --- /dev/null +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -0,0 +1,205 @@ +from datetime import ( + date as _date, + datetime, + time as _time, + timedelta, + tzinfo as _tzinfo, +) +import sys +from time import struct_time +from typing import ( + ClassVar, + Optional, + Type, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs import ( + NaT, + NaTType, + Period, + Timedelta, +) + +_S = TypeVar("_S") + + +def integer_op_not_supported(obj) -> None: ... + + +class Timestamp(datetime): + min: ClassVar[Timestamp] + max: ClassVar[Timestamp] + + resolution: ClassVar[Timedelta] + value: int # np.int64 + + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] + cls: Type[_S], + ts_input: int | np.integer | float | str | _date | datetime | np.datetime64 = ..., + freq=..., + tz: str | _tzinfo | None | int= ..., + unit=..., + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + *, + fold: int | None= ..., + ) -> _S | NaTType: ... + + @property + def year(self) -> int: ... + @property + def month(self) -> int: ... + @property + def day(self) -> int: ... + @property + def hour(self) -> int: ... + @property + def minute(self) -> int: ... + @property + def second(self) -> int: ... + @property + def microsecond(self) -> int: ... + @property + def tzinfo(self) -> Optional[_tzinfo]: ... + @property + def tz(self) -> Optional[_tzinfo]: ... + + @property + def fold(self) -> int: ... + + @classmethod + def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ... + @classmethod + def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ... + @classmethod + def today(cls: Type[_S]) -> _S: ... + @classmethod + def fromordinal(cls: Type[_S], n: int) -> _S: ... + + if sys.version_info >= (3, 8): + @classmethod + def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ... + else: + @overload + @classmethod + def now(cls: Type[_S], tz: None = ...) -> _S: ... + @overload + @classmethod + def now(cls, tz: _tzinfo) -> datetime: ... + + @classmethod + def utcnow(cls: Type[_S]) -> _S: ... + @classmethod + def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ... + + @classmethod + def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... + + def strftime(self, fmt: str) -> str: ... + def __format__(self, fmt: str) -> str: ... + + def toordinal(self) -> int: ... + def timetuple(self) -> struct_time: ... + + def timestamp(self) -> float: ... + + def utctimetuple(self) -> struct_time: ... + def date(self) -> _date: ... + def time(self) -> _time: ... + def timetz(self) -> _time: ... + + def replace( + self, + year: int = ..., + month: int = ..., + day: int = ..., + hour: int = ..., + minute: int = ..., + second: int = ..., + microsecond: int = ..., + tzinfo: Optional[_tzinfo] = ..., + *, + fold: int = ..., + ) -> datetime: ... + + if sys.version_info >= (3, 8): + def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ... + else: + def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ... + + def ctime(self) -> str: ... + def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... 
+ + @classmethod + def strptime(cls, date_string: str, format: str) -> datetime: ... + + def utcoffset(self) -> Optional[timedelta]: ... + def tzname(self) -> Optional[str]: ... + def dst(self) -> Optional[timedelta]: ... + + def __le__(self, other: datetime) -> bool: ... # type: ignore + def __lt__(self, other: datetime) -> bool: ... # type: ignore + def __ge__(self, other: datetime) -> bool: ... # type: ignore + def __gt__(self, other: datetime) -> bool: ... # type: ignore + if sys.version_info >= (3, 8): + def __add__(self: _S, other: timedelta) -> _S: ... + def __radd__(self: _S, other: timedelta) -> _S: ... + else: + def __add__(self, other: timedelta) -> datetime: ... + def __radd__(self, other: timedelta) -> datetime: ... + @overload # type: ignore + def __sub__(self, other: datetime) -> timedelta: ... + @overload + def __sub__(self, other: timedelta) -> datetime: ... + + def __hash__(self) -> int: ... + def weekday(self) -> int: ... + def isoweekday(self) -> int: ... + def isocalendar(self) -> tuple[int, int, int]: ... + + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + + def to_pydatetime(self, warn: bool = ...) -> datetime: ... + def to_datetime64(self) -> np.datetime64: ... + def to_period(self, freq) -> Period: ... + def to_julian_date(self) -> np.float64: ... + + @property + def asm8(self) -> np.datetime64: ... + + def tz_convert(self: _S, tz) -> _S: ... + + # TODO: could return NaT? + def tz_localize(self: _S, tz, ambiguous: str = ..., nonexistent: str = ...) -> _S: ... + + def normalize(self: _S) -> _S: ... + + # TODO: round/floor/ceil could return NaT? + def round(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... + def floor(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... + def ceil(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py index 34d5ea6cfb20d..a537951786646 100644 --- a/pandas/core/arrays/_ranges.py +++ b/pandas/core/arrays/_ranges.py @@ -41,20 +41,20 @@ def generate_regular_range( ------- ndarray[np.int64] Representing nanoseconds. 
""" - start = start.value if start is not None else None - end = end.value if end is not None else None + istart = start.value if start is not None else None + iend = end.value if end is not None else None stride = freq.nanos if periods is None: - b = start + b = istart # cannot just use e = Timestamp(end) + 1 because arange breaks when # stride is too large, see GH10887 - e = b + (end - b) // stride * stride + stride // 2 + 1 - elif start is not None: - b = start + e = b + (iend - b) // stride * stride + stride // 2 + 1 + elif istart is not None: + b = istart e = _generate_range_overflow_safe(b, periods, stride, side="start") - elif end is not None: - e = end + stride + elif iend is not None: + e = iend + stride b = _generate_range_overflow_safe(e, periods, stride, side="end") else: raise ValueError( diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 289ed4948934f..117b267fd49e5 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -742,7 +742,9 @@ def _sub_datetimelike_scalar(self, other): assert isinstance(other, (datetime, np.datetime64)) assert other is not NaT other = Timestamp(other) - if other is NaT: + # error: Non-overlapping identity check (left operand type: "Timestamp", + # right operand type: "NaTType") + if other is NaT: # type: ignore[comparison-overlap] return self - NaT if not self._has_same_tz(other): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index d739b46620032..d4ecec667cc86 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -189,13 +189,13 @@ def maybe_box_native(value: Scalar) -> Scalar: value = maybe_box_datetimelike(value) elif is_float(value): # error: Argument 1 to "float" has incompatible type - # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]"; + # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]"; # expected "Union[SupportsFloat, _SupportsIndex, str]" value = float(value) # type: ignore[arg-type] elif is_integer(value): # error: Argument 1 to "int" has incompatible type - # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]"; - # pected "Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]" + # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]"; + # expected "Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]" value = int(value) # type: ignore[arg-type] elif is_bool(value): value = bool(value) @@ -729,7 +729,9 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, except OutOfBoundsDatetime: return np.dtype(object), val - if val is NaT or val.tz is None: + # error: Non-overlapping identity check (left operand type: "Timestamp", + # right operand type: "NaTType") + if val is NaT or val.tz is None: # type: ignore[comparison-overlap] dtype = np.dtype("M8[ns]") val = val.to_datetime64() else: @@ -2056,7 +2058,7 @@ def validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None: ValueError """ # error: Argument 1 to "__call__" of "ufunc" has incompatible type - # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]"; + # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]"; # expected "Union[Union[int, float, complex, str, bytes, generic], # Sequence[Union[int, float, complex, str, bytes, generic]], # Sequence[Sequence[Any]], _SupportsArray]" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2d7d83d6a2bc3..61396fdf372d5 100644 --- 
a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1892,7 +1892,9 @@ def get_block_type(values, dtype: Dtype | None = None): cls = ExtensionBlock elif isinstance(dtype, CategoricalDtype): cls = CategoricalBlock - elif vtype is Timestamp: + # error: Non-overlapping identity check (left operand type: "Type[generic]", + # right operand type: "Type[Timestamp]") + elif vtype is Timestamp: # type: ignore[comparison-overlap] cls = DatetimeTZBlock elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index bb37f670ed302..8577bb5dc311b 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -628,16 +628,16 @@ def _adjust_to_origin(arg, origin, unit): if offset.tz is not None: raise ValueError(f"origin offset {offset} must be tz-naive") - offset -= Timestamp(0) + td_offset = offset - Timestamp(0) # convert the offset to the unit of the arg # this should be lossless in terms of precision - offset = offset // Timedelta(1, unit=unit) + ioffset = td_offset // Timedelta(1, unit=unit) # scalars & ndarray-like can handle the addition if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)): arg = np.asarray(arg) - arg = arg + offset + arg = arg + ioffset return arg @@ -887,13 +887,17 @@ def to_datetime( infer_datetime_format=infer_datetime_format, ) + result: Timestamp | NaTType | Series | Index + if isinstance(arg, Timestamp): result = arg if tz is not None: if arg.tz is not None: - result = result.tz_convert(tz) + # error: Too many arguments for "tz_convert" of "NaTType" + result = result.tz_convert(tz) # type: ignore[call-arg] else: - result = result.tz_localize(tz) + # error: Too many arguments for "tz_localize" of "NaTType" + result = result.tz_localize(tz) # type: ignore[call-arg] elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: @@ -928,7 +932,10 @@ def to_datetime( else: result = convert_listlike(np.array([arg]), format)[0] - return result + # error: Incompatible return value type (got "Union[Timestamp, NaTType, + # Series, Index]", expected "Union[DatetimeIndex, Series, float, str, + # NaTType, None]") + return result # type: ignore[return-value] # mappings for assembling units diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 1324485f49bdb..c105465cddd95 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -1,7 +1,4 @@ -from typing import ( - List, - cast, -) +from typing import List import numpy as np @@ -200,10 +197,9 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar: cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.to_datetime(cell_value) elif cell_type == "time": - result = pd.to_datetime(str(cell)) - result = cast(pd.Timestamp, result) + stamp = pd.to_datetime(str(cell)) # error: Item "str" of "Union[float, str, NaTType]" has no attribute "time" - return result.time() # type: ignore[union-attr] + return stamp.time() # type: ignore[union-attr] else: self.close() raise ValueError(f"Unrecognized type {cell_type}") diff --git a/pandas/tests/indexes/test_engines.py b/pandas/tests/indexes/test_engines.py index 52af29d999fcc..9f41c68909f6e 100644 --- a/pandas/tests/indexes/test_engines.py +++ b/pandas/tests/indexes/test_engines.py @@ -61,7 +61,13 @@ class TestTimedeltaEngine: @pytest.mark.parametrize( "scalar", [ - 
pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")), + # error: Argument 1 to "Timestamp" has incompatible type "timedelta64"; + # expected "Union[integer[Any], float, str, date, datetime64]" + pd.Timestamp( + pd.Timedelta(days=42).asm8.view( + "datetime64[ns]" + ) # type: ignore[arg-type] + ), pd.Timedelta(days=42).value, pd.Timedelta(days=42).to_pytimedelta(), pd.Timedelta(days=42).to_timedelta64(), diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 663892cefb5e6..98ec4de614a07 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -331,7 +331,9 @@ def test_constructor_fromordinal(self): tz="UTC", ), Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None), - Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC), + # error: Argument 9 to "Timestamp" has incompatible type "_UTCclass"; + # expected "Optional[int]" + Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC), # type: ignore[arg-type] ], ) def test_constructor_nanosecond(self, result):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40945
2021-04-14T14:44:23Z
2021-04-26T09:31:48Z
2021-04-26T09:31:47Z
2021-04-26T17:18:07Z
REF: handle dtype dispatch in libhashtable, share Vector/Hashtable code
diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index a5679af44ac06..80d7ab58dc559 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -128,10 +128,12 @@ cdef struct Int64VectorData: int64_t *data Py_ssize_t n, m -cdef class Int64Vector: +cdef class Vector: + cdef bint external_view_exists + +cdef class Int64Vector(Vector): cdef Int64VectorData *data cdef ndarray ao - cdef bint external_view_exists cdef resize(self) cpdef ndarray to_array(self) diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index b6278b3956a1d..0612acd25a5d5 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -12,34 +12,28 @@ def unique_label_indices( class Factorizer: - table: PyObjectHashTable - uniques: ObjectVector count: int def __init__(self, size_hint: int): ... def get_count(self) -> int: ... + +class ObjectFactorizer(Factorizer): + table: PyObjectHashTable + uniques: ObjectVector + def factorize( self, - values: np.ndarray, # np.ndarray[object] + values: np.ndarray, # ndarray[object] sort: bool = ..., na_sentinel=..., na_value=..., ) -> np.ndarray: ... # np.ndarray[intp] - def unique( - self, - values: np.ndarray, # np.ndarray[object] - ) -> np.ndarray: ... # np.ndarray[object] - -class Int64Factorizer: +class Int64Factorizer(Factorizer): table: Int64HashTable uniques: Int64Vector - count: int - - def __init__(self, size_hint: int): ... - def get_count(self) -> int: ... def factorize( self, @@ -240,3 +234,26 @@ def value_count_int64( np.ndarray, # np.ndarray[np.int64] np.ndarray, # np.ndarray[np.int64] ]: ... + + +def duplicated( + values: np.ndarray, + keep: Literal["last", "first", False] = ..., +) -> np.ndarray: ... # np.ndarray[bool] + +def mode(values: np.ndarray, dropna: bool) -> np.ndarray: ... + +def value_count( + values: np.ndarray, + dropna: bool, +) -> tuple[ + np.ndarray, + np.ndarray, # np.ndarray[np.int64] +]: ... + + +# arr and values should have same dtype +def ismember( + arr: np.ndarray, + values: np.ndarray, +) -> np.ndarray: ... 
# np.ndarray[bool] diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 4566f22be2c36..7df3f69337643 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -56,19 +56,25 @@ include "hashtable_class_helper.pxi" include "hashtable_func_helper.pxi" cdef class Factorizer: - cdef public: - PyObjectHashTable table - ObjectVector uniques + cdef readonly: Py_ssize_t count - def __init__(self, size_hint: int): - self.table = PyObjectHashTable(size_hint) - self.uniques = ObjectVector() + def __cinit__(self, size_hint: int): self.count = 0 def get_count(self) -> int: return self.count + +cdef class ObjectFactorizer(Factorizer): + cdef public: + PyObjectHashTable table + ObjectVector uniques + + def __cinit__(self, size_hint: int): + self.table = PyObjectHashTable(size_hint) + self.uniques = ObjectVector() + def factorize( self, ndarray[object] values, sort=False, na_sentinel=-1, na_value=None ) -> np.ndarray: @@ -105,24 +111,15 @@ cdef class Factorizer: self.count = len(self.uniques) return labels - def unique(self, ndarray[object] values): - # just for fun - return self.table.unique(values) - -cdef class Int64Factorizer: +cdef class Int64Factorizer(Factorizer): cdef public: Int64HashTable table Int64Vector uniques - Py_ssize_t count - def __init__(self, size_hint: int): + def __cinit__(self, size_hint: int): self.table = Int64HashTable(size_hint) self.uniques = Int64Vector() - self.count = 0 - - def get_count(self) -> int: - return self.count def factorize(self, const int64_t[:] values, sort=False, na_sentinel=-1, na_value=None) -> np.ndarray: diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 4cacd3245f9d8..6d51ea7d5de7b 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -127,6 +127,8 @@ dtypes = [('Complex128', 'complex128', 'khcomplex128_t'), {{if dtype != 'int64'}} +# Int64VectorData is defined in the .pxd file because it is needed (indirectly) +# by IntervalTree ctypedef struct {{name}}VectorData: {{c_type}} *data @@ -167,6 +169,14 @@ cdef inline bint needs_resize(vector_data *data) nogil: # Vector # ---------------------------------------------------------------------- +cdef class Vector: + # cdef readonly: + # bint external_view_exists + + def __cinit__(self): + self.external_view_exists = False + + {{py: # name, dtype, c_type @@ -187,11 +197,12 @@ dtypes = [('Complex128', 'complex128', 'khcomplex128_t'), {{for name, dtype, c_type in dtypes}} -cdef class {{name}}Vector: +cdef class {{name}}Vector(Vector): + # For int64 we have to put this declaration in the .pxd file; + # Int64Vector is the only one we need exposed for other cython files. 
{{if dtype != 'int64'}} cdef: - bint external_view_exists {{name}}VectorData *data ndarray ao {{endif}} @@ -201,7 +212,6 @@ cdef class {{name}}Vector: sizeof({{name}}VectorData)) if not self.data: raise MemoryError() - self.external_view_exists = False self.data.n = 0 self.data.m = _INIT_VEC_CAP self.ao = np.empty(self.data.m, dtype=np.{{dtype}}) @@ -246,17 +256,15 @@ cdef class {{name}}Vector: {{endfor}} -cdef class StringVector: +cdef class StringVector(Vector): cdef: StringVectorData *data - bint external_view_exists def __cinit__(self): self.data = <StringVectorData *>PyMem_Malloc(sizeof(StringVectorData)) if not self.data: raise MemoryError() - self.external_view_exists = False self.data.n = 0 self.data.m = _INIT_VEC_CAP self.data.data = <char **>malloc(self.data.m * sizeof(char *)) @@ -314,16 +322,14 @@ cdef class StringVector: self.append(x[i]) -cdef class ObjectVector: +cdef class ObjectVector(Vector): cdef: PyObject **data Py_ssize_t n, m ndarray ao - bint external_view_exists def __cinit__(self): - self.external_view_exists = False self.n = 0 self.m = _INIT_VEC_CAP self.ao = np.empty(_INIT_VEC_CAP, dtype=object) diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 772d83e67394c..ceb473a0b06af 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -31,9 +31,9 @@ dtypes = [('Complex128', 'complex128', 'complex128', @cython.wraparound(False) @cython.boundscheck(False) {{if dtype == 'object'}} -cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN): +cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN): {{else}} -cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): +cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): {{endif}} cdef: Py_ssize_t i = 0 @@ -107,9 +107,9 @@ cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): @cython.wraparound(False) @cython.boundscheck(False) {{if dtype == 'object'}} -def duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'): +cdef duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'): {{else}} -def duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'): +cdef duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'): {{endif}} cdef: int ret = 0 @@ -189,9 +189,9 @@ def duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'): @cython.wraparound(False) @cython.boundscheck(False) {{if dtype == 'object'}} -def ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values): +cdef ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values): {{else}} -def ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values): +cdef ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values): {{endif}} """ Return boolean of values in arr on an @@ -256,9 +256,9 @@ def ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values): @cython.wraparound(False) @cython.boundscheck(False) {{if dtype == 'object'}} -def mode_{{dtype}}(ndarray[{{dtype}}] values, bint dropna): +cdef mode_{{dtype}}(ndarray[{{dtype}}] values, bint dropna): {{else}} -def mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): +cdef mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): {{endif}} cdef: {{if dtype == 'object'}} @@ -310,3 +310,163 @@ def mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): return modes[:j + 1] {{endfor}} + + +ctypedef 
fused htfunc_t: + complex128_t + complex64_t + float64_t + float32_t + uint64_t + uint32_t + uint16_t + uint8_t + int64_t + int32_t + int16_t + int8_t + object + + +cpdef value_count(ndarray[htfunc_t] values, bint dropna): + if htfunc_t is object: + return value_count_object(values, dropna) + + elif htfunc_t is int8_t: + return value_count_int8(values, dropna) + elif htfunc_t is int16_t: + return value_count_int16(values, dropna) + elif htfunc_t is int32_t: + return value_count_int32(values, dropna) + elif htfunc_t is int64_t: + return value_count_int64(values, dropna) + + elif htfunc_t is uint8_t: + return value_count_uint8(values, dropna) + elif htfunc_t is uint16_t: + return value_count_uint16(values, dropna) + elif htfunc_t is uint32_t: + return value_count_uint32(values, dropna) + elif htfunc_t is uint64_t: + return value_count_uint64(values, dropna) + + elif htfunc_t is float64_t: + return value_count_float64(values, dropna) + elif htfunc_t is float32_t: + return value_count_float32(values, dropna) + + elif htfunc_t is complex128_t: + return value_count_complex128(values, dropna) + elif htfunc_t is complex64_t: + return value_count_complex64(values, dropna) + + else: + raise TypeError(values.dtype) + + +cpdef duplicated(ndarray[htfunc_t] values, object keep="first"): + if htfunc_t is object: + return duplicated_object(values, keep) + + elif htfunc_t is int8_t: + return duplicated_int8(values, keep) + elif htfunc_t is int16_t: + return duplicated_int16(values, keep) + elif htfunc_t is int32_t: + return duplicated_int32(values, keep) + elif htfunc_t is int64_t: + return duplicated_int64(values, keep) + + elif htfunc_t is uint8_t: + return duplicated_uint8(values, keep) + elif htfunc_t is uint16_t: + return duplicated_uint16(values, keep) + elif htfunc_t is uint32_t: + return duplicated_uint32(values, keep) + elif htfunc_t is uint64_t: + return duplicated_uint64(values, keep) + + elif htfunc_t is float64_t: + return duplicated_float64(values, keep) + elif htfunc_t is float32_t: + return duplicated_float32(values, keep) + + elif htfunc_t is complex128_t: + return duplicated_complex128(values, keep) + elif htfunc_t is complex64_t: + return duplicated_complex64(values, keep) + + else: + raise TypeError(values.dtype) + + +cpdef ismember(ndarray[htfunc_t] arr, ndarray[htfunc_t] values): + if htfunc_t is object: + return ismember_object(arr, values) + + elif htfunc_t is int8_t: + return ismember_int8(arr, values) + elif htfunc_t is int16_t: + return ismember_int16(arr, values) + elif htfunc_t is int32_t: + return ismember_int32(arr, values) + elif htfunc_t is int64_t: + return ismember_int64(arr, values) + + elif htfunc_t is uint8_t: + return ismember_uint8(arr, values) + elif htfunc_t is uint16_t: + return ismember_uint16(arr, values) + elif htfunc_t is uint32_t: + return ismember_uint32(arr, values) + elif htfunc_t is uint64_t: + return ismember_uint64(arr, values) + + elif htfunc_t is float64_t: + return ismember_float64(arr, values) + elif htfunc_t is float32_t: + return ismember_float32(arr, values) + + elif htfunc_t is complex128_t: + return ismember_complex128(arr, values) + elif htfunc_t is complex64_t: + return ismember_complex64(arr, values) + + else: + raise TypeError(values.dtype) + + +cpdef mode(ndarray[htfunc_t] values, bint dropna): + if htfunc_t is object: + return mode_object(values, dropna) + + elif htfunc_t is int8_t: + return mode_int8(values, dropna) + elif htfunc_t is int16_t: + return mode_int16(values, dropna) + elif htfunc_t is int32_t: + return mode_int32(values, 
dropna) + elif htfunc_t is int64_t: + return mode_int64(values, dropna) + + elif htfunc_t is uint8_t: + return mode_uint8(values, dropna) + elif htfunc_t is uint16_t: + return mode_uint16(values, dropna) + elif htfunc_t is uint32_t: + return mode_uint32(values, dropna) + elif htfunc_t is uint64_t: + return mode_uint64(values, dropna) + + elif htfunc_t is float64_t: + return mode_float64(values, dropna) + elif htfunc_t is float32_t: + return mode_float32(values, dropna) + + elif htfunc_t is complex128_t: + return mode_complex128(values, dropna) + elif htfunc_t is complex64_t: + return mode_complex64(values, dropna) + + else: + raise TypeError(values.dtype) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index ce718d9c9c810..f8f5e5e05bc35 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -84,6 +84,8 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: + from typing import Literal + from pandas import ( Categorical, DataFrame, @@ -188,7 +190,7 @@ def _reconstruct_data( Parameters ---------- values : np.ndarray or ExtensionArray - dtype : np.ndtype or ExtensionDtype + dtype : np.dtype or ExtensionDtype original : AnyArrayLike Returns @@ -516,10 +518,7 @@ def f(c, v): ) values = values.astype(common, copy=False) comps = comps.astype(common, copy=False) - name = common.name - if name == "bool": - name = "uint8" - f = getattr(htable, f"ismember_{name}") + f = htable.ismember return f(comps, values) @@ -888,30 +887,24 @@ def value_counts_arraylike(values, dropna: bool): values = _ensure_arraylike(values) original = values values, _ = _ensure_data(values) - ndtype = values.dtype.name + + # TODO: handle uint8 + keys, counts = htable.value_count(values, dropna) if needs_i8_conversion(original.dtype): # datetime, timedelta, or period - keys, counts = htable.value_count_int64(values, dropna) - if dropna: msk = keys != iNaT keys, counts = keys[msk], counts[msk] - else: - # ndarray like - - # TODO: handle uint8 - f = getattr(htable, f"value_count_{ndtype}") - keys, counts = f(values, dropna) - res_keys = _reconstruct_data(keys, original.dtype, original) - return res_keys, counts -def duplicated(values: ArrayLike, keep: str | bool = "first") -> np.ndarray: +def duplicated( + values: ArrayLike, keep: Literal["first", "last", False] = "first" +) -> np.ndarray: """ Return boolean ndarray denoting duplicate values. 
@@ -931,9 +924,7 @@ def duplicated(values: ArrayLike, keep: str | bool = "first") -> np.ndarray: duplicated : ndarray[bool] """ values, _ = _ensure_data(values) - ndtype = values.dtype.name - f = getattr(htable, f"duplicated_{ndtype}") - return f(values, keep=keep) + return htable.duplicated(values, keep=keep) def mode(values, dropna: bool = True) -> Series: @@ -971,16 +962,14 @@ def mode(values, dropna: bool = True) -> Series: values = values[~mask] values, _ = _ensure_data(values) - ndtype = values.dtype.name - f = getattr(htable, f"mode_{ndtype}") - result = f(values, dropna=dropna) + npresult = htable.mode(values, dropna=dropna) try: - result = np.sort(result) + npresult = np.sort(npresult) except TypeError as err: warn(f"Unable to sort modes: {err}") - result = _reconstruct_data(result, original.dtype, original) + result = _reconstruct_data(npresult, original.dtype, original) # Ensure index is type stable (should always use int index) return Series(result, index=ibase.default_index(len(result))) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a82c75f4b2557..26c582561cd3d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2183,11 +2183,9 @@ def mode(self, dropna=True): if dropna: good = self._codes != -1 codes = self._codes[good] - # error: Incompatible types in assignment (expression has type "List[Any]", - # variable has type "ndarray") - codes = sorted( # type: ignore[assignment] - htable.mode_int64(ensure_int64(codes), dropna) - ) + + codes = htable.mode(codes, dropna) + codes.sort() codes = coerce_indexer_dtype(codes, self.dtype.categories) return self._from_backing_data(codes) diff --git a/pandas/core/base.py b/pandas/core/base.py index 3270e3dd82f7d..adc904d80fea8 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -61,6 +61,8 @@ import pandas.core.nanops as nanops if TYPE_CHECKING: + from typing import Literal + from pandas import Categorical _shared_docs: dict[str, str] = {} @@ -1258,5 +1260,7 @@ def drop_duplicates(self, keep="first"): return self[~duplicated] # type: ignore[index] @final - def _duplicated(self, keep: str | bool = "first") -> np.ndarray: + def _duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> np.ndarray: return duplicated(self._values, keep=keep) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6d3042507d930..899526694f4d9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -43,6 +43,7 @@ lib, properties, ) +from pandas._libs.hashtable import duplicated from pandas._libs.lib import no_default from pandas._typing import ( AggFuncType, @@ -6141,7 +6142,6 @@ def duplicated( 4 True dtype: bool """ - from pandas._libs.hashtable import duplicated_int64 if self.empty: return self._constructor_sliced(dtype=bool) @@ -6181,7 +6181,7 @@ def f(vals) -> tuple[np.ndarray, int]: sort=False, xnull=False, ) - result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index) + result = self._constructor_sliced(duplicated(ids, keep), index=self.index) return result.__finalize__(self, method="duplicated") # ---------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 84f1245299d53..5895d12622aa1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2685,7 +2685,7 @@ def drop_duplicates(self: _IndexT, keep: str_t | bool = "first") -> _IndexT: return super().drop_duplicates(keep=keep) - def duplicated(self, keep: 
str_t | bool = "first") -> np.ndarray: + def duplicated(self, keep: Literal["first", "last", False] = "first") -> np.ndarray: """ Indicate duplicate index values. diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a68238af003e4..4e4bcd570391d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -25,7 +25,7 @@ index as libindex, lib, ) -from pandas._libs.hashtable import duplicated_int64 +from pandas._libs.hashtable import duplicated from pandas._typing import ( AnyArrayLike, DtypeObj, @@ -1614,7 +1614,7 @@ def duplicated(self, keep="first") -> np.ndarray: shape = tuple(len(lev) for lev in self.levels) ids = get_group_index(self.codes, shape, sort=False, xnull=False) - return duplicated_int64(ids, keep) + return duplicated(ids, keep) # error: Cannot override final attribute "_duplicated" # (previously declared in base class "IndexOpsMixin") diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 8478e2a17efa5..f8085b2bab1ed 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2153,7 +2153,7 @@ def _factorize_keys( rk = ensure_int64(np.asarray(rk, dtype=np.int64)) else: - klass = libhashtable.Factorizer + klass = libhashtable.ObjectFactorizer lk = ensure_object(lk) rk = ensure_object(rk) diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index bc0b6e0b028a8..ea59d55989f8b 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -253,7 +253,7 @@ def test_duplicated_large(keep): mi = MultiIndex(levels=levels, codes=codes) result = mi.duplicated(keep=keep) - expected = hashtable.duplicated_object(mi.values, keep=keep) + expected = hashtable.duplicated(mi.values, keep=keep) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index 04a8aeefbfcd6..aeff591e3f0dc 100644 --- a/pandas/tests/libs/test_hashtable.py +++ b/pandas/tests/libs/test_hashtable.py @@ -278,7 +278,7 @@ def test_unique(self, table_type, dtype): def get_ht_function(fun_name, type_suffix): - return getattr(ht, fun_name + "_" + type_suffix) + return getattr(ht, fun_name) @pytest.mark.parametrize( @@ -374,7 +374,7 @@ def test_modes_with_nans(): values = np.array([True, pd.NA, np.nan], dtype=np.object_) # pd.Na and np.nan will have the same representative: np.nan # thus we have 2 nans and 1 True - modes = ht.mode_object(values, False) + modes = ht.mode(values, False) assert modes.size == 1 assert np.isnan(modes[0]) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 964dd9bdd0e0a..4df95d895e475 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -194,7 +194,7 @@ def test_factorize_nan(self): # rizer.factorize should not raise an exception if na_sentinel indexes # outside of reverse_indexer key = np.array([1, 2, 1, np.nan], dtype="O") - rizer = ht.Factorizer(len(key)) + rizer = ht.ObjectFactorizer(len(key)) for na_sentinel in (-1, 20): ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel) expected = np.array([0, 1, 0, na_sentinel], dtype="int32")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40944
2021-04-14T14:23:49Z
2021-05-10T13:52:45Z
2021-05-10T13:52:45Z
2021-05-10T14:07:16Z
Backport PR #40924: BUG: concat with DTI and all-None Index
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index cdfc2e5686b91..16f9284802407 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -14,7 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - +- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 11d191597d61e..25860d6a4ecb3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2742,7 +2742,8 @@ def _union(self, other, sort): # worth making this faster? a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) - result = Index(result)._values # do type inference here + # If objects are unorderable, we must have object dtype. + return np.array(result, dtype=object) else: # find indexes of things in "other" that are not in "self" if self.is_unique: diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index cd58df4fc5da6..3b11a5dbda41c 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -572,3 +572,23 @@ def test_concat_repeated_keys(keys, integrity): tuples = list(zip(keys, ["a", "b", "c"])) expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples)) tm.assert_series_equal(result, expected) + + +def test_concat_null_object_with_dti(): + # GH#40841 + dti = pd.DatetimeIndex( + ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)" + ) + right = DataFrame(data={"C": [0.5274]}, index=dti) + + idx = Index([None], dtype="object", name="Maybe Time (UTC)") + left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx) + + result = concat([left, right], axis="columns") + + exp_index = Index([None, dti[0]], dtype=object) + expected = DataFrame( + {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]}, + index=exp_index, + ) + tm.assert_frame_equal(result, expected)
Backport PR #40924
https://api.github.com/repos/pandas-dev/pandas/pulls/40942
2021-04-14T14:11:39Z
2021-04-14T15:41:27Z
2021-04-14T15:41:27Z
2021-04-14T15:41:31Z
BUG: DataFrame mask method does not work properly with pd.StringDtype()
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 2b0b62ab7facf..8ebb5437978ea 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -830,7 +830,7 @@ ExtensionArray - Bug in :meth:`DataFrame.where` when ``other`` is a :class:`Series` with :class:`ExtensionArray` dtype (:issue:`38729`) - Fixed bug where :meth:`Series.idxmax`, :meth:`Series.idxmin` and ``argmax/min`` fail when the underlying data is :class:`ExtensionArray` (:issue:`32749`, :issue:`33719`, :issue:`36566`) - Fixed a bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`) -- +- Bug in :meth:`DataFrame.mask` where masking a :class:`Dataframe` with an :class:`ExtensionArray` dtype raises ``ValueError`` (:issue:`40941`) Styler ^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cbc353eead464..bad42a85aeeee 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8958,7 +8958,7 @@ def _where( join="left", axis=axis, level=level, - fill_value=np.nan, + fill_value=None, copy=False, ) diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py index afa8c757c23e4..364475428e529 100644 --- a/pandas/tests/frame/indexing/test_mask.py +++ b/pandas/tests/frame/indexing/test_mask.py @@ -5,7 +5,10 @@ import numpy as np from pandas import ( + NA, DataFrame, + Series, + StringDtype, isna, ) import pandas._testing as tm @@ -99,3 +102,24 @@ def test_mask_try_cast_deprecated(frame_or_series): with tm.assert_produces_warning(FutureWarning): # try_cast keyword deprecated obj.mask(mask, -1, try_cast=True) + + +def test_mask_stringdtype(): + # GH 40824 + df = DataFrame( + {"A": ["foo", "bar", "baz", NA]}, + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + filtered_df = DataFrame( + {"A": ["this", "that"]}, index=["id2", "id3"], dtype=StringDtype() + ) + filter_ser = Series([False, True, True, False]) + result = df.mask(filter_ser, filtered_df) + + expected = DataFrame( + {"A": [NA, "this", "that", NA]}, + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 574fa46d10f67..7ffe2fb9ab1ff 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -10,6 +10,7 @@ DataFrame, DatetimeIndex, Series, + StringDtype, Timestamp, date_range, isna, @@ -709,3 +710,22 @@ def test_where_copies_with_noop(frame_or_series): where_res *= 2 tm.assert_equal(result, expected) + + +def test_where_string_dtype(frame_or_series): + # GH40824 + obj = frame_or_series( + ["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype() + ) + filtered_obj = frame_or_series( + ["b", "c"], index=["id2", "id3"], dtype=StringDtype() + ) + filter_ser = Series([False, True, True, False]) + + result = obj.where(filter_ser, filtered_obj) + expected = frame_or_series( + [pd.NA, "b", "c", pd.NA], + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + tm.assert_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_mask.py b/pandas/tests/series/indexing/test_mask.py index dc4fb530dbb52..a4dda3a5c0c5b 100644 --- a/pandas/tests/series/indexing/test_mask.py +++ b/pandas/tests/series/indexing/test_mask.py @@ -1,7 +1,11 @@ import numpy as np import pytest -from pandas import Series +from pandas import ( + NA, + Series, + StringDtype, +) import 
pandas._testing as tm @@ -63,3 +67,22 @@ def test_mask_inplace(): rs = s.copy() rs.mask(cond, -s, inplace=True) tm.assert_series_equal(rs, s.mask(cond, -s)) + + +def test_mask_stringdtype(): + # GH 40824 + ser = Series( + ["foo", "bar", "baz", NA], + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + filtered_ser = Series(["this", "that"], index=["id2", "id3"], dtype=StringDtype()) + filter_ser = Series([False, True, True, False]) + result = ser.mask(filter_ser, filtered_ser) + + expected = Series( + [NA, "this", "that", NA], + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + tm.assert_series_equal(result, expected)
- [x] closes #40824 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40941
2021-04-14T14:04:23Z
2021-04-16T16:14:42Z
2021-04-16T16:14:42Z
2021-04-16T16:16:25Z
DOC: clarify (un)aware logic in tz_localize() docstring
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 1be2ec0dd92d7..06ff6ffa61559 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -857,8 +857,9 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArr This method takes a time zone (tz) naive Datetime Array/Index object and makes this time zone aware. It does not move the time to another time zone. - Time zone localization helps to switch from time zone aware to time - zone unaware objects. + + This method can also be used to do the inverse -- to create a time + zone unaware object from an aware object. To that end, pass `tz=None`. Parameters ----------
A simple, hopefully no-brainer documentation patch; I came across this while reading https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DatetimeIndex.tz_localize.html#pandas-datetimeindex-tz-localize.
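A quick sketch of the two directions the reworded docstring describes, shown on a `DatetimeIndex` (standard `tz_localize` behavior):

```python
import pandas as pd

idx = pd.DatetimeIndex(["2021-04-14 10:00"])  # tz-naive

aware = idx.tz_localize("UTC")    # naive -> aware: attach a time zone
naive = aware.tz_localize(None)   # aware -> naive: drop it, keeping wall time

print(aware[0])  # 2021-04-14 10:00:00+00:00
print(naive[0])  # 2021-04-14 10:00:00
```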
https://api.github.com/repos/pandas-dev/pandas/pulls/40940
2021-04-14T10:54:48Z
2021-04-16T16:24:04Z
2021-04-16T16:24:04Z
2021-04-16T16:24:09Z
DOC/CI: add missing import to ipython directive in `whatsnew/v0.11.0.rst`
diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst index a69d1ad1dec3b..0fba784e36661 100644 --- a/doc/source/whatsnew/v0.11.0.rst +++ b/doc/source/whatsnew/v0.11.0.rst @@ -306,6 +306,7 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT .. ipython:: python + import datetime s = pd.Series([datetime.datetime(2001, 1, 2, 0, 0) for i in range(3)]) s.dtype s[1] = np.nan
Some of the doc builds have failed recently because of this ipython directive, so I added the fix as advised in the error message.
https://api.github.com/repos/pandas-dev/pandas/pulls/40937
2021-04-14T08:17:33Z
2021-04-15T08:56:14Z
2021-04-15T08:56:14Z
2021-04-15T11:41:31Z
CLN: remove unused out kwd from take functions
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index b97a777400134..93d87f6bb4dfa 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -100,14 +100,13 @@ def take_nd( return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) arr = np.asarray(arr) - return _take_nd_ndarray(arr, indexer, axis, None, fill_value, allow_fill) + return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) def _take_nd_ndarray( arr: np.ndarray, indexer, axis: int, - out: np.ndarray | None, fill_value, allow_fill: bool, ) -> np.ndarray: @@ -119,7 +118,7 @@ def _take_nd_ndarray( indexer = ensure_platform_int(indexer) indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, out, fill_value, allow_fill + arr, indexer, fill_value, allow_fill ) flip_order = False @@ -129,23 +128,20 @@ def _take_nd_ndarray( if flip_order: arr = arr.T axis = arr.ndim - axis - 1 - if out is not None: - out = out.T # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value - if out is None: - out_shape_ = list(arr.shape) - out_shape_[axis] = len(indexer) - out_shape = tuple(out_shape_) - if arr.flags.f_contiguous and axis == arr.ndim - 1: - # minor tweak that can make an order-of-magnitude difference - # for dataframes initialized directly from 2-d ndarrays - # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its - # f-contiguous transpose) - out = np.empty(out_shape, dtype=dtype, order="F") - else: - out = np.empty(out_shape, dtype=dtype) + out_shape_ = list(arr.shape) + out_shape_[axis] = len(indexer) + out_shape = tuple(out_shape_) + if arr.flags.f_contiguous and axis == arr.ndim - 1: + # minor tweak that can make an order-of-magnitude difference + # for dataframes initialized directly from 2-d ndarrays + # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its + # f-contiguous transpose) + out = np.empty(out_shape, dtype=dtype, order="F") + else: + out = np.empty(out_shape, dtype=dtype) func = _get_take_nd_function( arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info @@ -190,7 +186,7 @@ def take_1d( return arr.take(indexer) indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, None, fill_value, True + arr, indexer, fill_value, True ) # at this point, it's guaranteed that dtype can hold both the arr values @@ -516,7 +512,6 @@ def _take_2d_multi_object( def _take_preprocess_indexer_and_fill_value( arr: np.ndarray, indexer: np.ndarray, - out: np.ndarray | None, fill_value, allow_fill: bool, ): @@ -534,10 +529,7 @@ def _take_preprocess_indexer_and_fill_value( mask = indexer == -1 needs_masking = mask.any() mask_info = mask, needs_masking - if needs_masking: - if out is not None and out.dtype != dtype: - raise TypeError("Incompatible type for fill_value") - else: + if not needs_masking: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype)
Un-revert part of #40510 in the hopes of tracking down where the perf impact was; xref #40852, which un-reverted a different part. Let's not merge for a few days, to make sure any surprising effects from #40852 show up in the ASVs.
https://api.github.com/repos/pandas-dev/pandas/pulls/40934
2021-04-13T22:34:56Z
2021-04-20T22:48:34Z
2021-04-20T22:48:33Z
2021-04-20T23:05:19Z
TYP: fix mypy on master
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index d6c5935ecf685..a381a7bcb33f5 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -177,9 +177,6 @@ def pinner(cls): class SeriesGroupBy(GroupBy[Series]): _apply_allowlist = base.series_apply_allowlist - # Defined as a cache_readonly in SelectionMixin - _obj_with_exclusions: Series - def _iterate_slices(self) -> Iterable[Series]: yield self._selected_obj @@ -930,9 +927,6 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None): @pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist) class DataFrameGroupBy(GroupBy[DataFrame]): - # Defined as a cache_readonly in SelectionMixin - _obj_with_exclusions: DataFrame - _apply_allowlist = base.dataframe_apply_allowlist _agg_examples_doc = dedent(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40930
2021-04-13T16:49:25Z
2021-04-13T20:55:24Z
2021-04-13T20:55:24Z
2021-04-13T20:55:35Z
CLN: preliminary refactor before `Styler.highlight_quantile`
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9b4673ddb7906..f51f81d7c3504 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1558,55 +1558,10 @@ def highlight_between( .. figure:: ../../_static/style/hbetw_props.png """ - - def f( - data: FrameOrSeries, - props: str, - left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None, - right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None, - inclusive: bool | str = True, - ) -> np.ndarray: - if np.iterable(left) and not isinstance(left, str): - left = _validate_apply_axis_arg( - left, "left", None, data # type: ignore[arg-type] - ) - - if np.iterable(right) and not isinstance(right, str): - right = _validate_apply_axis_arg( - right, "right", None, data # type: ignore[arg-type] - ) - - # get ops with correct boundary attribution - if inclusive == "both": - ops = (operator.ge, operator.le) - elif inclusive == "neither": - ops = (operator.gt, operator.lt) - elif inclusive == "left": - ops = (operator.ge, operator.lt) - elif inclusive == "right": - ops = (operator.gt, operator.le) - else: - raise ValueError( - f"'inclusive' values can be 'both', 'left', 'right', or 'neither' " - f"got {inclusive}" - ) - - g_left = ( - ops[0](data, left) - if left is not None - else np.full(data.shape, True, dtype=bool) - ) - l_right = ( - ops[1](data, right) - if right is not None - else np.full(data.shape, True, dtype=bool) - ) - return np.where(g_left & l_right, props, "") - if props is None: props = f"background-color: {color};" return self.apply( - f, # type: ignore[arg-type] + _highlight_between, # type: ignore[arg-type] axis=axis, subset=subset, props=props, @@ -1831,3 +1786,51 @@ def css(rgba) -> str: index=data.index, columns=data.columns, ) + + +def _highlight_between( + data: FrameOrSeries, + props: str, + left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None, + right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None, + inclusive: bool | str = True, +) -> np.ndarray: + """ + Return an array of css props based on condition of data values within given range. + """ + if np.iterable(left) and not isinstance(left, str): + left = _validate_apply_axis_arg( + left, "left", None, data # type: ignore[arg-type] + ) + + if np.iterable(right) and not isinstance(right, str): + right = _validate_apply_axis_arg( + right, "right", None, data # type: ignore[arg-type] + ) + + # get ops with correct boundary attribution + if inclusive == "both": + ops = (operator.ge, operator.le) + elif inclusive == "neither": + ops = (operator.gt, operator.lt) + elif inclusive == "left": + ops = (operator.ge, operator.lt) + elif inclusive == "right": + ops = (operator.gt, operator.le) + else: + raise ValueError( + f"'inclusive' values can be 'both', 'left', 'right', or 'neither' " + f"got {inclusive}" + ) + + g_left = ( + ops[0](data, left) + if left is not None + else np.full(data.shape, True, dtype=bool) + ) + l_right = ( + ops[1](data, right) + if right is not None + else np.full(data.shape, True, dtype=bool) + ) + return np.where(g_left & l_right, props, "")
The current structure is:

```
def highlight_between(self, ...):
    def f(data, props, ...):
        return ...
    return self.apply(f, ...)
```

So that `Styler.highlight_quantile` can reuse the code, this PR refactors the above to:

```
def highlight_between(self, ...):
    return self.apply(_highlight_between, ...)


def _highlight_between(data, props, ...):
    return ...
```
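As an illustration of the design choice (a toy sketch, not pandas code; `MiniStyler` and its methods are hypothetical): moving the closure to a module-level helper lets several methods dispatch to the same function through `apply`.

```python
import numpy as np


def _highlight(data: np.ndarray, props: str) -> np.ndarray:
    # Module-level helper: shared by both methods below instead of
    # each one defining its own private closure.
    return np.where(data > 0, props, "")


class MiniStyler:
    def __init__(self, data: np.ndarray):
        self.data = data

    def apply(self, func, **kwargs) -> np.ndarray:
        return func(self.data, **kwargs)

    def highlight_between(self) -> np.ndarray:
        return self.apply(_highlight, props="background-color: yellow;")

    def highlight_quantile(self) -> np.ndarray:
        # Reuses the same helper rather than duplicating its logic.
        return self.apply(_highlight, props="background-color: red;")


print(MiniStyler(np.array([-1.0, 2.0])).highlight_between())
# ['' 'background-color: yellow;']
```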
https://api.github.com/repos/pandas-dev/pandas/pulls/40928
2021-04-13T15:34:33Z
2021-04-13T23:19:12Z
2021-04-13T23:19:12Z
2021-04-14T05:08:38Z
Fix 40420: Interpret NaN in clip() as no bound.
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 34269185bccd6..34d833bb52d99 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -910,6 +910,7 @@ Other - Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`) - Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`) - Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`) +- Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d69e933164118..c77a3717c4c03 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7341,8 +7341,6 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace): return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) - subset = method(threshold, axis=axis) | isna(self) - # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. @@ -7351,6 +7349,18 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] + + # GH 40420 + # Treat missing thresholds as no bounds, not clipping the values + if is_list_like(threshold): + fill_value = np.inf if method.__name__ == "le" else -np.inf + threshold_inf = threshold.fillna(fill_value) + else: + threshold_inf = threshold + + subset = method(threshold_inf, axis=axis) | isna(self) + + # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) @overload @@ -7482,10 +7492,12 @@ def clip( ---------- lower : float or array_like, default None Minimum threshold value. All values below this - threshold will be set to it. + threshold will be set to it. A missing + threshold (e.g `NA`) will not clip the value. upper : float or array_like, default None Maximum threshold value. All values above this - threshold will be set to it. + threshold will be set to it. A missing + threshold (e.g `NA`) will not clip the value. axis : int or str axis name, optional Align object with lower and upper along the given axis. 
inplace : bool, default False @@ -7546,6 +7558,25 @@ def clip( 2 0 3 3 6 8 4 5 3 + + Clips using specific lower threshold per column element, with missing values: + + >>> t = pd.Series([2, -4, np.NaN, 6, 3]) + >>> t + 0 2.0 + 1 -4.0 + 2 NaN + 3 6.0 + 4 3.0 + dtype: float64 + + >>> df.clip(t, axis=0) + col_0 col_1 + 0 9 2 + 1 -3 -4 + 2 0 6 + 3 6 8 + 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") @@ -7558,9 +7589,17 @@ def clip( # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN - if not is_list_like(lower) and np.any(isna(lower)): + isna_lower = isna(lower) + if not is_list_like(lower): + if np.any(isna_lower): + lower = None + elif np.all(isna_lower): lower = None - if not is_list_like(upper) and np.any(isna(upper)): + isna_upper = isna(upper) + if not is_list_like(upper): + if np.any(isna_upper): + upper = None + elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index 8a2374a414482..6525109da4394 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -144,17 +144,25 @@ def test_clip_with_na_args(self, float_frame): tm.assert_frame_equal(float_frame.clip(np.nan), float_frame) tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame) - # GH#19992 + # GH#19992 and adjusted in GH#40420 df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]}) result = df.clip(lower=[4, 5, np.nan], axis=0) expected = DataFrame( - {"col_0": [4, 5, np.nan], "col_1": [4, 5, np.nan], "col_2": [7, 8, np.nan]} + {"col_0": [4, 5, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]} ) tm.assert_frame_equal(result, expected) result = df.clip(lower=[4, 5, np.nan], axis=1) expected = DataFrame( - {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [np.nan, np.nan, np.nan]} + {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [7, 8, 9]} ) tm.assert_frame_equal(result, expected) + + # GH#40420 + data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]} + df = DataFrame(data) + t = Series([2, -4, np.NaN, 6, 3]) + result = df.clip(lower=t, axis=0) + expected = DataFrame({"col_0": [9, -3, 0, 6, 5], "col_1": [2, -4, 6, 8, 3]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py index 528e95f65c8f4..442718d677101 100644 --- a/pandas/tests/series/methods/test_clip.py +++ b/pandas/tests/series/methods/test_clip.py @@ -49,8 +49,13 @@ def test_clip_with_na_args(self): tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) # GH#19992 - tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan])) - tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1])) + tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, 3])) + tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, 2, 1])) + + # GH#40420 + s = Series([1, 2, 3]) + result = s.clip(0, [np.nan, np.nan, np.nan]) + tm.assert_series_equal(s, result) def test_clip_against_series(self): # GH#6966
- [x] closes #40420 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
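A quick sketch of the behavioral change, adapted from the updated tests (expected output is the one asserted by the new test):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]})

# a NaN in a list-like threshold now means "no bound" for that row,
# instead of propagating NaN into the result
result = df.clip(lower=[4, 5, np.nan], axis=0)
#    col_0  col_1  col_2
# 0      4      4      7
# 1      5      5      8
# 2      3      6      9
```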
https://api.github.com/repos/pandas-dev/pandas/pulls/40927
2021-04-13T15:34:11Z
2021-04-23T14:58:08Z
2021-04-23T14:58:07Z
2021-12-15T06:44:52Z
ENH: `Styler.highlight_quantile` method
diff --git a/doc/source/_static/style/hq_ax1.png b/doc/source/_static/style/hq_ax1.png new file mode 100644 index 0000000000000..95d840b7c8f99 Binary files /dev/null and b/doc/source/_static/style/hq_ax1.png differ diff --git a/doc/source/_static/style/hq_axNone.png b/doc/source/_static/style/hq_axNone.png new file mode 100644 index 0000000000000..40a33b194e640 Binary files /dev/null and b/doc/source/_static/style/hq_axNone.png differ diff --git a/doc/source/_static/style/hq_props.png b/doc/source/_static/style/hq_props.png new file mode 100644 index 0000000000000..1f11749096690 Binary files /dev/null and b/doc/source/_static/style/hq_props.png differ diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 85d9acff353be..bba71b0d62e92 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -119,7 +119,9 @@ to accept more universal CSS language for arguments, such as ``'color:red;'`` in to allow custom CSS highlighting instead of default background coloring (:issue:`40242`). Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient` method to shade elements based on a given gradient map and not be restricted only to -values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). +values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). Additional +built-in methods such as :meth:`.Styler.highlight_between` and :meth:`.Styler.highlight_quantile` +have been added (:issue:`39821` and :issue:`40926`). The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`). diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f51f81d7c3504..7998365234682 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1355,6 +1355,7 @@ def highlight_null( Styler.highlight_max: Highlight the maximum with a style. Styler.highlight_min: Highlight the minimum with a style. Styler.highlight_between: Highlight a defined range with a style. + Styler.highlight_quantile: Highlight values defined by a quantile with a style. """ def f(data: DataFrame, props: str) -> np.ndarray: @@ -1403,6 +1404,7 @@ def highlight_max( Styler.highlight_null: Highlight missing values with a style. Styler.highlight_min: Highlight the minimum with a style. Styler.highlight_between: Highlight a defined range with a style. + Styler.highlight_quantile: Highlight values defined by a quantile with a style. """ def f(data: FrameOrSeries, props: str) -> np.ndarray: @@ -1451,6 +1453,7 @@ def highlight_min( Styler.highlight_null: Highlight missing values with a style. Styler.highlight_max: Highlight the maximum with a style. Styler.highlight_between: Highlight a defined range with a style. + Styler.highlight_quantile: Highlight values defined by a quantile with a style. """ def f(data: FrameOrSeries, props: str) -> np.ndarray: @@ -1507,6 +1510,7 @@ def highlight_between( Styler.highlight_null: Highlight missing values with a style. Styler.highlight_max: Highlight the maximum with a style. Styler.highlight_min: Highlight the minimum with a style. + Styler.highlight_quantile: Highlight values defined by a quantile with a style. 
Notes ----- @@ -1570,6 +1574,110 @@ def highlight_between( inclusive=inclusive, ) + def highlight_quantile( + self, + subset: IndexLabel | None = None, + color: str = "yellow", + axis: Axis | None = 0, + q_left: float = 0.0, + q_right: float = 1.0, + interpolation: str = "linear", + inclusive: str = "both", + props: str | None = None, + ) -> Styler: + """ + Highlight values defined by a quantile with a style. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + subset : IndexSlice, default None + A valid slice for ``data`` to limit the style application to. + color : str, default 'yellow' + Background color to use for highlighting. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis along which to determine and highlight quantiles. If ``None``, quantiles + are measured over the entire DataFrame. See examples. + q_left : float, default 0 + Left bound, in [0, q_right), for the target quantile range. + q_right : float, default 1 + Right bound, in (q_left, 1], for the target quantile range. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for + quantile estimation. + inclusive : {'both', 'neither', 'left', 'right'} + Identify whether quantile bounds are closed or open. + props : str, default None + CSS properties to use for highlighting. If ``props`` is given, ``color`` + is not used. + + Returns + ------- + self : Styler + + See Also + -------- + Styler.highlight_null: Highlight missing values with a style. + Styler.highlight_max: Highlight the maximum with a style. + Styler.highlight_min: Highlight the minimum with a style. + Styler.highlight_between: Highlight a defined range with a style. + + Notes + ----- + This function does not work with ``str`` dtypes. + + Examples + -------- + Using ``axis=None`` and applying a quantile to all collective data + + >>> df = pd.DataFrame(np.arange(10).reshape(2,5) + 1) + >>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75") + + .. figure:: ../../_static/style/hq_axNone.png + + Or highlight quantiles row-wise or column-wise, in this case row-wise + + >>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75") + + .. figure:: ../../_static/style/hq_ax1.png + + Use ``props`` instead of default background coloring + + >>> df.style.highlight_quantile(axis=None, q_left=0.2, q_right=0.8, + ... props='font-weight:bold;color:#e83e8c') + + .. figure:: ../../_static/style/hq_props.png + """ + subset_ = slice(None) if subset is None else subset + subset_ = non_reducing_slice(subset_) + data = self.data.loc[subset_] + + # after quantile is found along axis, e.g. along rows, + # applying the calculated quantile to alternate axis, e.g. 
to each column + kwargs = {"q": [q_left, q_right], "interpolation": interpolation} + if axis in [0, "index"]: + q = data.quantile(axis=axis, numeric_only=False, **kwargs) + axis_apply: int | None = 1 + elif axis in [1, "columns"]: + q = data.quantile(axis=axis, numeric_only=False, **kwargs) + axis_apply = 0 + else: # axis is None + q = Series(data.to_numpy().ravel()).quantile(**kwargs) + axis_apply = None + + if props is None: + props = f"background-color: {color};" + return self.apply( + _highlight_between, # type: ignore[arg-type] + axis=axis_apply, + subset=subset, + props=props, + left=q.iloc[0], + right=q.iloc[1], + inclusive=inclusive, + ) + @classmethod def from_custom_template(cls, searchpath, name): """ diff --git a/pandas/tests/io/formats/style/test_highlight.py b/pandas/tests/io/formats/style/test_highlight.py index b8c194f8955ab..9e956e055d1aa 100644 --- a/pandas/tests/io/formats/style/test_highlight.py +++ b/pandas/tests/io/formats/style/test_highlight.py @@ -142,3 +142,54 @@ def test_highlight_between_inclusive(styler, inclusive, expected): kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]} result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute() assert result.ctx == expected + + +@pytest.mark.parametrize( + "kwargs", + [ + {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case + {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis + {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset + {"q_left": 0.5, "axis": 0}, # test no high + {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low + {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop + ], +) +def test_highlight_quantile(styler, kwargs): + expected = { + (2, 0): [("background-color", "yellow")], + (2, 1): [("background-color", "yellow")], + } + result = styler.highlight_quantile(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.skipif(np.__version__[:4] in ["1.16", "1.17"], reason="Numpy Issue #14831") +@pytest.mark.parametrize( + "f,kwargs", + [ + ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}), + ("highlight_max", {"axis": 0, "subset": [0]}), + ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}), + ("highlight_between", {"subset": [0]}), + ], +) +@pytest.mark.parametrize( + "df", + [ + DataFrame([[0, 10], [20, 30]], dtype=int), + DataFrame([[0, 10], [20, 30]], dtype=float), + DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"), + DataFrame([[0, 10], [20, 30]], dtype=str), + DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"), + ], +) +def test_all_highlight_dtypes(f, kwargs, df): + if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)): + return None # quantile incompatible with str + if f == "highlight_between": + kwargs["left"] = df.iloc[1, 0] # set the range low for testing + + expected = {(1, 0): [("background-color", "yellow")]} + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected
![Screen Shot 2021-04-14 at 08 43 54](https://user-images.githubusercontent.com/24256554/114679634-a185c900-9d0c-11eb-9865-38a3aac1802c.png)
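For reference, a usage sketch mirroring the docstring examples added in the diff (`.style` requires jinja2):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(10).reshape(2, 5) + 1)

# shade the top quintile, measured over the entire frame
df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")

# or measure the quantile row-wise instead
df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
```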
https://api.github.com/repos/pandas-dev/pandas/pulls/40926
2021-04-13T15:24:37Z
2021-04-20T23:21:23Z
2021-04-20T23:21:23Z
2021-04-21T06:08:34Z
BUG: concat with DTI and all-None Index
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index cdfc2e5686b91..16f9284802407 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -14,7 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - +- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 119326622ff3f..28dfdc23eb76e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2977,7 +2977,8 @@ def _union(self, other: Index, sort): # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) - return Index(value_list)._values # do type inference here + # If objects are unorderable, we must have object dtype. + return np.array(value_list, dtype=object) elif not other.is_unique and not self.is_unique: # self and other both have duplicates diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 46029b8a695ea..2ed38670e88a6 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -607,3 +607,23 @@ def test_concat_repeated_keys(keys, integrity): tuples = list(zip(keys, ["a", "b", "c"])) expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples)) tm.assert_series_equal(result, expected) + + +def test_concat_null_object_with_dti(): + # GH#40841 + dti = pd.DatetimeIndex( + ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)" + ) + right = DataFrame(data={"C": [0.5274]}, index=dti) + + idx = Index([None], dtype="object", name="Maybe Time (UTC)") + left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx) + + result = concat([left, right], axis="columns") + + exp_index = Index([None, dti[0]], dtype=object) + expected = DataFrame( + {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]}, + index=exp_index, + ) + tm.assert_frame_equal(result, expected)
- [x] closes #40841 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
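A minimal reproduction of the fixed case, adapted from the new test:

```python
import numpy as np
import pandas as pd

dti = pd.DatetimeIndex(
    ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
)
right = pd.DataFrame({"C": [0.5274]}, index=dti)

idx = pd.Index([None], dtype="object", name="Maybe Time (UTC)")
left = pd.DataFrame({"A": [None], "B": [np.nan]}, index=idx)

# previously raised while unioning the object and datetime indexes;
# now returns a frame whose object-dtype index holds both labels
result = pd.concat([left, right], axis="columns")
```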
https://api.github.com/repos/pandas-dev/pandas/pulls/40924
2021-04-13T14:19:16Z
2021-04-14T12:53:22Z
2021-04-14T12:53:21Z
2021-04-14T14:27:06Z
Backport PR #40761 on branch 1.2.x (DOC: collapse subpages in sidebar for API reference docs)
diff --git a/doc/_templates/sidebar-nav-bs.html b/doc/_templates/sidebar-nav-bs.html new file mode 100644 index 0000000000000..7e0043e771e72 --- /dev/null +++ b/doc/_templates/sidebar-nav-bs.html @@ -0,0 +1,9 @@ +<nav class="bd-links" id="bd-docs-nav" aria-label="Main navigation"> + <div class="bd-toc-item active"> + {% if pagename.startswith("reference") %} + {{ generate_nav_html("sidebar", maxdepth=4, collapse=True, includehidden=True, titles_only=True) }} + {% else %} + {{ generate_nav_html("sidebar", maxdepth=4, collapse=False, includehidden=True, titles_only=True) }} + {% endif %} + </div> +</nav>
Backport PR #40761: DOC: collapse subpages in sidebar for API reference docs
https://api.github.com/repos/pandas-dev/pandas/pulls/40922
2021-04-13T12:26:39Z
2021-04-13T14:21:25Z
2021-04-13T14:21:25Z
2021-04-13T14:21:26Z
ENH: Nullable integer/boolean/floating support in lib inferencing functions
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 4c647056641f5..11f578d9e4d60 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -5,6 +5,8 @@ from typing import ( Any, Callable, Generator, + Literal, + overload, ) import numpy as np @@ -70,12 +72,24 @@ def maybe_convert_objects( convert_to_nullable_integer: bool = False, ) -> ArrayLike: ... +@overload def maybe_convert_numeric( values: np.ndarray, # np.ndarray[object] na_values: set, convert_empty: bool = True, coerce_numeric: bool = False, -) -> np.ndarray: ... + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... + +@overload +def maybe_convert_numeric( + values: np.ndarray, # np.ndarray[object] + na_values: set, + convert_empty: bool = True, + coerce_numeric: bool = False, + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... # TODO: restrict `arr`? def ensure_string_array( diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 77375cac39921..7b42c07b65c89 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2029,7 +2029,8 @@ def maybe_convert_numeric( set na_values, bint convert_empty=True, bint coerce_numeric=False, -) -> ndarray: + bint convert_to_masked_nullable=False, +) -> tuple[np.ndarray, np.ndarray | None]: """ Convert object array to a numeric array if possible. @@ -2053,14 +2054,20 @@ def maybe_convert_numeric( numeric array has no suitable numerical dtype to return (i.e. uint64, int32, uint8). If set to False, the original object array will be returned. Otherwise, a ValueError will be raised. - + convert_to_masked_nullable : bool, default False + Whether to return a mask for the converted values. This also disables + upcasting for ints with nulls to float64. Returns ------- np.ndarray Array of converted object values to numerical ones. + + Optional[np.ndarray] + If convert_to_masked_nullable is True, + returns a boolean mask for the converted values, otherwise returns None. 
""" if len(values) == 0: - return np.array([], dtype='i8') + return (np.array([], dtype='i8'), None) # fastpath for ints - try to convert all based on first value cdef: @@ -2070,7 +2077,7 @@ def maybe_convert_numeric( try: maybe_ints = values.astype('i8') if (maybe_ints == values).all(): - return maybe_ints + return (maybe_ints, None) except (ValueError, OverflowError, TypeError): pass @@ -2084,21 +2091,40 @@ def maybe_convert_numeric( ndarray[int64_t] ints = np.empty(n, dtype='i8') ndarray[uint64_t] uints = np.empty(n, dtype='u8') ndarray[uint8_t] bools = np.empty(n, dtype='u1') + ndarray[uint8_t] mask = np.zeros(n, dtype="u1") float64_t fval + bint allow_null_in_int = convert_to_masked_nullable for i in range(n): val = values[i] + # We only want to disable NaNs showing as float if + # a) convert_to_masked_nullable = True + # b) no floats have been seen ( assuming an int shows up later ) + # However, if no ints present (all null array), we need to return floats + allow_null_in_int = convert_to_masked_nullable and not seen.float_ if val.__hash__ is not None and val in na_values: - seen.saw_null() + if allow_null_in_int: + seen.null_ = True + mask[i] = 1 + else: + if convert_to_masked_nullable: + mask[i] = 1 + seen.saw_null() floats[i] = complexes[i] = NaN elif util.is_float_object(val): fval = val if fval != fval: seen.null_ = True - + if allow_null_in_int: + mask[i] = 1 + else: + if convert_to_masked_nullable: + mask[i] = 1 + seen.float_ = True + else: + seen.float_ = True floats[i] = complexes[i] = fval - seen.float_ = True elif util.is_integer_object(val): floats[i] = complexes[i] = val @@ -2121,7 +2147,13 @@ def maybe_convert_numeric( floats[i] = uints[i] = ints[i] = bools[i] = val seen.bool_ = True elif val is None or val is C_NA: - seen.saw_null() + if allow_null_in_int: + seen.null_ = True + mask[i] = 1 + else: + if convert_to_masked_nullable: + mask[i] = 1 + seen.saw_null() floats[i] = complexes[i] = NaN elif hasattr(val, '__len__') and len(val) == 0: if convert_empty or seen.coerce_numeric: @@ -2142,9 +2174,11 @@ def maybe_convert_numeric( if fval in na_values: seen.saw_null() floats[i] = complexes[i] = NaN + mask[i] = 1 else: if fval != fval: seen.null_ = True + mask[i] = 1 floats[i] = fval @@ -2152,7 +2186,10 @@ def maybe_convert_numeric( as_int = int(val) if as_int in na_values: - seen.saw_null() + mask[i] = 1 + seen.null_ = True + if not allow_null_in_int: + seen.float_ = True else: seen.saw_int(as_int) @@ -2180,22 +2217,34 @@ def maybe_convert_numeric( floats[i] = NaN if seen.check_uint64_conflict(): - return values + return (values, None) + + # This occurs since we disabled float nulls showing as null in anticipation + # of seeing ints that were never seen. 
So then, we return float + if allow_null_in_int and seen.null_ and not seen.int_: + seen.float_ = True if seen.complex_: - return complexes + return (complexes, None) elif seen.float_: - return floats + if seen.null_ and convert_to_masked_nullable: + return (floats, mask.view(np.bool_)) + return (floats, None) elif seen.int_: + if seen.null_ and convert_to_masked_nullable: + if seen.uint_: + return (uints, mask.view(np.bool_)) + else: + return (ints, mask.view(np.bool_)) if seen.uint_: - return uints + return (uints, None) else: - return ints + return (ints, None) elif seen.bool_: - return bools.view(np.bool_) + return (bools.view(np.bool_), None) elif seen.uint_: - return uints - return ints + return (uints, None) + return (ints, None) @cython.boundscheck(False) diff --git a/pandas/_libs/ops.pyi b/pandas/_libs/ops.pyi index b4f42f217a5db..11d67dfb93d5f 100644 --- a/pandas/_libs/ops.pyi +++ b/pandas/_libs/ops.pyi @@ -1,6 +1,8 @@ from typing import ( Any, Callable, + Literal, + overload, ) import numpy as np @@ -35,9 +37,19 @@ def vec_binop( op: _BinOp, # binary operator ) -> np.ndarray: ... +@overload +def maybe_convert_bool( + arr: np.ndarray, # np.ndarray[object] + true_values=..., + false_values=..., + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... +@overload def maybe_convert_bool( arr: np.ndarray, # np.ndarray[object] true_values=..., - false_values=... -) -> np.ndarray: ... + false_values=..., + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx index 7951bb5c093ef..ac8a7f2cc57f7 100644 --- a/pandas/_libs/ops.pyx +++ b/pandas/_libs/ops.pyx @@ -24,10 +24,7 @@ import_array() from pandas._libs.missing cimport checknull -from pandas._libs.util cimport ( - UINT8_MAX, - is_nan, -) +from pandas._libs.util cimport is_nan @cython.wraparound(False) @@ -212,7 +209,7 @@ def scalar_binop(object[:] values, object val, object op) -> ndarray: else: result[i] = op(x, val) - return maybe_convert_bool(result.base) + return maybe_convert_bool(result.base)[0] @cython.wraparound(False) @@ -254,21 +251,25 @@ def vec_binop(object[:] left, object[:] right, object op) -> ndarray: else: raise - return maybe_convert_bool(result.base) # `.base` to access np.ndarray + return maybe_convert_bool(result.base)[0] # `.base` to access np.ndarray def maybe_convert_bool(ndarray[object] arr, - true_values=None, false_values=None) -> ndarray: + true_values=None, + false_values=None, + convert_to_masked_nullable=False + ) -> tuple[np.ndarray, np.ndarray | None]: cdef: Py_ssize_t i, n ndarray[uint8_t] result + ndarray[uint8_t] mask object val set true_vals, false_vals - int na_count = 0 + bint has_na = False n = len(arr) result = np.empty(n, dtype=np.uint8) - + mask = np.zeros(n, dtype=np.uint8) # the defaults true_vals = {'True', 'TRUE', 'true'} false_vals = {'False', 'FALSE', 'false'} @@ -291,16 +292,19 @@ def maybe_convert_bool(ndarray[object] arr, result[i] = 1 elif val in false_vals: result[i] = 0 - elif isinstance(val, float): - result[i] = UINT8_MAX - na_count += 1 + elif is_nan(val): + mask[i] = 1 + result[i] = 0 # Value here doesn't matter, will be replaced w/ nan + has_na = True else: - return arr + return (arr, None) - if na_count > 0: - mask = result == UINT8_MAX - arr = result.view(np.bool_).astype(object) - np.putmask(arr, mask, np.nan) - return arr + if has_na: + if convert_to_masked_nullable: + return (result.view(np.bool_), mask.view(np.bool_)) + else: + arr = 
result.view(np.bool_).astype(object) + np.putmask(arr, mask, np.nan) + return (arr, None) else: - return result.view(np.bool_) + return (result.view(np.bool_), None) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index d4ecec667cc86..46dc97214e2f6 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1356,7 +1356,7 @@ def soft_convert_objects( return converted if numeric and is_object_dtype(values.dtype): - converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True) + converted, _ = lib.maybe_convert_numeric(values, set(), coerce_numeric=True) # If all NaNs, then do not-alter values = converted if not isna(converted).all() else values diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index b7116ee95949b..6f5e8ab900dfd 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -180,7 +180,7 @@ def to_numeric(arg, errors="raise", downcast=None): values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: - values = lib.maybe_convert_numeric( + values, _ = lib.maybe_convert_numeric( values, set(), coerce_numeric=coerce_numeric ) except (ValueError, TypeError): diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index a011a789bf17c..11fbdb860592e 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -676,7 +676,7 @@ def _infer_types(self, values, na_values, try_num_bool=True): if try_num_bool and is_object_dtype(values.dtype): # exclude e.g DatetimeIndex here try: - result = lib.maybe_convert_numeric(values, na_values, False) + result, _ = lib.maybe_convert_numeric(values, na_values, False) except (ValueError, TypeError): # e.g. encountering datetime string gets ValueError # TypeError can be raised in floatify @@ -690,7 +690,7 @@ def _infer_types(self, values, na_values, try_num_bool=True): na_count = parsers.sanitize_objects(values, na_values, False) if result.dtype == np.object_ and try_num_bool: - result = libops.maybe_convert_bool( + result, _ = libops.maybe_convert_bool( np.asarray(values), true_values=self.true_values, false_values=self.false_values, diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index d1e6409307915..076cc155f3626 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -24,6 +24,7 @@ from pandas._libs import ( lib, missing as libmissing, + ops as libops, ) import pandas.util._test_decorators as td @@ -61,7 +62,11 @@ Timestamp, ) import pandas._testing as tm -from pandas.core.arrays import IntegerArray +from pandas.core.arrays import ( + BooleanArray, + FloatingArray, + IntegerArray, +) @pytest.fixture(params=[True, False], ids=str) @@ -416,73 +421,116 @@ def test_isneginf_scalar(self, value, expected): result = libmissing.isneginf_scalar(value) assert result is expected + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + ( + True, + BooleanArray( + np.array([True, False], dtype="bool"), np.array([False, True]) + ), + ), + (False, np.array([True, np.nan], dtype="object")), + ], + ) + def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp): + # GH 40687 + arr = np.array([True, np.NaN], dtype=object) + result = libops.maybe_convert_bool( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + tm.assert_extension_array_equal(BooleanArray(*result), exp) + else: + result = result[0] + 
tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) @pytest.mark.parametrize("coerce_numeric", [True, False]) @pytest.mark.parametrize( "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"] ) @pytest.mark.parametrize("prefix", ["", "-", "+"]) - def test_maybe_convert_numeric_infinities(self, coerce_numeric, infinity, prefix): + def test_maybe_convert_numeric_infinities( + self, coerce_numeric, infinity, prefix, convert_to_masked_nullable + ): # see gh-13274 - result = lib.maybe_convert_numeric( + result, _ = lib.maybe_convert_numeric( np.array([prefix + infinity], dtype=object), na_values={"", "NULL", "nan"}, coerce_numeric=coerce_numeric, + convert_to_masked_nullable=convert_to_masked_nullable, ) expected = np.array([np.inf if prefix in ["", "+"] else -np.inf]) tm.assert_numpy_array_equal(result, expected) - def test_maybe_convert_numeric_infinities_raises(self): + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable): msg = "Unable to parse string" with pytest.raises(ValueError, match=msg): lib.maybe_convert_numeric( np.array(["foo_inf"], dtype=object), na_values={"", "NULL", "nan"}, coerce_numeric=False, + convert_to_masked_nullable=convert_to_masked_nullable, ) - def test_maybe_convert_numeric_post_floatify_nan(self, coerce): + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_maybe_convert_numeric_post_floatify_nan( + self, coerce, convert_to_masked_nullable + ): # see gh-13314 data = np.array(["1.200", "-999.000", "4.500"], dtype=object) expected = np.array([1.2, np.nan, 4.5], dtype=np.float64) nan_values = {-999, -999.0} - out = lib.maybe_convert_numeric(data, nan_values, coerce) - tm.assert_numpy_array_equal(out, expected) + out = lib.maybe_convert_numeric( + data, + nan_values, + coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + if convert_to_masked_nullable: + expected = FloatingArray(expected, np.isnan(expected)) + tm.assert_extension_array_equal(expected, FloatingArray(*out)) + else: + out = out[0] + tm.assert_numpy_array_equal(out, expected) def test_convert_infs(self): arr = np.array(["inf", "inf", "inf"], dtype="O") - result = lib.maybe_convert_numeric(arr, set(), False) + result, _ = lib.maybe_convert_numeric(arr, set(), False) assert result.dtype == np.float64 arr = np.array(["-inf", "-inf", "-inf"], dtype="O") - result = lib.maybe_convert_numeric(arr, set(), False) + result, _ = lib.maybe_convert_numeric(arr, set(), False) assert result.dtype == np.float64 def test_scientific_no_exponent(self): # See PR 12215 arr = np.array(["42E", "2E", "99e", "6e"], dtype="O") - result = lib.maybe_convert_numeric(arr, set(), False, True) + result, _ = lib.maybe_convert_numeric(arr, set(), False, True) assert np.all(np.isnan(result)) def test_convert_non_hashable(self): # GH13324 # make sure that we are handing non-hashables arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object) - result = lib.maybe_convert_numeric(arr, set(), False, True) + result, _ = lib.maybe_convert_numeric(arr, set(), False, True) tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan])) def test_convert_numeric_uint64(self): arr = np.array([2 ** 63], dtype=object) exp = np.array([2 ** 63], dtype=np.uint64) - tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) arr = np.array([str(2 
** 63)], dtype=object) exp = np.array([2 ** 63], dtype=np.uint64) - tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) arr = np.array([np.uint64(2 ** 63)], dtype=object) exp = np.array([2 ** 63], dtype=np.uint64) - tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) @pytest.mark.parametrize( "arr", @@ -495,17 +543,33 @@ def test_convert_numeric_uint64(self): ) def test_convert_numeric_uint64_nan(self, coerce, arr): expected = arr.astype(float) if coerce else arr.copy() - result = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce) + result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce) tm.assert_almost_equal(result, expected) - def test_convert_numeric_uint64_nan_values(self, coerce): + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_uint64_nan_values( + self, coerce, convert_to_masked_nullable + ): arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object) na_values = {2 ** 63} expected = ( np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy() ) - result = lib.maybe_convert_numeric(arr, na_values, coerce_numeric=coerce) + result = lib.maybe_convert_numeric( + arr, + na_values, + coerce_numeric=coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + if convert_to_masked_nullable and coerce: + expected = IntegerArray( + np.array([0, 2 ** 63 + 1], dtype="u8"), + np.array([True, False], dtype="bool"), + ) + result = IntegerArray(*result) + else: + result = result[0] # discard mask tm.assert_almost_equal(result, expected) @pytest.mark.parametrize( @@ -519,16 +583,33 @@ def test_convert_numeric_uint64_nan_values(self, coerce): np.array([str(-1), str(2 ** 63)], dtype=object), ], ) - def test_convert_numeric_int64_uint64(self, case, coerce): + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_int64_uint64( + self, case, coerce, convert_to_masked_nullable + ): expected = case.astype(float) if coerce else case.copy() - result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce) + result, _ = lib.maybe_convert_numeric( + case, + set(), + coerce_numeric=coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + tm.assert_almost_equal(result, expected) - def test_convert_numeric_string_uint64(self): + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_string_uint64(self, convert_to_masked_nullable): # GH32394 result = lib.maybe_convert_numeric( - np.array(["uint64"], dtype=object), set(), coerce_numeric=True + np.array(["uint64"], dtype=object), + set(), + coerce_numeric=True, + convert_to_masked_nullable=convert_to_masked_nullable, ) + if convert_to_masked_nullable: + result = FloatingArray(*result) + else: + result = result[0] assert np.isnan(result) @pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64]) @@ -608,6 +689,54 @@ def test_maybe_convert_objects_nullable_integer(self, exp): tm.assert_extension_array_equal(result, exp) + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + (True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))), + (False, np.array([2, np.nan], dtype="float64")), + ], + ) + def test_maybe_convert_numeric_nullable_integer( + self, convert_to_masked_nullable, exp + ): + # GH 40687 + arr = np.array([2, np.NaN], dtype=object) + 
result = lib.maybe_convert_numeric( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + result = IntegerArray(*result) + tm.assert_extension_array_equal(result, exp) + else: + result = result[0] + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + ( + True, + FloatingArray( + np.array([2.0, 0.0], dtype="float64"), np.array([False, True]) + ), + ), + (False, np.array([2.0, np.nan], dtype="float64")), + ], + ) + def test_maybe_convert_numeric_floating_array( + self, convert_to_masked_nullable, exp + ): + # GH 40687 + arr = np.array([2.0, np.nan], dtype=object) + result = lib.maybe_convert_numeric( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + tm.assert_extension_array_equal(FloatingArray(*result), exp) + else: + result = result[0] + tm.assert_numpy_array_equal(result, exp) + def test_maybe_convert_objects_bool_nan(self): # GH32146 ind = Index([True, False, np.nan], dtype=object)
Precursor for #40687
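A sketch of the new opt-in path, based on the tests added here (`lib.maybe_convert_numeric` is internal API and now returns a `(values, mask)` tuple):

```python
import numpy as np
from pandas._libs import lib
from pandas.core.arrays import IntegerArray

arr = np.array([2, np.nan], dtype=object)

# default path: ints with nulls still upcast to float64, mask is None
values, mask = lib.maybe_convert_numeric(arr, set())
# values -> array([ 2., nan]); mask -> None

# masked path: values stay int64 and a boolean mask marks the nulls
values, mask = lib.maybe_convert_numeric(arr, set(), convert_to_masked_nullable=True)
int_arr = IntegerArray(values, mask)  # [2, <NA>] with dtype Int64
```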
https://api.github.com/repos/pandas-dev/pandas/pulls/40914
2021-04-13T03:00:36Z
2021-05-05T12:46:39Z
2021-05-05T12:46:39Z
2021-05-29T17:55:11Z
REF: re-use maybe_cast_result in Series.combine
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f62aa95e1e814..b68ec3c473a41 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -102,7 +102,6 @@ if TYPE_CHECKING: from typing import Literal - from pandas import Series from pandas.core.arrays import ( DatetimeArray, ExtensionArray, @@ -375,7 +374,11 @@ def trans(x): def maybe_cast_result( - result: ArrayLike, obj: Series, numeric_only: bool = False, how: str = "" + result: ArrayLike, + dtype: DtypeObj, + numeric_only: bool = False, + how: str = "", + same_dtype: bool = True, ) -> ArrayLike: """ Try casting result to a different type if appropriate @@ -384,19 +387,20 @@ def maybe_cast_result( ---------- result : array-like Result to cast. - obj : Series + dtype : np.dtype or ExtensionDtype Input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics or datetimes as well. how : str, default "" How the result was computed. + same_dtype : bool, default True + Specify dtype when calling _from_sequence Returns ------- result : array-like result maybe casted to the dtype. """ - dtype = obj.dtype dtype = maybe_cast_result_dtype(dtype, how) assert not is_scalar(result) @@ -407,7 +411,10 @@ def maybe_cast_result( # things like counts back to categorical cls = dtype.construct_array_type() - result = maybe_cast_to_extension_array(cls, result, dtype=dtype) + if same_dtype: + result = maybe_cast_to_extension_array(cls, result, dtype=dtype) + else: + result = maybe_cast_to_extension_array(cls, result) elif (numeric_only and is_numeric_dtype(dtype)) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index bc5318a1f367c..2e7031ab2888e 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -788,7 +788,7 @@ def _aggregate_series_pure_python(self, obj: Series, func: F): result[label] = res out = lib.maybe_convert_objects(result, try_float=False) - out = maybe_cast_result(out, obj, numeric_only=True) + out = maybe_cast_result(out, obj.dtype, numeric_only=True) return out, counts diff --git a/pandas/core/series.py b/pandas/core/series.py index 5ba68aaa5c16d..1c49a28ef93ed 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -60,15 +60,13 @@ from pandas.core.dtypes.cast import ( convert_dtypes, maybe_box_native, - maybe_cast_to_extension_array, + maybe_cast_result, validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_bool, - is_categorical_dtype, is_dict_like, - is_extension_array_dtype, is_integer, is_iterator, is_list_like, @@ -3079,22 +3077,9 @@ def combine(self, other, func, fill_value=None) -> Series: new_values = [func(lv, other) for lv in self._values] new_name = self.name - if is_categorical_dtype(self.dtype): - pass - elif is_extension_array_dtype(self.dtype): - # TODO: can we do this for only SparseDtype? - # The function can return something of any type, so check - # if the type is compatible with the calling EA. 
- - # error: Incompatible types in assignment (expression has type - # "Union[ExtensionArray, ndarray]", variable has type "List[Any]") - new_values = maybe_cast_to_extension_array( # type: ignore[assignment] - # error: Argument 2 to "maybe_cast_to_extension_array" has incompatible - # type "List[Any]"; expected "Union[ExtensionArray, ndarray]" - type(self._values), - new_values, # type: ignore[arg-type] - ) - return self._constructor(new_values, index=new_index, name=new_name) + res_values = sanitize_array(new_values, None) + res_values = maybe_cast_result(res_values, self.dtype, same_dtype=False) + return self._constructor(res_values, index=new_index, name=new_name) def combine_first(self, other) -> Series: """ diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 55f9d85574f94..7a3f88d0d6c41 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -362,13 +362,18 @@ def _create_arithmetic_method(cls, op): DecimalArrayWithoutCoercion._add_arithmetic_ops() -def test_combine_from_sequence_raises(): +def test_combine_from_sequence_raises(monkeypatch): # https://github.com/pandas-dev/pandas/issues/22850 - ser = pd.Series( - DecimalArrayWithoutFromSequence( - [decimal.Decimal("1.0"), decimal.Decimal("2.0")] - ) - ) + cls = DecimalArrayWithoutFromSequence + + @classmethod + def construct_array_type(cls): + return DecimalArrayWithoutFromSequence + + monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type) + + arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) + ser = pd.Series(arr) result = ser.combine(ser, operator.add) # note: object dtype
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40909
2021-04-12T22:23:22Z
2021-04-13T15:25:30Z
2021-04-13T15:25:30Z
2021-04-13T16:54:33Z
ENH: Make maybe_convert_object respect dtype itemsize
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a286d152f03c3..8cebdbe5ca7b0 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -221,6 +221,7 @@ Other enhancements - :meth:`pandas.read_csv` and :meth:`pandas.read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`) - :meth:`.GroupBy.any` and :meth:`.GroupBy.all` use Kleene logic with nullable data types (:issue:`37506`) - :meth:`.GroupBy.any` and :meth:`.GroupBy.all` return a ``BooleanDtype`` for columns with nullable data types (:issue:`33449`) +- Constructing a :class:`DataFrame` or :class:`Series` with the ``data`` argument being a Python iterable that is *not* a NumPy ``ndarray`` consisting of NumPy scalars will now result in a dtype with a precision the maximum of the NumPy scalars; this was already the case when ``data`` is a NumPy ``ndarray`` (:issue:`40908`) - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - @@ -689,7 +690,7 @@ Numeric - Bug in :meth:`DataFrame.apply` and :meth:`DataFrame.agg` when passed argument ``func="size"`` would operate on the entire ``DataFrame`` instead of rows or columns (:issue:`39934`) - Bug in :meth:`DataFrame.transform` would raise ``SpecificationError`` when passed a dictionary and columns were missing; will now raise a ``KeyError`` instead (:issue:`40004`) - Bug in :meth:`DataFrameGroupBy.rank` giving incorrect results with ``pct=True`` and equal values between consecutive groups (:issue:`40518`) -- +- Bug in :meth:`Series.count` would result in an ``int32`` result on 32-bit platforms when argument ``level=None`` (:issue:`40908`) Conversion ^^^^^^^^^^ diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index a5ed650d72911..77375cac39921 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -68,6 +68,9 @@ cdef extern from "numpy/arrayobject.h": object fields tuple names +cdef extern from "numpy/ndarrayobject.h": + bint PyArray_CheckScalar(obj) nogil + cdef extern from "src/parse_helper.h": int floatify(object, float64_t *result, int *maybe_int) except -1 @@ -209,6 +212,24 @@ def is_scalar(val: object) -> bool: or is_offset_object(val)) +cdef inline int64_t get_itemsize(object val): + """ + Get the itemsize of a NumPy scalar, -1 if not a NumPy scalar. + + Parameters + ---------- + val : object + + Returns + ------- + itemsize : int + """ + if PyArray_CheckScalar(val): + return cnp.PyArray_DescrFromScalar(val).itemsize + else: + return -1 + + def is_iterator(obj: object) -> bool: """ Check if the object is an iterator. @@ -2188,7 +2209,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, Parameters ---------- - values : ndarray[object] + objects : ndarray[object] Array of object elements to convert. try_float : bool, default False If an array-like object contains only float or NaN values is @@ -2212,7 +2233,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, Array of converted object values to more specific dtypes if applicable. 
""" cdef: - Py_ssize_t i, n + Py_ssize_t i, n, itemsize_max = 0 ndarray[float64_t] floats ndarray[complex128_t] complexes ndarray[int64_t] ints @@ -2245,6 +2266,10 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, for i in range(n): val = objects[i] + if itemsize_max != -1: + itemsize = get_itemsize(val) + if itemsize > itemsize_max or itemsize == -1: + itemsize_max = itemsize if val is None: seen.null_ = True @@ -2346,50 +2371,51 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, seen.object_ = True if not seen.object_: + result = None if not safe: if seen.null_ or seen.nan_: if seen.is_float_or_complex: if seen.complex_: - return complexes + result = complexes elif seen.float_: - return floats + result = floats elif seen.int_: if convert_to_nullable_integer: from pandas.core.arrays import IntegerArray - return IntegerArray(ints, mask) + result = IntegerArray(ints, mask) else: - return floats + result = floats elif seen.nan_: - return floats + result = floats else: if not seen.bool_: if seen.datetime_: if not seen.numeric_ and not seen.timedelta_: - return datetimes + result = datetimes elif seen.timedelta_: if not seen.numeric_: - return timedeltas + result = timedeltas elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: # TODO: array full of NaT ambiguity resolve here needed pass elif convert_datetime: - return datetimes + result = datetimes elif convert_timedelta: - return timedeltas + result = timedeltas else: if seen.complex_: - return complexes + result = complexes elif seen.float_: - return floats + result = floats elif seen.int_: if seen.uint_: - return uints + result = uints else: - return ints + result = ints elif seen.is_bool: - return bools.view(np.bool_) + result = bools.view(np.bool_) else: # don't cast int to float, etc. 
@@ -2397,41 +2423,49 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, if seen.is_float_or_complex: if seen.complex_: if not seen.int_: - return complexes + result = complexes elif seen.float_ or seen.nan_: if not seen.int_: - return floats + result = floats else: if not seen.bool_: if seen.datetime_: if not seen.numeric_ and not seen.timedelta_: - return datetimes + result = datetimes elif seen.timedelta_: if not seen.numeric_: - return timedeltas + result = timedeltas elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: # TODO: array full of NaT ambiguity resolve here needed pass elif convert_datetime: - return datetimes + result = datetimes elif convert_timedelta: - return timedeltas + result = timedeltas else: if seen.complex_: if not seen.int_: - return complexes + result = complexes elif seen.float_ or seen.nan_: if not seen.int_: - return floats + result = floats elif seen.int_: if seen.uint_: - return uints + result = uints else: - return ints + result = ints elif seen.is_bool and not seen.nan_: - return bools.view(np.bool_) + result = bools.view(np.bool_) + + if result is uints or result is ints or result is floats or result is complexes: + # cast to the largest itemsize when all values are NumPy scalars + if itemsize_max > 0 and itemsize_max != result.dtype.itemsize: + result = result.astype(result.dtype.kind + str(itemsize_max)) + return result + elif result is not None: + return result return objects diff --git a/pandas/core/series.py b/pandas/core/series.py index 440bc4c89e647..e19d521bda3df 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1891,7 +1891,7 @@ def count(self, level=None): 2 """ if level is None: - return notna(self._values).sum() + return notna(self._values).sum().astype("int64") else: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 907991b97ead1..d1e6409307915 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -31,6 +31,7 @@ from pandas.core.dtypes.common import ( ensure_int32, is_bool, + is_complex, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, @@ -614,6 +615,69 @@ def test_maybe_convert_objects_bool_nan(self): out = lib.maybe_convert_objects(ind.values, safe=1) tm.assert_numpy_array_equal(out, exp) + @pytest.mark.parametrize( + "data0", + [ + True, + 1, + 1.0, + 1.0 + 1.0j, + np.int8(1), + np.int16(1), + np.int32(1), + np.int64(1), + np.float16(1), + np.float32(1), + np.float64(1), + np.complex64(1), + np.complex128(1), + ], + ) + @pytest.mark.parametrize( + "data1", + [ + True, + 1, + 1.0, + 1.0 + 1.0j, + np.int8(1), + np.int16(1), + np.int32(1), + np.int64(1), + np.float16(1), + np.float32(1), + np.float64(1), + np.complex64(1), + np.complex128(1), + ], + ) + def test_maybe_convert_objects_itemsize(self, data0, data1): + # GH 40908 + data = [data0, data1] + arr = np.array(data, dtype="object") + + common_kind = np.find_common_type( + [type(data0), type(data1)], scalar_types=[] + ).kind + kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind + kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind + if kind0 != "python" and kind1 != "python": + kind = common_kind + itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize) + elif is_bool(data0) or is_bool(data1): + kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object" + itemsize = "" + elif 
is_complex(data0) or is_complex(data1): + kind = common_kind + itemsize = 16 + else: + kind = common_kind + itemsize = 8 + + expected = np.array(data, dtype=f"{kind}{itemsize}") + result = lib.maybe_convert_objects(arr) + tm.assert_numpy_array_equal(result, expected) + def test_mixed_dtypes_remain_object_array(self): # GH14956 arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 759277a47f62b..f0d3fb7ff9e1b 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -16,10 +16,6 @@ import numpy as np import pytest -from pandas.compat import ( - IS64, - is_platform_windows, -) from pandas.errors import PerformanceWarning from pandas.core.dtypes.common import is_object_dtype @@ -428,9 +424,6 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): ]: mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch") request.node.add_marker(mark) - elif is_platform_windows() or not IS64: - mark = pytest.mark.xfail(reason="results are int32, expected int64") - request.node.add_marker(mark) super().test_arith_frame_with_scalar(data, all_arithmetic_operators) diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index e8d0a789e7cbd..35ad9f3e9693b 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -117,7 +117,7 @@ def test_from_records_sequencelike(self): result = DataFrame.from_records(tuples, exclude=exclude) result.columns = [columns[i] for i in sorted(columns_to_test)] tm.assert_series_equal(result["C"], df["C"]) - tm.assert_series_equal(result["E1"], df["E1"].astype("float64")) + tm.assert_series_equal(result["E1"], df["E1"]) def test_from_records_sequencelike_empty(self): # empty case diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index d8f93f047e74b..e6ed60dc2bb08 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -10,6 +10,8 @@ import numpy as np import pytest +from pandas.compat import np_version_under1p20 + import pandas as pd from pandas import ( DataFrame, @@ -1514,8 +1516,14 @@ def test_replace_commutative(self, df, to_replace, exp): np.float64(1), ], ) - def test_replace_replacer_dtype(self, replacer): + def test_replace_replacer_dtype(self, request, replacer): # GH26632 + if np.isscalar(replacer) and replacer.dtype.itemsize < 8: + request.node.add_marker( + pytest.mark.xfail( + np_version_under1p20, reason="np.putmask doesn't coerce dtype" + ) + ) df = DataFrame(["a"]) result = df.replace({"a": replacer, "b": replacer}) expected = DataFrame([replacer]) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index ca68885fdc470..c565567754da0 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1924,12 +1924,12 @@ def test_constructor_for_list_with_dtypes(self): # test list of lists/ndarrays df = DataFrame([np.arange(5) for x in range(5)]) result = df.dtypes - expected = Series([np.dtype("int64")] * 5) + expected = Series([np.dtype("int")] * 5) tm.assert_series_equal(result, expected) df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)]) result = df.dtypes - expected = Series([np.dtype("int64")] * 5) + expected = Series([np.dtype("int32")] * 5) 
tm.assert_series_equal(result, expected) # overflow issue? (we always expected int64 upcasting here) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index c5620d6d8c06c..3f6485be871f1 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -99,10 +99,7 @@ def max_value(group): applied = df.groupby("A").apply(max_value) result = applied.dtypes - expected = Series( - [np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")], - index=["A", "B", "C", "D", "value"], - ) + expected = df.dtypes tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 7642f78076dcb..2bb9b51df2285 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -641,7 +641,7 @@ def test_where_series_complex128(self, fill_val, exp_dtype): values = klass([True, False, True, True]) else: values = klass(x * fill_val for x in [5, 6, 7, 8]) - exp = klass([1 + 1j, values[1], 3 + 3j, values[3]]) + exp = klass([1 + 1j, values[1], 3 + 3j, values[3]], dtype=exp_dtype) self._assert_where_conversion(obj, cond, values, exp, exp_dtype) @pytest.mark.parametrize(
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

Precursor to #40790

This adds support for e.g. `float32` NumPy dtypes to maybe_convert_objects. If any non-NumPy scalar is hit, the behavior is the same as master.

This is my first foray into the NumPy C-API, so any tips are appreciated. In particular, I couldn't figure out how to use the C API to do the cast: result = result.astype(result.dtype.kind + str(itemsize))

Not sure if there should also be specific logic for EAs/nullable types.

From a full ASV run:

```
before after ratio
[7d4757b4] [c1288962]
<maybe_convert_object_itemsize~12> <maybe_convert_object_itemsize>
+ 768±8μs 1.01±0ms 1.32 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'float')
+ 779±9μs 1.02±0ms 1.31 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'float')
+ 579±5μs 710±20μs 1.23 arithmetic.NumericInferOps.time_divide(<class 'numpy.int8'>)
+ 608±30μs 715±20μs 1.18 arithmetic.NumericInferOps.time_divide(<class 'numpy.uint8'>)
+ 3.18±0.2μs 3.69±0.3μs 1.16 index_cached_properties.IndexCache.time_engine('UInt64Index')
+ 1.58±0.01ms 1.83±0.01ms 1.16 ctors.SeriesConstructors.time_series_constructor(<function arr_dict at 0x7f5964729820>, False, 'float')
+ 3.30±0.2μs 3.80±0.5μs 1.15 index_cached_properties.IndexCache.time_engine('TimedeltaIndex')
+ 943±80ns 1.08±0.1μs 1.15 index_cached_properties.IndexCache.time_inferred_type('Float64Index')
+ 1.73±0.01ms 1.98±0ms 1.14 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'int')
+ 1.72±0.01ms 1.96±0ms 1.14 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'int')
+ 5.19±0.03μs 5.93±0.3μs 1.14 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1, 6000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 1.65±0.01ms 1.89±0.01ms 1.14 ctors.SeriesConstructors.time_series_constructor(<function arr_dict at 0x7f5964729820>, True, 'float')
+ 6.06±0.06μs 6.91±0.1μs 1.14 tslibs.tz_convert.TimeTZConvert.time_tz_convert_from_utc(1, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 7.46±0.04μs 8.47±0.5μs 1.14 tslibs.offsets.OffestDatetimeArithmetic.time_subtract(<BusinessDay>)
+ 1.15±0.09μs 1.27±0.08μs 1.11 index_cached_properties.IndexCache.time_values('UInt64Index')
- 11.7±1ms 10.5±0.1ms 0.90 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.object_'>, 18, 'outside')
- 641±30ns 577±20ns 0.90 index_cached_properties.IndexCache.time_is_monotonic('RangeIndex')
- 641±20ns 576±20ns 0.90 index_cached_properties.IndexCache.time_shape('Int64Index')
- 3.37±0.03ms 3.02±0.02ms 0.90 timeseries.ResampleSeries.time_resample('period', '5min', 'ohlc')
- 787±20ns 702±20ns 0.89 index_cached_properties.IndexCache.time_is_monotonic_decreasing('RangeIndex')
- 1.25±0.04μs 1.12±0.04μs 0.89 index_cached_properties.IndexCache.time_is_all_dates('Int64Index')
- 1.50±0.1ms 1.33±0.02ms 0.88 dtypes.SelectDtypes.time_select_dtype_string_exclude('float32')
- 1.19±0.09μs 1.05±0.08μs 0.88 index_cached_properties.IndexCache.time_inferred_type('UInt64Index')
- 57.6±1μs 50.3±0.2μs 0.87 frame_methods.Dtypes.time_frame_dtypes
- 168±3μs 146±0.6μs 0.87 algos.isin.IsinWithArangeSorted.time_isin(<class 'numpy.uint64'>, 8000)
- 552±20ns 480±20ns 0.87 index_cached_properties.IndexCache.time_is_monotonic_increasing('RangeIndex')
- 1.50±0.1ms 1.30±0ms 0.87 dtypes.SelectDtypes.time_select_dtype_string_exclude('complex64')
- 1.89±0ms 1.64±0.01ms 0.87 period.DataFramePeriodColumn.time_set_index
- 11.0±0.2μs 9.50±0.08μs 0.87 period.Indexing.time_series_loc
- 110±20ms 93.1±0.06ms 0.85 algos.isin.IsInLongSeriesLookUpDominates.time_isin('float32', 1000, 'random_hits')
- 392±30ns 330±10ns 0.84 index_cached_properties.IndexCache.time_inferred_type('RangeIndex')
- 4.45±0.4ms 3.68±0.02ms 0.83 algorithms.Factorize.time_factorize(True, True, 'Int64')
- 9.60±0.8ms 7.75±0.01ms 0.81 algorithms.Factorize.time_factorize(True, False, 'string')
- 11.2±0.8ms 8.90±0.1ms 0.80 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.object_'>, 18, 'inside')
- 615±30ns 484±30ns 0.79 index_cached_properties.IndexCache.time_is_monotonic_increasing('Int64Index')
- 73.8±20ms 55.4±0.5ms 0.75 algos.isin.IsInLongSeriesLookUpDominates.time_isin('float32', 1000, 'monotone_hits')
- 180±0.1μs 128±0.5μs 0.71 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.Int8Engine'>, <class 'numpy.int8'>), 'monotonic_incr')
- 128±40ms 90.8±0.03ms 0.71 algos.isin.IsInLongSeriesLookUpDominates.time_isin('object', 5, 'monotone_hits')
- 186±0.3μs 131±2μs 0.70 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.Int16Engine'>, <class 'numpy.int16'>), 'monotonic_incr')
- 1.72±0.4μs 1.17±0.1μs 0.68 index_cached_properties.IndexCache.time_inferred_type('TimedeltaIndex')
- 216±0.8μs 140±3μs 0.65 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.UInt32Engine'>, <class 'numpy.uint32'>), 'monotonic_incr')
- 1.55±0.01ms 972±7μs 0.63 algos.isin.IsinWithArangeSorted.time_isin(<class 'numpy.int64'>, 100000)

SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE DECREASED.
```

Specific timings via %timeit on `maybe_convert_objects` directly:

```
np.array of python integers:
127 ms ± 80.9 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
120 ms ± 155 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master

np.array of int32:
179 ms ± 1.21 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
133 ms ± 274 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master

np.array of int32 with last one a python int:
178 ms ± 1.71 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
133 ms ± 106 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master
```

<details>
<summary>timeit code</summary>

```
values = np.array(range(1_000_000), dtype="object")
print('np.array of python integers:')
%timeit maybe_convert_objects(values)
print()

values = np.array([np.int32(1)] * 1_000_000, dtype="object")
print('np.array of int32:')
%timeit maybe_convert_objects(values)
print()

values = np.array([np.int32(1)] * 999_999 + [1], dtype="object")
print('np.array of int32 with last one a python int:')
%timeit maybe_convert_objects(values)
print()
```

</details>
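For reference, a minimal sketch of what that Python-level fallback cast does; `result` and `itemsize` below are made-up stand-ins for the values the inference loop would actually produce:

```python
# Hedged sketch of the cast quoted above; the inputs are illustrative
# stand-ins, not maybe_convert_objects' real internals.
import numpy as np

result = np.array([1, 2, 3], dtype=np.int64)  # inferred result array
itemsize = 4  # smallest itemsize seen among the input scalars (assumed)

# dtype.kind is "i" here, so kind + str(itemsize) builds "i4" (int32):
# the same kind of dtype, downcast to the common itemsize.
result = result.astype(result.dtype.kind + str(itemsize))
assert result.dtype == np.dtype("int32")
```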
https://api.github.com/repos/pandas-dev/pandas/pulls/40908
2021-04-12T22:18:39Z
2021-04-21T12:53:28Z
2021-04-21T12:53:28Z
2021-04-21T15:37:42Z
BUG: to_string truncation column with index=False
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 0ec9758477eba..fcfac2d2d63f1 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -740,6 +740,7 @@ I/O - Bug in :func:`read_hdf` returning unexpected records when filtering on categorical string columns using ``where`` parameter (:issue:`39189`) - Bug in :func:`read_sas` raising ``ValueError`` when ``datetimes`` were null (:issue:`39725`) - Bug in :func:`read_excel` dropping empty values from single-column spreadsheets (:issue:`39808`) +- Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`) Period ^^^^^^ diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py index 84333cfc441b2..de53646b5f95f 100644 --- a/pandas/io/formats/string.py +++ b/pandas/io/formats/string.py @@ -77,7 +77,8 @@ def _insert_dot_separators(self, strcols: List[List[str]]) -> List[List[str]]: def _insert_dot_separator_horizontal( self, strcols: List[List[str]], index_length: int ) -> List[List[str]]: - strcols.insert(self.fmt.tr_col_num + 1, [" ..."] * index_length) + tr_col_num = self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num + strcols.insert(tr_col_num, [" ..."] * index_length) return strcols def _insert_dot_separator_vertical( diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py index 551734f343dfa..f9b3cac3527ef 100644 --- a/pandas/tests/io/formats/test_to_string.py +++ b/pandas/tests/io/formats/test_to_string.py @@ -106,6 +106,40 @@ def test_format_remove_leading_space_dataframe(input_array, expected): assert df == expected +@pytest.mark.parametrize( + "max_cols, expected", + [ + ( + 10, + [ + " 0 1 2 3 4 ... 6 7 8 9 10", + " 0 0 0 0 0 ... 0 0 0 0 0", + " 0 0 0 0 0 ... 0 0 0 0 0", + ], + ), + ( + 9, + [ + " 0 1 2 3 ... 7 8 9 10", + " 0 0 0 0 ... 0 0 0 0", + " 0 0 0 0 ... 0 0 0 0", + ], + ), + ( + 1, + [ + " 0 ...", + " 0 ...", + " 0 ...", + ], + ), + ], +) +def test_truncation_col_placement_no_index(max_cols, expected): + df = DataFrame([[0] * 11] * 2) + assert df.to_string(index=False, max_cols=max_cols).split("\n") == expected + + def test_to_string_unicode_columns(float_frame): df = DataFrame({"\u03c3": np.arange(10.0)})
- [x] closes #40904
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
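A minimal sketch of the fixed behaviour, mirroring the new test above (the column counts match the test's parameters; the spacing shown below is approximate):

```python
# With index=False, the " ..." truncation marker is no longer shifted one
# column to the right: it now sits between the visible head and tail columns.
import pandas as pd

df = pd.DataFrame([[0] * 11] * 2)  # 11 columns of zeros, as in the new test

print(df.to_string(index=False, max_cols=10))
# Roughly:
#  0  1  2  3  4  ...  6  7  8  9  10
#  0  0  0  0  0  ...  0  0  0  0   0
#  0  0  0  0  0  ...  0  0  0  0   0
```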
https://api.github.com/repos/pandas-dev/pandas/pulls/40907
2021-04-12T21:02:05Z
2021-04-16T01:11:25Z
2021-04-16T01:11:25Z
2021-04-16T01:11:32Z
STYLE use pandas-dev-flaker
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9424b2f34eaff..5b11490479088 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,7 +38,10 @@ repos: rev: 3.9.0 hooks: - id: flake8 - additional_dependencies: [flake8-comprehensions>=3.1.0, flake8-bugbear>=21.3.2] + additional_dependencies: + - flake8-comprehensions==3.1.0 + - flake8-bugbear==21.3.2 + - pandas-dev-flaker==0.2.0 - id: flake8 name: flake8 (cython) types: [cython] @@ -71,7 +74,11 @@ repos: rev: v1.2.2 hooks: - id: yesqa - additional_dependencies: [flake8==3.9.0] + additional_dependencies: + - flake8==3.9.0 + - flake8-comprehensions==3.1.0 + - flake8-bugbear==21.3.2 + - pandas-dev-flaker==0.2.0 - repo: local hooks: - id: flake8-rst @@ -82,28 +89,6 @@ repos: types: [rst] args: [--filename=*.rst] additional_dependencies: [flake8-rst==0.7.0, flake8==3.7.9] - - id: frame-or-series-union - name: Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias - entry: Union\[.*(Series,.*DataFrame|DataFrame,.*Series).*\] - language: pygrep - types: [python] - exclude: ^pandas/_typing\.py$ - - id: inconsistent-namespace-usage - name: 'Check for inconsistent use of pandas namespace' - entry: python scripts/check_for_inconsistent_pandas_namespace.py - language: python - types: [python] - - id: no-os-remove - name: Check code for instances of os.remove - entry: os\.remove - language: pygrep - types: [python] - files: ^pandas/tests/ - exclude: | - (?x)^ - pandas/tests/io/excel/test_writers\.py - |pandas/tests/io/pytables/common\.py - |pandas/tests/io/pytables/test_store\.py$ - id: unwanted-patterns name: Unwanted patterns language: pygrep @@ -113,52 +98,10 @@ repos: \#\ type:\ (?!ignore) |\#\ type:\s?ignore(?!\[) - # foo._class__ instead of type(foo) - |\.__class__ - - # np.bool/np.object instead of np.bool_/np.object_ - |np\.bool[^_8] - |np\.object[^_8] - - # imports from pandas.core.common instead of `import pandas.core.common as com` - |from\ pandas\.core\.common\ import - |from\ pandas\.core\ import\ common - - # imports from collections.abc instead of `from collections import abc` - |from\ collections\.abc\ import - - # Numpy - |from\ numpy\ import\ random - |from\ numpy\.random\ import - # Incorrect code-block / IPython directives |\.\.\ code-block\ :: |\.\.\ ipython\ :: types_or: [python, cython, rst] - exclude: ^doc/source/development/code_style\.rst # contains examples of patterns to avoid - - id: unwanted-patterns-in-tests - name: Unwanted patterns in tests - language: pygrep - entry: | - (?x) - # pytest.xfail instead of pytest.mark.xfail - pytest\.xfail - - # imports from pandas._testing instead of `import pandas._testing as tm` - |from\ pandas\._testing\ import - |from\ pandas\ import\ _testing\ as\ tm - - # No direct imports from conftest - |conftest\ import - |import\ conftest - - # pandas.testing instead of tm - |pd\.testing\. - - # pd.api.types instead of from pandas.api.types import ... - |(pd|pandas)\.api\.types\. 
- files: ^pandas/tests/ - types_or: [python, cython, rst] - id: pip-to-conda name: Generate pip dependency from conda description: This hook checks if the conda environment.yml and requirements-dev.txt are equal @@ -180,35 +123,6 @@ repos: language: python types: [rst] files: ^doc/source/(development|reference)/ - - id: unwanted-patterns-bare-pytest-raises - name: Check for use of bare pytest raises - language: python - entry: python scripts/validate_unwanted_patterns.py --validation-type="bare_pytest_raises" - types: [python] - files: ^pandas/tests/ - exclude: ^pandas/tests/extension/ - - id: unwanted-patterns-private-function-across-module - name: Check for use of private functions across modules - language: python - entry: python scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" - types: [python] - exclude: ^(asv_bench|pandas/tests|doc)/ - - id: unwanted-patterns-private-import-across-module - name: Check for import of private attributes across modules - language: python - entry: python scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module" - types: [python] - exclude: ^(asv_bench|pandas/tests|doc)/ - - id: unwanted-patterns-strings-to-concatenate - name: Check for use of not concatenated strings - language: python - entry: python scripts/validate_unwanted_patterns.py --validation-type="strings_to_concatenate" - types_or: [python, cython] - - id: unwanted-patterns-strings-with-wrong-placed-whitespace - name: Check for strings with wrong placed spaces - language: python - entry: python scripts/validate_unwanted_patterns.py --validation-type="strings_with_wrong_placed_whitespace" - types_or: [python, cython] - id: use-pd_array-in-core name: Import pandas.array as pd_array in core language: python diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 459046d2decfb..ac7cd87c846d5 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -31,7 +31,7 @@ except ImportError: from pandas import algos try: - from pandas._testing import test_parallel + from pandas._testing import test_parallel # noqa: PDF014 have_real_test_parallel = True except ImportError: diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 7bd4d639633b3..ed44102700dc6 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -70,7 +70,7 @@ class BaseIO: def remove(self, f): """Remove created files""" try: - os.remove(f) + os.remove(f) # noqa: PDF008 except OSError: # On Windows, attempting to remove a file that is in use # causes an exception to be raised diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 3b1774ade6f85..d4b6c0d6ff09d 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -64,27 +64,6 @@ fi ### PATTERNS ### if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then - MSG='Check for use of exec' ; echo $MSG - invgrep -R --include="*.py*" -E "[^a-zA-Z0-9_]exec\(" pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for pytest warns' ; echo $MSG - invgrep -r -E --include '*.py' 'pytest\.warns' pandas/tests/ - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for pytest raises without context' ; echo $MSG - invgrep -r -E --include '*.py' "[[:space:]] pytest.raises" pandas/tests/ - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for use of builtin filter function' ; echo $MSG - invgrep -R --include="*.py" -P '(?<!def)[\(\s]filter\(' pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - - # Check 
for the following code in testing: `np.testing` and `np.array_equal` - MSG='Check for invalid testing' ; echo $MSG - invgrep -r -E --include '*.py' --exclude testing.py '(numpy|np)(\.testing|\.array_equal)' pandas/tests/ - RET=$(($RET + $?)) ; echo $MSG "DONE" - # Check for the following code in the extension array base tests: `tm.assert_frame_equal` and `tm.assert_series_equal` MSG='Check for invalid EA testing' ; echo $MSG invgrep -r -E --include '*.py' --exclude base.py 'tm.assert_(series|frame)_equal' pandas/tests/extension/base @@ -98,15 +77,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.rst" -E "[a-zA-Z0-9]\`\`?[a-zA-Z0-9]" doc/source/ RET=$(($RET + $?)) ; echo $MSG "DONE" - # Check for the following code in testing: `unittest.mock`, `mock.Mock()` or `mock.patch` - MSG='Check that unittest.mock is not used (pytest builtin monkeypatch fixture should be used instead)' ; echo $MSG - invgrep -r -E --include '*.py' '(unittest(\.| import )mock|mock\.Mock\(\)|mock\.patch)' pandas/tests/ - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Check for use of {foo!r} instead of {repr(foo)}' ; echo $MSG - invgrep -R --include=*.{py,pyx} '!r}' pandas - RET=$(($RET + $?)) ; echo $MSG "DONE" - echo $MSG "DONE" fi ### CODE ### diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst index 19d83eb75e5bd..8f399ef6f1192 100644 --- a/doc/source/development/code_style.rst +++ b/doc/source/development/code_style.rst @@ -19,147 +19,8 @@ consistent code format throughout the project. We encourage you to use Patterns ======== -Using foo.__class__ -------------------- - - -pandas uses 'type(foo)' instead 'foo.__class__' as it is making the code more -readable. -For example: - -**Good:** - -.. code-block:: python - - foo = "bar" - type(foo) - -**Bad:** - -.. code-block:: python - - foo = "bar" - foo.__class__ - - -String formatting -================= - -Concatenated strings --------------------- - -Using f-strings -~~~~~~~~~~~~~~~ - -pandas uses f-strings formatting instead of '%' and '.format()' string formatters. - -The convention of using f-strings on a string that is concatenated over several lines, -is to prefix only the lines containing values which need to be interpreted. - -For example: - -**Good:** - -.. code-block:: python - - foo = "old_function" - bar = "new_function" - - my_warning_message = ( - f"Warning, {foo} is deprecated, " - "please use the new and way better " - f"{bar}" - ) - -**Bad:** - -.. code-block:: python - - foo = "old_function" - bar = "new_function" - - my_warning_message = ( - f"Warning, {foo} is deprecated, " - f"please use the new and way better " - f"{bar}" - ) - -White spaces -~~~~~~~~~~~~ - -Only put white space at the end of the previous line, so -there is no whitespace at the beginning of the concatenated string. - -For example: - -**Good:** - -.. code-block:: python - - example_string = ( - "Some long concatenated string, " - "with good placement of the " - "whitespaces" - ) - -**Bad:** - -.. code-block:: python - - example_string = ( - "Some long concatenated string," - " with bad placement of the" - " whitespaces" - ) - -Representation function (aka 'repr()') --------------------------------------- - -pandas uses 'repr()' instead of '%r' and '!r'. - -The use of 'repr()' will only happen when the value is not an obvious string. - -For example: - -**Good:** - -.. code-block:: python - - value = str - f"Unknown received value, got: {repr(value)}" - -**Good:** - -.. 
code-block:: python - - value = str - f"Unknown received type, got: '{type(value).__name__}'" - - -Imports (aim for absolute) -========================== - -In Python 3, absolute imports are recommended. Using absolute imports, doing something -like ``import string`` will import the string module rather than ``string.py`` -in the same directory. As much as possible, you should try to write out -absolute imports that show the whole import chain from top-level pandas. - -Explicit relative imports are also supported in Python 3 but it is not -recommended to use them. Implicit relative imports should never be used -and are removed in Python 3. - -For example: - -:: - - # preferred - import pandas.core.common as com - - # not preferred - from .common import test_base - - # wrong - from common import test_base +We use a ``flake8`` plugin, `pandas-dev-flaker <https://github.com/pandas-dev/pandas-dev-flaker>`_, to +check our codebase for unwanted patterns. See its ``README`` for the up-to-date list of rules we enforce. Testing ======= diff --git a/environment.yml b/environment.yml index 90a9186aa017f..146bf6db08d8b 100644 --- a/environment.yml +++ b/environment.yml @@ -21,8 +21,8 @@ dependencies: - black=20.8b1 - cpplint - flake8=3.9.0 - - flake8-bugbear>=21.3.2 # used by flake8, find likely bugs - - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions + - flake8-bugbear=21.3.2 # used by flake8, find likely bugs + - flake8-comprehensions=3.1.0 # used by flake8, linting of unnecessary comprehensions - isort>=5.2.1 # check that imports are in the right order - mypy=0.812 - pre-commit>=2.9.2 @@ -117,3 +117,4 @@ dependencies: - pip: - git+https://github.com/pydata/pydata-sphinx-theme.git@master - numpydoc < 1.2 # 2021-02-09 1.2dev breaking CI + - pandas-dev-flaker==0.2.0 diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index a603222094bdb..aaf58f1fcb150 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -915,7 +915,7 @@ def external_error_raised(expected_exception: type[Exception]) -> ContextManager """ import pytest - return pytest.raises(expected_exception, match=None) + return pytest.raises(expected_exception, match=None) # noqa: PDF010 cython_table = pd.core.common._cython_table.items() diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 67bcdb0a387dd..0628aa5add4a3 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -21,7 +21,7 @@ from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.core.dtypes.missing import isna -import pandas.core.common as common +import pandas.core.common as common # noqa: PDF018 from pandas.core.util.numba_ import maybe_use_numba from pandas.core.window.common import zsqrt from pandas.core.window.doc import ( diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 33b1ceee6e529..e4710254d9311 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -56,7 +56,7 @@ DataError, SelectionMixin, ) -import pandas.core.common as common +import pandas.core.common as com from pandas.core.indexes.api import ( Index, MultiIndex, @@ -643,7 +643,7 @@ def _apply_pairwise( ) gb_pairs = ( - common.maybe_make_list(pair) for pair in self._grouper.indices.keys() + com.maybe_make_list(pair) for pair in self._grouper.indices.keys() ) groupby_codes = [] groupby_levels = [] diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index ee90dfa3e9a52..c36552f59da71 100644 --- 
a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -274,7 +274,7 @@ class TestTesting(Base): ] def test_testing(self): - from pandas import testing + from pandas import testing # noqa: PDF015 self.check(testing, self.funcs) diff --git a/pandas/tests/indexes/object/test_astype.py b/pandas/tests/indexes/object/test_astype.py index 42c7b8eb4aeec..9bfc0c1312200 100644 --- a/pandas/tests/indexes/object/test_astype.py +++ b/pandas/tests/indexes/object/test_astype.py @@ -1,5 +1,5 @@ from pandas import Index -import pandas.testing as tm +import pandas._testing as tm def test_astype_str_from_bytes(): diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py index 7e7a76e287d32..6a9d5745ab457 100644 --- a/pandas/tests/io/pytables/common.py +++ b/pandas/tests/io/pytables/common.py @@ -16,7 +16,7 @@ def safe_remove(path): if path is not None: try: - os.remove(path) + os.remove(path) # noqa: PDF008 except OSError: pass diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 24a4d35b5d94d..bb6928d2fd95a 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -911,7 +911,7 @@ def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs): os.close(fd) except (OSError, ValueError): pass - os.remove(new_f) + os.remove(new_f) # noqa: PDF008 # new table df = tm.makeDataFrame() diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 30d6436c7e250..e863fb45b1f81 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -232,9 +232,7 @@ def test_type_check(errors): # see gh-11776 df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) kwargs = {"errors": errors} if errors is not None else {} - error_ctx = pytest.raises(TypeError, match="1-d array") - - with error_ctx: + with pytest.raises(TypeError, match="1-d array"): to_numeric(df, **kwargs) diff --git a/requirements-dev.txt b/requirements-dev.txt index 02a4e63374305..33deeef9f1f82 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,8 +9,8 @@ cython>=0.29.21 black==20.8b1 cpplint flake8==3.9.0 -flake8-bugbear>=21.3.2 -flake8-comprehensions>=3.1.0 +flake8-bugbear==21.3.2 +flake8-comprehensions==3.1.0 isort>=5.2.1 mypy==0.812 pre-commit>=2.9.2 @@ -80,3 +80,4 @@ tabulate>=0.8.3 natsort git+https://github.com/pydata/pydata-sphinx-theme.git@master numpydoc < 1.2 +pandas-dev-flaker==0.2.0 diff --git a/scripts/check_for_inconsistent_pandas_namespace.py b/scripts/check_for_inconsistent_pandas_namespace.py deleted file mode 100644 index 3c21821e794a9..0000000000000 --- a/scripts/check_for_inconsistent_pandas_namespace.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Check that test suite file doesn't use the pandas namespace inconsistently. - -We check for cases of ``Series`` and ``pd.Series`` appearing in the same file -(likewise for other pandas objects). - -This is meant to be run as a pre-commit hook - to run it manually, you can do: - - pre-commit run inconsistent-namespace-usage --all-files - -To automatically fixup a given file, you can pass `--replace`, e.g. - - python scripts/check_for_inconsistent_pandas_namespace.py test_me.py --replace - -though note that you may need to manually fixup some imports and that you will also -need the additional dependency `tokenize-rt` (which is left out from the pre-commit -hook so that it uses the same virtualenv as the other local ones). 
- -The general structure is similar to that of some plugins from -https://github.com/asottile/pyupgrade . -""" - -import argparse -import ast -import sys -from typing import ( - MutableMapping, - NamedTuple, - Optional, - Sequence, - Set, -) - -ERROR_MESSAGE = ( - "{path}:{lineno}:{col_offset}: " - "Found both '{prefix}.{name}' and '{name}' in {path}" -) - - -class OffsetWithNamespace(NamedTuple): - lineno: int - col_offset: int - namespace: str - - -class Visitor(ast.NodeVisitor): - def __init__(self) -> None: - self.pandas_namespace: MutableMapping[OffsetWithNamespace, str] = {} - self.imported_from_pandas: Set[str] = set() - - def visit_Attribute(self, node: ast.Attribute) -> None: - if isinstance(node.value, ast.Name) and node.value.id in {"pandas", "pd"}: - offset_with_namespace = OffsetWithNamespace( - node.lineno, node.col_offset, node.value.id - ) - self.pandas_namespace[offset_with_namespace] = node.attr - self.generic_visit(node) - - def visit_ImportFrom(self, node: ast.ImportFrom) -> None: - if node.module is not None and "pandas" in node.module: - self.imported_from_pandas.update(name.name for name in node.names) - self.generic_visit(node) - - -def replace_inconsistent_pandas_namespace(visitor: Visitor, content: str) -> str: - from tokenize_rt import ( - reversed_enumerate, - src_to_tokens, - tokens_to_src, - ) - - tokens = src_to_tokens(content) - for n, i in reversed_enumerate(tokens): - offset_with_namespace = OffsetWithNamespace(i.offset[0], i.offset[1], i.src) - if ( - offset_with_namespace in visitor.pandas_namespace - and visitor.pandas_namespace[offset_with_namespace] - in visitor.imported_from_pandas - ): - # Replace `pd` - tokens[n] = i._replace(src="") - # Replace `.` - tokens[n + 1] = tokens[n + 1]._replace(src="") - - new_src: str = tokens_to_src(tokens) - return new_src - - -def check_for_inconsistent_pandas_namespace( - content: str, path: str, *, replace: bool -) -> Optional[str]: - tree = ast.parse(content) - - visitor = Visitor() - visitor.visit(tree) - - inconsistencies = visitor.imported_from_pandas.intersection( - visitor.pandas_namespace.values() - ) - - if not inconsistencies: - # No inconsistent namespace usage, nothing to replace. 
- return None - - if not replace: - inconsistency = inconsistencies.pop() - lineno, col_offset, prefix = next( - key for key, val in visitor.pandas_namespace.items() if val == inconsistency - ) - msg = ERROR_MESSAGE.format( - lineno=lineno, - col_offset=col_offset, - prefix=prefix, - name=inconsistency, - path=path, - ) - sys.stdout.write(msg) - sys.exit(1) - - return replace_inconsistent_pandas_namespace(visitor, content) - - -def main(argv: Optional[Sequence[str]] = None) -> None: - parser = argparse.ArgumentParser() - parser.add_argument("paths", nargs="*") - parser.add_argument("--replace", action="store_true") - args = parser.parse_args(argv) - - for path in args.paths: - with open(path, encoding="utf-8") as fd: - content = fd.read() - new_content = check_for_inconsistent_pandas_namespace( - content, path, replace=args.replace - ) - if not args.replace or new_content is None: - continue - with open(path, "w", encoding="utf-8") as fd: - fd.write(new_content) - - -if __name__ == "__main__": - main() diff --git a/scripts/sync_flake8_versions.py b/scripts/sync_flake8_versions.py index 8dd7abcf47f02..cb6bb1eb0986e 100644 --- a/scripts/sync_flake8_versions.py +++ b/scripts/sync_flake8_versions.py @@ -1,5 +1,5 @@ """ -Check that the flake8 pins are the same in: +Check that the flake8 (and pandas-dev-flaker) pins are the same in: - environment.yml - .pre-commit-config.yaml, in the flake8 hook @@ -12,68 +12,152 @@ - ``python scripts/sync_flake8_versions.py``, or - ``pre-commit run sync-flake8-versions --all-files``. """ +from __future__ import annotations + +from dataclasses import ( + dataclass, + replace, +) import sys from typing import ( Any, Mapping, - NamedTuple, Sequence, - Tuple, TypeVar, ) import yaml -class Revisions(NamedTuple): - precommit_rev: str - precommit_yesqa_rev: str - environment_rev: str +@dataclass +class Revision: + name: str + compare: str + version: str + + +@dataclass +class Revisions: + name: str + pre_commit: Revision | None = None + yesqa: Revision | None = None + environment: Revision | None = None YamlMapping = Mapping[str, Any] Repo = TypeVar("Repo", bound=YamlMapping) +COMPARE = ("<=", "==", ">=", "<", ">", "=") -def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> Tuple[Repo, YamlMapping]: + +def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> tuple[Repo, YamlMapping]: for repo in repos: for hook in repo["hooks"]: if hook["id"] == hook_name: return repo, hook - else: + else: # pragma: no cover raise RuntimeError(f"Repo with hook {hook_name} not found") -def get_revisions(precommit_config: YamlMapping, environment: YamlMapping) -> Revisions: - repos = precommit_config["repos"] - flake8_repo, _ = _get_repo_hook(repos, "flake8") - precommit_rev = flake8_repo["rev"] - - _, yesqa_hook = _get_repo_hook(repos, "yesqa") - additional_dependencies = yesqa_hook.get("additional_dependencies", []) - for dep in additional_dependencies: - if "==" in dep: - pkg, rev = dep.split("==", maxsplit=1) - if pkg == "flake8": - precommit_yesqa_rev = rev - break +def _conda_to_pip_compat(dep): + if dep.compare == "=": + return replace(dep, compare="==") else: - raise RuntimeError( - "flake8 not found, or not pinned, in additional dependencies of yesqa " - "hook in .pre-commit-config.yaml" + return dep + + +def _validate_additional_dependencies( + flake8_additional_dependencies, + yesqa_additional_dependencies, + environment_additional_dependencies, +) -> None: + for dep in flake8_additional_dependencies: + if dep not in yesqa_additional_dependencies: + 
sys.stdout.write( + f"Mismatch of '{dep.name}' version between 'flake8' " + "and 'yesqa' in '.pre-commit-config.yaml'\n" + ) + sys.exit(1) + if dep not in environment_additional_dependencies: + sys.stdout.write( + f"Mismatch of '{dep.name}' version between 'enviroment.yml' " + "and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n" + ) + sys.exit(1) + + +def _validate_revisions(revisions): + if revisions.environment != revisions.pre_commit: + sys.stdout.write( + f"{revisions.name} in 'environment.yml' does not " + "match in 'flake8' from 'pre-commit'\n" ) + sys.exit(1) + + if revisions.yesqa != revisions.pre_commit: + sys.stdout.write( + f"{revisions.name} in 'yesqa' does not match " + "in 'flake8' from 'pre-commit'\n" + ) + sys.exit(1) - deps = environment["dependencies"] + +def _process_dependencies(deps): for dep in deps: - if isinstance(dep, str) and "=" in dep: - pkg, rev = dep.split("=", maxsplit=1) - if pkg == "flake8": - environment_rev = rev - break - else: - raise RuntimeError("flake8 not found, or not pinned, in environment.yml") + if isinstance(dep, str): + for compare in COMPARE: + if compare in dep: + pkg, rev = dep.split(compare, maxsplit=1) + yield _conda_to_pip_compat(Revision(pkg, compare, rev)) + break + else: + yield from _process_dependencies(dep["pip"]) + + +def get_revisions( + precommit_config: YamlMapping, environment: YamlMapping +) -> tuple[Revisions, Revisions]: + flake8_revisions = Revisions(name="flake8") + pandas_dev_flaker_revisions = Revisions(name="pandas-dev-flaker") + + repos = precommit_config["repos"] + flake8_repo, flake8_hook = _get_repo_hook(repos, "flake8") + flake8_revisions.pre_commit = Revision("flake8", "==", flake8_repo["rev"]) + flake8_additional_dependencies = [] + for dep in _process_dependencies(flake8_hook.get("additional_dependencies", [])): + if dep.name == "pandas-dev-flaker": + pandas_dev_flaker_revisions.pre_commit = dep + else: + flake8_additional_dependencies.append(dep) - return Revisions(precommit_rev, precommit_yesqa_rev, environment_rev) + _, yesqa_hook = _get_repo_hook(repos, "yesqa") + yesqa_additional_dependencies = [] + for dep in _process_dependencies(yesqa_hook.get("additional_dependencies", [])): + if dep.name == "flake8": + flake8_revisions.yesqa = dep + elif dep.name == "pandas-dev-flaker": + pandas_dev_flaker_revisions.yesqa = dep + else: + yesqa_additional_dependencies.append(dep) + + environment_dependencies = environment["dependencies"] + environment_additional_dependencies = [] + for dep in _process_dependencies(environment_dependencies): + if dep.name == "flake8": + flake8_revisions.environment = dep + elif dep.name == "pandas-dev-flaker": + pandas_dev_flaker_revisions.environment = dep + else: + environment_additional_dependencies.append(dep) + + _validate_additional_dependencies( + flake8_additional_dependencies, + yesqa_additional_dependencies, + environment_additional_dependencies, + ) + + for revisions in flake8_revisions, pandas_dev_flaker_revisions: + _validate_revisions(revisions) if __name__ == "__main__": @@ -81,21 +165,5 @@ def get_revisions(precommit_config: YamlMapping, environment: YamlMapping) -> Re precommit_config = yaml.safe_load(fd) with open("environment.yml") as fd: environment = yaml.safe_load(fd) - - revisions = get_revisions(precommit_config, environment) - - if revisions.environment_rev != revisions.precommit_rev: - sys.stdout.write( - f"flake8 pin in environment.yml is {revisions.environment_rev}, " - f"should be {revisions.precommit_rev}\n" - ) - sys.exit(1) - - if 
revisions.precommit_yesqa_rev != revisions.precommit_rev: - sys.stdout.write( - f"flake8 pin in yesqa is {revisions.precommit_yesqa_rev}, " - f"should be {revisions.precommit_rev}\n" - ) - sys.exit(1) - + get_revisions(precommit_config, environment) sys.exit(0) diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py deleted file mode 100644 index eb995158d8cb4..0000000000000 --- a/scripts/tests/test_inconsistent_namespace_check.py +++ /dev/null @@ -1,61 +0,0 @@ -import pytest - -from ..check_for_inconsistent_pandas_namespace import ( - check_for_inconsistent_pandas_namespace, -) - -BAD_FILE_0 = ( - "from pandas import Categorical\n" - "cat_0 = Categorical()\n" - "cat_1 = pd.Categorical()" -) -BAD_FILE_1 = ( - "from pandas import Categorical\n" - "cat_0 = pd.Categorical()\n" - "cat_1 = Categorical()" -) -BAD_FILE_2 = ( - "from pandas import Categorical\n" - "cat_0 = pandas.Categorical()\n" - "cat_1 = Categorical()" -) -GOOD_FILE_0 = ( - "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()" -) -GOOD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = pd.Categorical()" -GOOD_FILE_2 = "from array import array\nimport pandas as pd\narr = pd.array([])" -PATH = "t.py" - - -@pytest.mark.parametrize( - "content, expected", - [ - (BAD_FILE_0, "t.py:3:8: Found both 'pd.Categorical' and 'Categorical' in t.py"), - (BAD_FILE_1, "t.py:2:8: Found both 'pd.Categorical' and 'Categorical' in t.py"), - ( - BAD_FILE_2, - "t.py:2:8: Found both 'pandas.Categorical' and 'Categorical' in t.py", - ), - ], -) -def test_inconsistent_usage(content, expected, capsys): - with pytest.raises(SystemExit): - check_for_inconsistent_pandas_namespace(content, PATH, replace=False) - result, _ = capsys.readouterr() - assert result == expected - - -@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1, GOOD_FILE_2]) -@pytest.mark.parametrize("replace", [True, False]) -def test_consistent_usage(content, replace): - # should not raise - check_for_inconsistent_pandas_namespace(content, PATH, replace=replace) - - -@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1, BAD_FILE_2]) -def test_inconsistent_usage_with_replace(content): - result = check_for_inconsistent_pandas_namespace(content, PATH, replace=True) - expected = ( - "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()" - ) - assert result == expected diff --git a/scripts/tests/test_sync_flake8_versions.py b/scripts/tests/test_sync_flake8_versions.py index fc559f3e5e982..d9b6dbe8c3f0a 100644 --- a/scripts/tests/test_sync_flake8_versions.py +++ b/scripts/tests/test_sync_flake8_versions.py @@ -1,25 +1,221 @@ -from ..sync_flake8_versions import ( - Revisions, - get_revisions, -) +import pytest +from ..sync_flake8_versions import get_revisions -def test_get_revisions(): + +def test_wrong_yesqa_flake8(capsys): + precommit_config = { + "repos": [ + { + "repo": "https://gitlab.com/pycqa/flake8", + "rev": "0.1.1", + "hooks": [ + { + "id": "flake8", + } + ], + }, + { + "repo": "https://github.com/asottile/yesqa", + "rev": "v1.2.2", + "hooks": [ + { + "id": "yesqa", + "additional_dependencies": [ + "flake8==0.4.2", + ], + } + ], + }, + ] + } + environment = { + "dependencies": [ + "flake8=0.1.1", + ] + } + with pytest.raises(SystemExit, match=None): + get_revisions(precommit_config, environment) + result, _ = capsys.readouterr() + expected = "flake8 in 'yesqa' does not match in 'flake8' from 'pre-commit'\n" + assert result == expected + + +def 
test_wrong_env_flake8(capsys): + precommit_config = { + "repos": [ + { + "repo": "https://gitlab.com/pycqa/flake8", + "rev": "0.1.1", + "hooks": [ + { + "id": "flake8", + } + ], + }, + { + "repo": "https://github.com/asottile/yesqa", + "rev": "v1.2.2", + "hooks": [ + { + "id": "yesqa", + "additional_dependencies": [ + "flake8==0.4.2", + ], + } + ], + }, + ] + } + environment = { + "dependencies": [ + "flake8=1.5.6", + ] + } + with pytest.raises(SystemExit, match=None): + get_revisions(precommit_config, environment) + result, _ = capsys.readouterr() + expected = ( + "flake8 in 'environment.yml' does not match in 'flake8' from 'pre-commit'\n" + ) + assert result == expected + + +def test_wrong_yesqa_add_dep(capsys): + precommit_config = { + "repos": [ + { + "repo": "https://gitlab.com/pycqa/flake8", + "rev": "0.1.1", + "hooks": [ + { + "id": "flake8", + "additional_dependencies": [ + "flake8-bugs==1.1.1", + ], + } + ], + }, + { + "repo": "https://github.com/asottile/yesqa", + "rev": "v1.2.2", + "hooks": [ + { + "id": "yesqa", + "additional_dependencies": [ + "flake8==0.4.2", + "flake8-bugs>=1.1.1", + ], + } + ], + }, + ] + } + environment = { + "dependencies": [ + "flake8=1.5.6", + "flake8-bugs=1.1.1", + ] + } + with pytest.raises(SystemExit, match=None): + get_revisions(precommit_config, environment) + result, _ = capsys.readouterr() + expected = ( + "Mismatch of 'flake8-bugs' version between 'flake8' and 'yesqa' in " + "'.pre-commit-config.yaml'\n" + ) + assert result == expected + + +def test_wrong_env_add_dep(capsys): precommit_config = { "repos": [ { "repo": "https://gitlab.com/pycqa/flake8", - "rev": "foo", - "hooks": [{"id": "flake8"}], + "rev": "0.1.1", + "hooks": [ + { + "id": "flake8", + "additional_dependencies": [ + "flake8-bugs==1.1.1", + ], + } + ], }, { "repo": "https://github.com/asottile/yesqa", "rev": "v1.2.2", - "hooks": [{"id": "yesqa", "additional_dependencies": ["flake8==bar"]}], + "hooks": [ + { + "id": "yesqa", + "additional_dependencies": [ + "flake8==0.4.2", + "flake8-bugs==1.1.1", + ], + } + ], }, ] } - environment = {"dependencies": ["flake8=qux"]} - result = get_revisions(precommit_config, environment) - expected = Revisions("foo", "bar", "qux") + environment = { + "dependencies": [ + "flake8=1.5.6", + "flake8-bugs=1.1.2", + ] + } + with pytest.raises(SystemExit, match=None): + get_revisions(precommit_config, environment) + result, _ = capsys.readouterr() + expected = ( + "Mismatch of 'flake8-bugs' version between 'enviroment.yml' " + "and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n" + ) assert result == expected + + +def test_get_revisions_no_failure(capsys): + precommit_config = { + "repos": [ + { + "repo": "https://gitlab.com/pycqa/flake8", + "rev": "0.1.1", + "hooks": [ + { + "id": "flake8", + "additional_dependencies": [ + "pandas-dev-flaker==0.2.0", + "flake8-bugs==1.1.1", + ], + } + ], + }, + { + "repo": "https://github.com/asottile/yesqa", + "rev": "v1.2.2", + "hooks": [ + { + "id": "yesqa", + "additional_dependencies": [ + "flake8==0.1.1", + "pandas-dev-flaker==0.2.0", + "flake8-bugs==1.1.1", + ], + } + ], + }, + ] + } + environment = { + "dependencies": [ + "flake8=0.1.1", + "flake8-bugs=1.1.1", + { + "pip": [ + "git+https://github.com/pydata/pydata-sphinx-theme.git@master", + "pandas-dev-flaker==0.2.0", + ] + }, + ] + } + # should not raise + get_revisions(precommit_config, environment) diff --git a/scripts/tests/test_use_pd_array_in_core.py b/scripts/tests/test_use_pd_array_in_core.py index 9c66199a82846..8f13a6e735899 100644 
--- a/scripts/tests/test_use_pd_array_in_core.py +++ b/scripts/tests/test_use_pd_array_in_core.py @@ -14,7 +14,7 @@ def test_inconsistent_usage(content, capsys): result_msg = ( "t.py:2:0: Don't use pd.array in core, import array as pd_array instead\n" ) - with pytest.raises(SystemExit): + with pytest.raises(SystemExit, match=None): use_pd_array(content, PATH) expected_msg, _ = capsys.readouterr() assert result_msg == expected_msg diff --git a/scripts/tests/test_validate_unwanted_patterns.py b/scripts/tests/test_validate_unwanted_patterns.py deleted file mode 100644 index ef93fd1d21981..0000000000000 --- a/scripts/tests/test_validate_unwanted_patterns.py +++ /dev/null @@ -1,419 +0,0 @@ -import io - -import pytest - -from .. import validate_unwanted_patterns - - -class TestBarePytestRaises: - @pytest.mark.parametrize( - "data", - [ - ( - """ - with pytest.raises(ValueError, match="foo"): - pass - """ - ), - ( - """ - # with pytest.raises(ValueError, match="foo"): - # pass - """ - ), - ( - """ - # with pytest.raises(ValueError): - # pass - """ - ), - ( - """ - with pytest.raises( - ValueError, - match="foo" - ): - pass - """ - ), - ], - ) - def test_pytest_raises(self, data): - fd = io.StringIO(data.strip()) - result = list(validate_unwanted_patterns.bare_pytest_raises(fd)) - assert result == [] - - @pytest.mark.parametrize( - "data, expected", - [ - ( - ( - """ - with pytest.raises(ValueError): - pass - """ - ), - [ - ( - 1, - ( - "Bare pytests raise have been found. " - "Please pass in the argument 'match' " - "as well the exception." - ), - ), - ], - ), - ( - ( - """ - with pytest.raises(ValueError, match="foo"): - with pytest.raises(ValueError): - pass - pass - """ - ), - [ - ( - 2, - ( - "Bare pytests raise have been found. " - "Please pass in the argument 'match' " - "as well the exception." - ), - ), - ], - ), - ( - ( - """ - with pytest.raises(ValueError): - with pytest.raises(ValueError, match="foo"): - pass - pass - """ - ), - [ - ( - 1, - ( - "Bare pytests raise have been found. " - "Please pass in the argument 'match' " - "as well the exception." - ), - ), - ], - ), - ( - ( - """ - with pytest.raises( - ValueError - ): - pass - """ - ), - [ - ( - 1, - ( - "Bare pytests raise have been found. " - "Please pass in the argument 'match' " - "as well the exception." - ), - ), - ], - ), - ( - ( - """ - with pytest.raises( - ValueError, - # match = "foo" - ): - pass - """ - ), - [ - ( - 1, - ( - "Bare pytests raise have been found. " - "Please pass in the argument 'match' " - "as well the exception." - ), - ), - ], - ), - ], - ) - def test_pytest_raises_raises(self, data, expected): - fd = io.StringIO(data.strip()) - result = list(validate_unwanted_patterns.bare_pytest_raises(fd)) - assert result == expected - - -@pytest.mark.parametrize( - "data, expected", - [ - ( - 'msg = ("bar " "baz")', - [ - ( - 1, - ( - "String unnecessarily split in two by black. " - "Please merge them manually." - ), - ) - ], - ), - ( - 'msg = ("foo " "bar " "baz")', - [ - ( - 1, - ( - "String unnecessarily split in two by black. " - "Please merge them manually." - ), - ), - ( - 1, - ( - "String unnecessarily split in two by black. " - "Please merge them manually." 
- ), - ), - ], - ), - ], -) -def test_strings_to_concatenate(data, expected): - fd = io.StringIO(data.strip()) - result = list(validate_unwanted_patterns.strings_to_concatenate(fd)) - assert result == expected - - -class TestStringsWithWrongPlacedWhitespace: - @pytest.mark.parametrize( - "data", - [ - ( - """ - msg = ( - "foo\n" - " bar" - ) - """ - ), - ( - """ - msg = ( - "foo" - " bar" - "baz" - ) - """ - ), - ( - """ - msg = ( - f"foo" - " bar" - ) - """ - ), - ( - """ - msg = ( - "foo" - f" bar" - ) - """ - ), - ( - """ - msg = ( - "foo" - rf" bar" - ) - """ - ), - ], - ) - def test_strings_with_wrong_placed_whitespace(self, data): - fd = io.StringIO(data.strip()) - result = list( - validate_unwanted_patterns.strings_with_wrong_placed_whitespace(fd) - ) - assert result == [] - - @pytest.mark.parametrize( - "data, expected", - [ - ( - ( - """ - msg = ( - "foo" - " bar" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ) - ], - ), - ( - ( - """ - msg = ( - f"foo" - " bar" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ) - ], - ), - ( - ( - """ - msg = ( - "foo" - f" bar" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ) - ], - ), - ( - ( - """ - msg = ( - f"foo" - f" bar" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ) - ], - ), - ( - ( - """ - msg = ( - "foo" - rf" bar" - " baz" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ( - 4, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ], - ), - ( - ( - """ - msg = ( - "foo" - " bar" - rf" baz" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ( - 4, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ], - ), - ( - ( - """ - msg = ( - "foo" - rf" bar" - rf" baz" - ) - """ - ), - [ - ( - 3, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ( - 4, - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ), - ], - ), - ], - ) - def test_strings_with_wrong_placed_whitespace_raises(self, data, expected): - fd = io.StringIO(data.strip()) - result = list( - validate_unwanted_patterns.strings_with_wrong_placed_whitespace(fd) - ) - assert result == expected diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py deleted file mode 100755 index b6b038ae9dd17..0000000000000 --- a/scripts/validate_unwanted_patterns.py +++ /dev/null @@ -1,487 +0,0 @@ -#!/usr/bin/env python3 -""" -Unwanted patterns test cases. - -The reason this file exist despite the fact we already have -`ci/code_checks.sh`, -(see https://github.com/pandas-dev/pandas/blob/master/ci/code_checks.sh) - -is that some of the test cases are more complex/imposible to validate via regex. 
-So this file is somewhat an extensions to `ci/code_checks.sh` -""" - -import argparse -import ast -import sys -import token -import tokenize -from typing import ( - IO, - Callable, - Iterable, - List, - Set, - Tuple, -) - -PRIVATE_IMPORTS_TO_IGNORE: Set[str] = { - "_extension_array_shared_docs", - "_index_shared_docs", - "_interval_shared_docs", - "_merge_doc", - "_shared_docs", - "_apply_docs", - "_new_Index", - "_new_PeriodIndex", - "_doc_template", - "_agg_template", - "_pipe_template", - "__main__", - "_transform_template", - "_flex_comp_doc_FRAME", - "_op_descriptions", - "_IntegerDtype", - "_use_inf_as_na", - "_get_plot_backend", - "_matplotlib", - "_arrow_utils", - "_registry", - "_get_offset", # TODO: remove after get_offset deprecation enforced - "_test_parse_iso8601", - "_json_normalize", # TODO: remove after deprecation is enforced - "_testing", - "_test_decorators", - "__version__", # check np.__version__ in compat.numpy.function -} - - -def _get_literal_string_prefix_len(token_string: str) -> int: - """ - Getting the length of the literal string prefix. - - Parameters - ---------- - token_string : str - String to check. - - Returns - ------- - int - Length of the literal string prefix. - - Examples - -------- - >>> example_string = "'Hello world'" - >>> _get_literal_string_prefix_len(example_string) - 0 - >>> example_string = "r'Hello world'" - >>> _get_literal_string_prefix_len(example_string) - 1 - """ - try: - return min( - token_string.find(quote) - for quote in (r"'", r'"') - if token_string.find(quote) >= 0 - ) - except ValueError: - return 0 - - -def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: - """ - Test Case for bare pytest raises. - - For example, this is wrong: - - >>> with pytest.raise(ValueError): - ... # Some code that raises ValueError - - And this is what we want instead: - - >>> with pytest.raise(ValueError, match="foo"): - ... # Some code that raises ValueError - - Parameters - ---------- - file_obj : IO - File-like object containing the Python code to validate. - - Yields - ------ - line_number : int - Line number of unconcatenated string. - msg : str - Explenation of the error. - - Notes - ----- - GH #23922 - """ - contents = file_obj.read() - tree = ast.parse(contents) - - for node in ast.walk(tree): - if not isinstance(node, ast.Call): - continue - - try: - if not (node.func.value.id == "pytest" and node.func.attr == "raises"): - continue - except AttributeError: - continue - - if not node.keywords: - yield ( - node.lineno, - "Bare pytests raise have been found. " - "Please pass in the argument 'match' as well the exception.", - ) - else: - # Means that there are arguments that are being passed in, - # now we validate that `match` is one of the passed in arguments - if not any(keyword.arg == "match" for keyword in node.keywords): - yield ( - node.lineno, - "Bare pytests raise have been found. " - "Please pass in the argument 'match' as well the exception.", - ) - - -PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"} # no known alternative - - -def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: - """ - Checking that a private function is not used across modules. - Parameters - ---------- - file_obj : IO - File-like object containing the Python code to validate. - Yields - ------ - line_number : int - Line number of the private function that is used across modules. - msg : str - Explenation of the error. 
- """ - contents = file_obj.read() - tree = ast.parse(contents) - - imported_modules: Set[str] = set() - - for node in ast.walk(tree): - if isinstance(node, (ast.Import, ast.ImportFrom)): - for module in node.names: - module_fqdn = module.name if module.asname is None else module.asname - imported_modules.add(module_fqdn) - - if not isinstance(node, ast.Call): - continue - - try: - module_name = node.func.value.id - function_name = node.func.attr - except AttributeError: - continue - - # Exception section # - - # (Debatable) Class case - if module_name[0].isupper(): - continue - # (Debatable) Dunder methods case - elif function_name.startswith("__") and function_name.endswith("__"): - continue - elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED: - continue - - if module_name in imported_modules and function_name.startswith("_"): - yield (node.lineno, f"Private function '{module_name}.{function_name}'") - - -def private_import_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: - """ - Checking that a private function is not imported across modules. - Parameters - ---------- - file_obj : IO - File-like object containing the Python code to validate. - Yields - ------ - line_number : int - Line number of import statement, that imports the private function. - msg : str - Explenation of the error. - """ - contents = file_obj.read() - tree = ast.parse(contents) - - for node in ast.walk(tree): - if not (isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)): - continue - - for module in node.names: - module_name = module.name.split(".")[-1] - if module_name in PRIVATE_IMPORTS_TO_IGNORE: - continue - - if module_name.startswith("_"): - yield (node.lineno, f"Import of internal function {repr(module_name)}") - - -def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: - """ - This test case is necessary after 'Black' (https://github.com/psf/black), - is formating strings over multiple lines. - - For example, when this: - - >>> foo = ( - ... "bar " - ... "baz" - ... ) - - Is becoming this: - - >>> foo = ("bar " "baz") - - 'Black' is not considering this as an - issue (see https://github.com/psf/black/issues/1051), - so we are checking it here instead. - - Parameters - ---------- - file_obj : IO - File-like object containing the Python code to validate. - - Yields - ------ - line_number : int - Line number of unconcatenated string. - msg : str - Explenation of the error. - - Notes - ----- - GH #30454 - """ - tokens: List = list(tokenize.generate_tokens(file_obj.readline)) - - for current_token, next_token in zip(tokens, tokens[1:]): - if current_token.type == next_token.type == token.STRING: - yield ( - current_token.start[0], - ( - "String unnecessarily split in two by black. " - "Please merge them manually." - ), - ) - - -def strings_with_wrong_placed_whitespace( - file_obj: IO[str], -) -> Iterable[Tuple[int, str]]: - """ - Test case for leading spaces in concated strings. - - For example: - - >>> rule = ( - ... "We want the space at the end of the line, " - ... "not at the beginning" - ... ) - - Instead of: - - >>> rule = ( - ... "We want the space at the end of the line," - ... " not at the beginning" - ... ) - - Parameters - ---------- - file_obj : IO - File-like object containing the Python code to validate. - - Yields - ------ - line_number : int - Line number of unconcatenated string. - msg : str - Explenation of the error. 
- """ - - def has_wrong_whitespace(first_line: str, second_line: str) -> bool: - """ - Checking if the two lines are mattching the unwanted pattern. - - Parameters - ---------- - first_line : str - First line to check. - second_line : str - Second line to check. - - Returns - ------- - bool - True if the two recived string match, an unwanted pattern. - - Notes - ----- - The unwanted pattern that we are trying to catch is if the spaces in - a string that is concatenated over multiple lines are placed at the - end of each string, unless this string is ending with a - newline character (\n). - - For example, this is bad: - - >>> rule = ( - ... "We want the space at the end of the line," - ... " not at the beginning" - ... ) - - And what we want is: - - >>> rule = ( - ... "We want the space at the end of the line, " - ... "not at the beginning" - ... ) - - And if the string is ending with a new line character (\n) we - do not want any trailing whitespaces after it. - - For example, this is bad: - - >>> rule = ( - ... "We want the space at the begging of " - ... "the line if the previous line is ending with a \n " - ... "not at the end, like always" - ... ) - - And what we do want is: - - >>> rule = ( - ... "We want the space at the begging of " - ... "the line if the previous line is ending with a \n" - ... " not at the end, like always" - ... ) - """ - if first_line.endswith(r"\n"): - return False - elif first_line.startswith(" ") or second_line.startswith(" "): - return False - elif first_line.endswith(" ") or second_line.endswith(" "): - return False - elif (not first_line.endswith(" ")) and second_line.startswith(" "): - return True - return False - - tokens: List = list(tokenize.generate_tokens(file_obj.readline)) - - for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]): - # Checking if we are in a block of concated string - if ( - first_token.type == third_token.type == token.STRING - and second_token.type == token.NL - ): - # Striping the quotes, with the string litteral prefix - first_string: str = first_token.string[ - _get_literal_string_prefix_len(first_token.string) + 1 : -1 - ] - second_string: str = third_token.string[ - _get_literal_string_prefix_len(third_token.string) + 1 : -1 - ] - - if has_wrong_whitespace(first_string, second_string): - yield ( - third_token.start[0], - ( - "String has a space at the beginning instead " - "of the end of the previous string." - ), - ) - - -def main( - function: Callable[[IO[str]], Iterable[Tuple[int, str]]], - source_path: str, - output_format: str, -) -> bool: - """ - Main entry point of the script. - - Parameters - ---------- - function : Callable - Function to execute for the specified validation type. - source_path : str - Source path representing path to a file/directory. - output_format : str - Output format of the error message. - file_extensions_to_check : str - Comma separated values of what file extensions to check. - excluded_file_paths : str - Comma separated values of what file paths to exclude during the check. - - Returns - ------- - bool - True if found any patterns are found related to the given function. - - Raises - ------ - ValueError - If the `source_path` is not pointing to existing file/directory. 
- """ - is_failed: bool = False - - for file_path in source_path: - with open(file_path, encoding="utf-8") as file_obj: - for line_number, msg in function(file_obj): - is_failed = True - print( - output_format.format( - source_path=file_path, line_number=line_number, msg=msg - ) - ) - - return is_failed - - -if __name__ == "__main__": - available_validation_types: List[str] = [ - "bare_pytest_raises", - "private_function_across_module", - "private_import_across_module", - "strings_to_concatenate", - "strings_with_wrong_placed_whitespace", - ] - - parser = argparse.ArgumentParser(description="Unwanted patterns checker.") - - parser.add_argument("paths", nargs="*", help="Source paths of files to check.") - parser.add_argument( - "--format", - "-f", - default="{source_path}:{line_number}:{msg}", - help="Output format of the error message.", - ) - parser.add_argument( - "--validation-type", - "-vt", - choices=available_validation_types, - required=True, - help="Validation test case to check.", - ) - - args = parser.parse_args() - - sys.exit( - main( - function=globals().get(args.validation_type), - source_path=args.paths, - output_format=args.format, - ) - ) diff --git a/setup.cfg b/setup.cfg index 9e3deff4c7183..610b30e4422a9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -93,6 +93,16 @@ exclude = .eggs/*.py, versioneer.py, env # exclude asv benchmark environments from linting +per-file-ignores = + # private import across modules + pandas/tests/*:PDF020 + # pytest.raises without match= + pandas/tests/extension/*:PDF009 + # os.remove + doc/make.py:PDF008 + # import from pandas._testing + pandas/testing.py:PDF014 + [flake8-rst] max-line-length = 84
- [ ] closes #40826
- [ ] closes #40873
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry

----

This means that, just by running `flake8`, you'll be able to get all these custom linting errors alongside the regular `flake8` ones. E.g. if we make `pandas/tests/t.py` as follows:

```python
import pytest

import pandas as pd
from pandas import Categorical


def test_foo():
    cat_0 = pd.Categorical([1])
    with pytest.warns(FutureWarning):
        cat_1 = pd.Categorical(1)
```

then we get:

```console
$ flake8 pandas/tests/t.py
pandas/tests/t.py:4:1: F401 'pandas.Categorical' imported but unused
pandas/tests/t.py:8:5: F841 local variable 'cat_0' is assigned to but never used
pandas/tests/t.py:8:13: PDF019 found both 'pd.Categorical' and 'Categorical' in the same file
pandas/tests/t.py:9:10: PDF011 found 'pytest.warns' (use 'pandas._testing.assert_produces_warning')
pandas/tests/t.py:10:9: F841 local variable 'cat_1' is assigned to but never used
pandas/tests/t.py:10:17: PDF019 found both 'pd.Categorical' and 'Categorical' in the same file
```

These can then easily be configured using `flake8`'s configuration options, like `# noqa` and `per-file-ignores` in `setup.cfg` (see the sketch below).

This also brings down the number of pre-commit checks.

----

If this is something the team wants, and it's decided to keep it as a separate repo, then some outstanding things to do would be:

- transferring it to `pandas-dev`
- sorting out the license (it's currently in my name, and I presume that should change)
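For reference, a minimal sketch of silencing one of the plugin codes inline, using flake8's standard `# noqa` mechanism (the `PDF019` code is the one shown in the output above):

```python
import pandas as pd
from pandas import Categorical  # noqa: F401


def test_foo():
    # both spellings used on purpose here, so suppress only the
    # plugin's PDF019 check on this line
    cat = pd.Categorical([1])  # noqa: PDF019
    assert isinstance(cat, Categorical)
```

Per-file suppression works the same way through `per-file-ignores`, as in the `setup.cfg` hunk above.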
https://api.github.com/repos/pandas-dev/pandas/pulls/40906
2021-04-12T20:40:51Z
2021-04-16T01:06:48Z
2021-04-16T01:06:48Z
2021-04-16T07:17:01Z
Backport PR #40902 on branch 1.2.x (DOC: Start v1.2.5 release notes)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 8739694c20e33..e546c8c8b80e3 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 1.2 .. toctree:: :maxdepth: 2 + v1.2.5 v1.2.4 v1.2.3 v1.2.2 diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index dd74091b64014..433ee37508e66 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -30,4 +30,4 @@ Fixed regressions Contributors ~~~~~~~~~~~~ -.. contributors:: v1.2.3..v1.2.4|HEAD +.. contributors:: v1.2.3..v1.2.4 diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst new file mode 100644 index 0000000000000..cdfc2e5686b91 --- /dev/null +++ b/doc/source/whatsnew/v1.2.5.rst @@ -0,0 +1,48 @@ +.. _whatsnew_125: + +What's new in 1.2.5 (May ??, 2021) +---------------------------------- + +These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.bug_fixes: + +Bug fixes +~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.other: + +Other +~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.2.4..v1.2.5|HEAD
Backport PR #40902: DOC: Start v1.2.5 release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/40905
2021-04-12T19:29:22Z
2021-04-12T20:36:13Z
2021-04-12T20:36:13Z
2021-04-12T20:36:13Z
DOC: Fixed documentation for few files
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a2a108924a0f2..c178e9f7cecbe 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -110,10 +110,13 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then pytest -q --doctest-modules \ pandas/core/accessor.py \ pandas/core/aggregation.py \ + pandas/core/algorithms.py \ pandas/core/base.py \ pandas/core/construction.py \ pandas/core/frame.py \ pandas/core/generic.py \ + pandas/core/indexers.py \ + pandas/core/nanops.py \ pandas/core/series.py \ pandas/io/sql.py RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c8389ae24f000..16ec2bb5f253c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -375,46 +375,60 @@ def unique(values): >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) - >>> pd.unique(pd.Series([pd.Timestamp('20160101'), - ... pd.Timestamp('20160101')])) + >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') - >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), - ... pd.Timestamp('20160101', tz='US/Eastern')])) - array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], - dtype=object) - - >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), - ... pd.Timestamp('20160101', tz='US/Eastern')])) + >>> pd.unique( + ... pd.Series( + ... [ + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... ] + ... ) + ... ) + <DatetimeArray> + ['2016-01-01 00:00:00-05:00'] + Length: 1, dtype: datetime64[ns, US/Eastern] + + >>> pd.unique( + ... pd.Index( + ... [ + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... ] + ... ) + ... ) DatetimeIndex(['2016-01-01 00:00:00-05:00'], - ... dtype='datetime64[ns, US/Eastern]', freq=None) + dtype='datetime64[ns, US/Eastern]', + freq=None) - >>> pd.unique(list('baabc')) + >>> pd.unique(list("baabc")) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. - >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) - [b, a, c] - Categories (3, object): [b, a, c] + >>> pd.unique(pd.Series(pd.Categorical(list("baabc")))) + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] - >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), - ... categories=list('abc')))) - [b, a, c] - Categories (3, object): [b, a, c] + >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc")))) + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] An ordered Categorical preserves the category ordering. - >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), - ... categories=list('abc'), - ... ordered=True))) - [b, a, c] - Categories (3, object): [a < b < c] + >>> pd.unique( + ... pd.Series( + ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True) + ... ) + ... 
) + ['b', 'a', 'c'] + Categories (3, object): ['a' < 'b' < 'c'] An array of tuples - >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) + >>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index db28ad710989d..aa780787d58b6 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -209,16 +209,24 @@ def validate_indices(indices: np.ndarray, n: int) -> None: Examples -------- - >>> validate_indices([1, 2], 3) - # OK - >>> validate_indices([1, -2], 3) - ValueError - >>> validate_indices([1, 2, 3], 3) - IndexError - >>> validate_indices([-1, -1], 0) - # OK - >>> validate_indices([0, 1], 0) - IndexError + >>> validate_indices(np.array([1, 2]), 3) # OK + + >>> validate_indices(np.array([1, -2]), 3) + Traceback (most recent call last): + ... + ValueError: negative dimensions are not allowed + + >>> validate_indices(np.array([1, 2, 3]), 3) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + + >>> validate_indices(np.array([-1, -1]), 0) # OK + + >>> validate_indices(np.array([0, 1]), 0) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds """ if len(indices): min_idx = indices.min() diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 54588eafc3fa0..92618605e47cc 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1056,7 +1056,7 @@ def nanargmax( [ 6., 7., nan], [ 9., 10., nan]]) >>> nanops.nanargmax(arr, axis=1) - array([2, 2, 1, 1], dtype=int64) + array([2, 2, 1, 1]) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask) # error: Need type annotation for 'result' @@ -1102,7 +1102,7 @@ def nanargmin( [nan, 7., 8.], [nan, 10., 11.]]) >>> nanops.nanargmin(arr, axis=1) - array([0, 0, 1, 1], dtype=int64) + array([0, 0, 1, 1]) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask) # error: Need type annotation for 'result'
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40903
2021-04-12T17:59:27Z
2021-04-26T21:21:37Z
2021-04-26T21:21:37Z
2021-04-28T13:01:44Z
DOC: Start v1.2.5 release notes
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 8697182f5ca6f..986cf43b80494 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.2 .. toctree:: :maxdepth: 2 + v1.2.5 v1.2.4 v1.2.3 v1.2.2 diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index dd74091b64014..433ee37508e66 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -30,4 +30,4 @@ Fixed regressions Contributors ~~~~~~~~~~~~ -.. contributors:: v1.2.3..v1.2.4|HEAD +.. contributors:: v1.2.3..v1.2.4 diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst new file mode 100644 index 0000000000000..cdfc2e5686b91 --- /dev/null +++ b/doc/source/whatsnew/v1.2.5.rst @@ -0,0 +1,48 @@ +.. _whatsnew_125: + +What's new in 1.2.5 (May ??, 2021) +---------------------------------- + +These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.bug_fixes: + +Bug fixes +~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.other: + +Other +~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_125.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.2.4..v1.2.5|HEAD
~~Do not merge yet. Will merge to master after the GitHub release to trigger the website update.~~
https://api.github.com/repos/pandas-dev/pandas/pulls/40902
2021-04-12T17:30:14Z
2021-04-12T19:29:00Z
2021-04-12T19:29:00Z
2021-04-12T19:29:04Z
CLN: change jinja2 template name to `template_html`
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 90ec5a2283f1e..6eacc90f4f62a 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -23,7 +23,7 @@ Styler properties :toctree: api/ Styler.env - Styler.template + Styler.template_html Styler.loader Style application diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 8a10a6e4d4c2e..765b2929d3014 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1710,7 +1710,7 @@ " Styler.loader, # the default\n", " ])\n", " )\n", - " template = env.get_template(\"myhtml.tpl\")" + " template_html = env.get_template(\"myhtml.tpl\")" ] }, { diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 7b5347ba2d9a9..91e4413e14e62 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1485,7 +1485,7 @@ def from_custom_template(cls, searchpath, name): # error: Invalid base class "cls" class MyStyler(cls): # type:ignore[valid-type,misc] env = jinja2.Environment(loader=loader) - template = env.get_template(name) + template_html = env.get_template(name) return MyStyler diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 82f57b71caebf..9baa0542670e3 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -58,7 +58,7 @@ class StylerRenderer: loader = jinja2.PackageLoader("pandas", "io/formats/templates") env = jinja2.Environment(loader=loader, trim_blocks=True) - template = env.get_template("html.tpl") + template_html = env.get_template("html.tpl") def __init__( self, @@ -143,7 +143,7 @@ def render(self, **kwargs) -> str: # TODO: namespace all the pandas keys d = self._translate() d.update(kwargs) - return self.template.render(**d) + return self.template_html.render(**d) def _compute(self): """ diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 56e9581f8785a..1bb672e06291f 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -1367,7 +1367,7 @@ def test_block_names(): "tr", "after_rows", } - result = set(Styler.template.blocks) + result = set(Styler.template_html.blocks) assert result == expected @@ -1386,6 +1386,6 @@ def test_from_custom_template(tmpdir): result = Styler.from_custom_template(str(tmpdir.join("templates")), "myhtml.tpl") assert issubclass(result, Styler) assert result.env is not Styler.env - assert result.template is not Styler.template + assert result.template_html is not Styler.template_html styler = result(DataFrame({"A": [1, 2]})) assert styler.render()
Prior to adding `template_latex`, this changes the name of the HTML template to `template_html`, including:

- updating the docs on the new name for subclassing
- updating the custom template method for subclassing
- updating the tests which refer to it
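As a usage sketch (mirroring the doc notebook change in this diff; the `templates` directory and `myhtml.tpl` name are placeholders), a subclass now assigns its template to the renamed attribute:

```python
import jinja2

from pandas.io.formats.style import Styler

# a loader that falls back to pandas' built-in templates
loader = jinja2.ChoiceLoader([
    jinja2.FileSystemLoader("templates"),  # directory holding myhtml.tpl
    Styler.loader,  # the default
])


class MyStyler(Styler):
    env = jinja2.Environment(loader=loader)
    template_html = env.get_template("myhtml.tpl")  # was ``template``
```

`Styler.from_custom_template(searchpath, name)` builds an equivalent subclass for you, and is updated in this diff to set `template_html` as well.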
https://api.github.com/repos/pandas-dev/pandas/pulls/40901
2021-04-12T17:12:06Z
2021-04-13T14:15:41Z
2021-04-13T14:15:41Z
2021-04-13T19:00:12Z
DOC: whats new `other` section reduced
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 0ec9758477eba..0066eabf36c2b 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -117,6 +117,9 @@ We provided some focused development on :class:`.Styler`, including altering met to accept more universal CSS language for arguments, such as ``'color:red;'`` instead of ``[('color', 'red')]`` (:issue:`39564`). This is also added to the built-in methods to allow custom CSS highlighting instead of default background coloring (:issue:`40242`). +Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient` +method to shade elements based on a given gradient map and not be restricted only to +values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`). @@ -818,24 +821,29 @@ ExtensionArray - Fixed a bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`) - +Styler +^^^^^^ + +- Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`) +- :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`) +- Bug in :class:`Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`) +- Bug in :meth:`Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`) +- Bug in :class:`Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`) +- Bug in :class:`.Styler` where copying from Jupyter dropped top left cell and misaligned headers (:issue:`12147`) +- Bug in :class:`.Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`) +- Bug in :class:`Styler` which caused CSS to duplicate on multiple renders. (:issue:`39395`, :issue:`40334`) + + Other ^^^^^ - Bug in :class:`Index` constructor sometimes silently ignoring a specified ``dtype`` (:issue:`38879`) - Bug in :func:`pandas.api.types.infer_dtype` not recognizing Series, Index or array with a period dtype (:issue:`23553`) - Bug in :func:`pandas.api.types.infer_dtype` raising an error for general :class:`.ExtensionArray` objects. It will now return ``"unknown-array"`` instead of raising (:issue:`37367`) - Bug in constructing a :class:`Series` from a list and a :class:`PandasDtype` (:issue:`39357`) -- Bug in :class:`Styler` which caused CSS to duplicate on multiple renders. (:issue:`39395`, :issue:`40334`) - ``inspect.getmembers(Series)`` no longer raises an ``AbstractMethodError`` (:issue:`38782`) - Bug in :meth:`Series.where` with numeric dtype and ``other = None`` not casting to ``nan`` (:issue:`39761`) - :meth:`Index.where` behavior now mirrors :meth:`Index.putmask` behavior, i.e. 
``index.where(mask, other)`` matches ``index.putmask(~mask, other)`` (:issue:`39412`) - Bug in :func:`pandas.testing.assert_series_equal`, :func:`pandas.testing.assert_frame_equal`, :func:`pandas.testing.assert_index_equal` and :func:`pandas.testing.assert_extension_array_equal` incorrectly raising when an attribute has an unrecognized NA type (:issue:`39461`) -- Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`) -- :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`) -- Bug in :class:`Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`) -- Bug in :meth:`Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`) -- Bug in :class:`Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`) -- Bug in :class:`.Styler` where copying from Jupyter dropped top left cell and misaligned headers (:issue:`12147`) -- Bug in :class:`.Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`) - Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`) - Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`) - Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`)
- [x] closes #40897
https://api.github.com/repos/pandas-dev/pandas/pulls/40900
2021-04-12T16:23:37Z
2021-04-13T14:09:32Z
2021-04-13T14:09:32Z
2021-04-13T14:21:50Z
CLN: refactor `Styler._translate` into composite translate functions
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 82f57b71caebf..0b7279d796464 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -173,15 +173,6 @@ def _translate(self): BLANK_CLASS = "blank" BLANK_VALUE = "&nbsp;" - # mapping variables - ctx = self.ctx # td css styles from apply() and applymap() - cell_context = self.cell_context # td css classes from set_td_classes() - cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(list) - - # copied attributes - hidden_index = self.hidden_index - hidden_columns = self.hidden_columns - # construct render dict d = { "uuid": self.uuid, @@ -189,165 +180,185 @@ def _translate(self): "caption": self.caption, } + head = self._translate_header( + BLANK_CLASS, BLANK_VALUE, INDEX_NAME_CLASS, COL_HEADING_CLASS + ) + d.update({"head": head}) + + self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict( + list + ) + body = self._translate_body(DATA_CLASS, ROW_HEADING_CLASS) + d.update({"body": body}) + + cellstyle: list[dict[str, CSSList | list[str]]] = [ + {"props": list(props), "selectors": selectors} + for props, selectors in self.cellstyle_map.items() + ] + d.update({"cellstyle": cellstyle}) + + table_attr = self.table_attributes + use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + table_attr = table_attr or "" + if 'class="' in table_attr: + table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ') + else: + table_attr += ' class="tex2jax_ignore"' + d.update({"table_attributes": table_attr}) + + if self.tooltips: + d = self.tooltips._translate(self.data, self.uuid, d) + + return d + + def _translate_header( + self, blank_class, blank_value, index_name_class, col_heading_class + ): + """ + Build each <tr> within table <head>, using the structure: + +----------------------------+---------------+---------------------------+ + | index_blanks ... | column_name_0 | column_headers (level_0) | + 1) | .. | .. | .. | + | index_blanks ... | column_name_n | column_headers (level_n) | + +----------------------------+---------------+---------------------------+ + 2) | index_names (level_0 to level_n) ... | column_blanks ... | + +----------------------------+---------------+---------------------------+ + """ # for sparsifying a MultiIndex - idx_lengths = _get_level_lengths(self.index) - col_lengths = _get_level_lengths(self.columns, hidden_columns) + col_lengths = _get_level_lengths(self.columns, self.hidden_columns) - n_rlvls = self.data.index.nlevels - n_clvls = self.data.columns.nlevels - rlabels = self.data.index.tolist() clabels = self.data.columns.tolist() - - if n_rlvls == 1: - rlabels = [[x] for x in rlabels] - if n_clvls == 1: + if self.data.columns.nlevels == 1: clabels = [[x] for x in clabels] clabels = list(zip(*clabels)) head = [] - for r in range(n_clvls): - # Blank for Index columns... - row_es = [ - { - "type": "th", - "value": BLANK_VALUE, - "display_value": BLANK_VALUE, - "is_visible": not hidden_index, - "class": " ".join([BLANK_CLASS]), - } - ] * (n_rlvls - 1) - - # ... 
except maybe the last for columns.names + # 1) column headers + for r in range(self.data.columns.nlevels): + index_blanks = [ + _element("th", blank_class, blank_value, not self.hidden_index) + ] * (self.data.index.nlevels - 1) + name = self.data.columns.names[r] - cs = [ - BLANK_CLASS if name is None else INDEX_NAME_CLASS, - f"level{r}", + column_name = [ + _element( + "th", + f"{blank_class if name is None else index_name_class} level{r}", + name if name is not None else blank_value, + not self.hidden_index, + ) ] - name = BLANK_VALUE if name is None else name - row_es.append( - { - "type": "th", - "value": name, - "display_value": name, - "class": " ".join(cs), - "is_visible": not hidden_index, - } - ) if clabels: - for c, value in enumerate(clabels[r]): - es = { - "type": "th", - "value": value, - "display_value": value, - "class": f"{COL_HEADING_CLASS} level{r} col{c}", - "is_visible": _is_visible(c, r, col_lengths), - } - colspan = col_lengths.get((r, c), 0) - if colspan > 1: - es["attributes"] = f'colspan="{colspan}"' - row_es.append(es) - head.append(row_es) + column_headers = [ + _element( + "th", + f"{col_heading_class} level{r} col{c}", + value, + _is_visible(c, r, col_lengths), + attributes=( + f'colspan="{col_lengths.get((r, c), 0)}"' + if col_lengths.get((r, c), 0) > 1 + else "" + ), + ) + for c, value in enumerate(clabels[r]) + ] + head.append(index_blanks + column_name + column_headers) + # 2) index names if ( self.data.index.names and com.any_not_none(*self.data.index.names) - and not hidden_index + and not self.hidden_index ): - index_header_row = [] + index_names = [ + _element( + "th", + f"{index_name_class} level{c}", + blank_value if name is None else name, + True, + ) + for c, name in enumerate(self.data.index.names) + ] - for c, name in enumerate(self.data.index.names): - cs = [INDEX_NAME_CLASS, f"level{c}"] - name = "" if name is None else name - index_header_row.append( - {"type": "th", "value": name, "class": " ".join(cs)} + column_blanks = [ + _element( + "th", + f"{blank_class} col{c}", + blank_value, + c not in self.hidden_columns, ) + for c in range(len(clabels[0])) + ] + head.append(index_names + column_blanks) - index_header_row.extend( - [ - { - "type": "th", - "value": BLANK_VALUE, - "class": " ".join([BLANK_CLASS, f"col{c}"]), - } - for c in range(len(clabels[0])) - if c not in hidden_columns - ] - ) + return head - head.append(index_header_row) - d.update({"head": head}) + def _translate_body(self, data_class, row_heading_class): + """ + Build each <tr> in table <body> in the following format: + +--------------------------------------------+---------------------------+ + | index_header_0 ... 
index_header_n | data_by_column | + +--------------------------------------------+---------------------------+ + + Also add elements to the cellstyle_map for more efficient grouped elements in + <style></style> block + """ + # for sparsifying a MultiIndex + idx_lengths = _get_level_lengths(self.index) + + rlabels = self.data.index.tolist() + if self.data.index.nlevels == 1: + rlabels = [[x] for x in rlabels] body = [] for r, row_tup in enumerate(self.data.itertuples()): - row_es = [] - for c, value in enumerate(rlabels[r]): - rid = [ - ROW_HEADING_CLASS, - f"level{c}", - f"row{r}", - ] - es = { - "type": "th", - "is_visible": (_is_visible(r, c, idx_lengths) and not hidden_index), - "value": value, - "display_value": value, - "id": "_".join(rid[1:]), - "class": " ".join(rid), - } - rowspan = idx_lengths.get((c, r), 0) - if rowspan > 1: - es["attributes"] = f'rowspan="{rowspan}"' - row_es.append(es) + index_headers = [ + _element( + "th", + f"{row_heading_class} level{c} row{r}", + value, + (_is_visible(r, c, idx_lengths) and not self.hidden_index), + id=f"level{c}_row{r}", + attributes=( + f'rowspan="{idx_lengths.get((c, r), 0)}"' + if idx_lengths.get((c, r), 0) > 1 + else "" + ), + ) + for c, value in enumerate(rlabels[r]) + ] + data = [] for c, value in enumerate(row_tup[1:]): - formatter = self._display_funcs[(r, c)] - row_dict = { - "type": "td", - "value": value, - "display_value": formatter(value), - "is_visible": (c not in hidden_columns), - "attributes": "", - } - - # only add an id if the cell has a style - props: CSSList = [] - if self.cell_ids or (r, c) in ctx: - row_dict["id"] = f"row{r}_col{c}" - props.extend(ctx[r, c]) - # add custom classes from cell context cls = "" - if (r, c) in cell_context: - cls = " " + cell_context[r, c] - row_dict["class"] = f"{DATA_CLASS} row{r} col{c}{cls}" - - row_es.append(row_dict) - if props: # (), [] won't be in cellstyle_map, cellstyle respectively - cellstyle_map[tuple(props)].append(f"row{r}_col{c}") - body.append(row_es) - d.update({"body": body}) - - cellstyle: list[dict[str, CSSList | list[str]]] = [ - {"props": list(props), "selectors": selectors} - for props, selectors in cellstyle_map.items() - ] - d.update({"cellstyle": cellstyle}) + if (r, c) in self.cell_context: + cls = " " + self.cell_context[r, c] + + data_element = _element( + "td", + f"{data_class} row{r} col{c}{cls}", + value, + (c not in self.hidden_columns), + attributes="", + display_value=self._display_funcs[(r, c)](value), + ) - table_attr = self.table_attributes - use_mathjax = get_option("display.html.use_mathjax") - if not use_mathjax: - table_attr = table_attr or "" - if 'class="' in table_attr: - table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ') - else: - table_attr += ' class="tex2jax_ignore"' - d.update({"table_attributes": table_attr}) + # only add an id if the cell has a style + if self.cell_ids or (r, c) in self.ctx: + data_element["id"] = f"row{r}_col{c}" + if (r, c) in self.ctx and self.ctx[r, c]: # only add if non-empty + self.cellstyle_map[tuple(self.ctx[r, c])].append( + f"row{r}_col{c}" + ) - if self.tooltips: - d = self.tooltips._translate(self.data, self.uuid, d) + data.append(data_element) - return d + body.append(index_headers + data) + return body def format( self, @@ -502,6 +513,27 @@ def format( return self +def _element( + html_element: str, + html_class: str, + value: Any, + is_visible: bool, + **kwargs, +) -> dict: + """ + Template to return container with information for a <td></td> or <th></th> element. 
+ """ + if "display_value" not in kwargs: + kwargs["display_value"] = value + return { + "type": html_element, + "value": value, + "class": html_class, + "is_visible": is_visible, + **kwargs, + } + + def _get_level_lengths(index, hidden_elements=None): """ Given an index, find the level length for each element. diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 56e9581f8785a..25a7eb36d6b48 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -273,6 +273,7 @@ def test_empty_index_name_doesnt_display(self): "type": "th", "value": "A", "is_visible": True, + "attributes": "", }, { "class": "col_heading level0 col1", @@ -280,6 +281,7 @@ def test_empty_index_name_doesnt_display(self): "type": "th", "value": "B", "is_visible": True, + "attributes": "", }, { "class": "col_heading level0 col2", @@ -287,6 +289,7 @@ def test_empty_index_name_doesnt_display(self): "type": "th", "value": "C", "is_visible": True, + "attributes": "", }, ] ] @@ -295,6 +298,7 @@ def test_empty_index_name_doesnt_display(self): def test_index_name(self): # https://github.com/pandas-dev/pandas/issues/11655 + # TODO: this test can be minimised to address the test more directly df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) result = df.set_index("A").style._translate() @@ -313,6 +317,7 @@ def test_index_name(self): "value": "B", "display_value": "B", "is_visible": True, + "attributes": "", }, { "class": "col_heading level0 col1", @@ -320,12 +325,31 @@ def test_index_name(self): "value": "C", "display_value": "C", "is_visible": True, + "attributes": "", }, ], [ - {"class": "index_name level0", "type": "th", "value": "A"}, - {"class": "blank col0", "type": "th", "value": self.blank_value}, - {"class": "blank col1", "type": "th", "value": self.blank_value}, + { + "class": "index_name level0", + "type": "th", + "value": "A", + "is_visible": True, + "display_value": "A", + }, + { + "class": "blank col0", + "type": "th", + "value": self.blank_value, + "is_visible": True, + "display_value": self.blank_value, + }, + { + "class": "blank col1", + "type": "th", + "value": self.blank_value, + "is_visible": True, + "display_value": self.blank_value, + }, ], ] @@ -333,6 +357,7 @@ def test_index_name(self): def test_multiindex_name(self): # https://github.com/pandas-dev/pandas/issues/11655 + # TODO: this test can be minimised to address the test more directly df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) result = df.set_index(["A", "B"]).style._translate() @@ -358,12 +383,31 @@ def test_multiindex_name(self): "value": "C", "display_value": "C", "is_visible": True, + "attributes": "", }, ], [ - {"class": "index_name level0", "type": "th", "value": "A"}, - {"class": "index_name level1", "type": "th", "value": "B"}, - {"class": "blank col0", "type": "th", "value": self.blank_value}, + { + "class": "index_name level0", + "type": "th", + "value": "A", + "is_visible": True, + "display_value": "A", + }, + { + "class": "index_name level1", + "type": "th", + "value": "B", + "is_visible": True, + "display_value": "B", + }, + { + "class": "blank col0", + "type": "th", + "value": self.blank_value, + "is_visible": True, + "display_value": self.blank_value, + }, ], ] @@ -838,7 +882,7 @@ def test_mi_sparse(self): "class": "row_heading level0 row0", "id": "level0_row0", } - tm.assert_dict_equal(body_0, expected_0) + assert body_0 == expected_0 body_1 = result["body"][0][1] expected_1 = { @@ -848,8 +892,9 @@ def 
test_mi_sparse(self): "type": "th", "class": "row_heading level1 row0", "id": "level1_row0", + "attributes": "", } - tm.assert_dict_equal(body_1, expected_1) + assert body_1 == expected_1 body_10 = result["body"][1][0] expected_10 = { @@ -859,8 +904,9 @@ def test_mi_sparse(self): "type": "th", "class": "row_heading level0 row1", "id": "level0_row1", + "attributes": "", } - tm.assert_dict_equal(body_10, expected_10) + assert body_10 == expected_10 head = result["head"][0] expected = [ @@ -884,21 +930,26 @@ def test_mi_sparse(self): "value": "A", "is_visible": True, "display_value": "A", + "attributes": "", }, ] assert head == expected def test_mi_sparse_disabled(self): + df = DataFrame( + {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]]) + ) + result = df.style._translate()["body"] + assert 'rowspan="2"' in result[0][0]["attributes"] + assert result[1][0]["is_visible"] is False + with pd.option_context("display.multi_sparse", False): - df = DataFrame( - {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]]) - ) - result = df.style._translate() - body = result["body"] - for row in body: - assert "attributes" not in row[0] + result = df.style._translate()["body"] + assert 'rowspan="2"' not in result[0][0]["attributes"] + assert result[1][0]["is_visible"] is True def test_mi_sparse_index_names(self): + # TODO this test is verbose can be minimised to more directly target test df = DataFrame( {"A": [1, 2]}, index=pd.MultiIndex.from_arrays( @@ -908,14 +959,33 @@ def test_mi_sparse_index_names(self): result = df.style._translate() head = result["head"][1] expected = [ - {"class": "index_name level0", "value": "idx_level_0", "type": "th"}, - {"class": "index_name level1", "value": "idx_level_1", "type": "th"}, - {"class": "blank col0", "value": self.blank_value, "type": "th"}, + { + "class": "index_name level0", + "value": "idx_level_0", + "type": "th", + "is_visible": True, + "display_value": "idx_level_0", + }, + { + "class": "index_name level1", + "value": "idx_level_1", + "type": "th", + "is_visible": True, + "display_value": "idx_level_1", + }, + { + "class": "blank col0", + "value": self.blank_value, + "type": "th", + "is_visible": True, + "display_value": self.blank_value, + }, ] assert head == expected def test_mi_sparse_column_names(self): + # TODO this test is verbose - could be minimised df = DataFrame( np.arange(16).reshape(4, 4), index=pd.MultiIndex.from_arrays( @@ -949,6 +1019,7 @@ def test_mi_sparse_column_names(self): "is_visible": True, "type": "th", "value": 1, + "attributes": "", }, { "class": "col_heading level1 col1", @@ -956,6 +1027,7 @@ def test_mi_sparse_column_names(self): "is_visible": True, "type": "th", "value": 0, + "attributes": "", }, { "class": "col_heading level1 col2", @@ -963,6 +1035,7 @@ def test_mi_sparse_column_names(self): "is_visible": True, "type": "th", "value": 1, + "attributes": "", }, { "class": "col_heading level1 col3", @@ -970,6 +1043,7 @@ def test_mi_sparse_column_names(self): "is_visible": True, "type": "th", "value": 0, + "attributes": "", }, ] assert head == expected
This decomposes `Styler._translate()` into:

```
def _translate():
    ...
    self._translate_header()
    ...
    self._translate_body()
    ...
```

where the `_translate_header()` and `_translate_body()` methods are given documentation explaining what they build. The code is not fundamentally changed, but variables are renamed for clarity to match the documentation, and comprehensions replace `for` loops where possible. Some tests are minimally altered since the generic `_element` helper now returns more dict keys for some elements (see the sketch below).
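To illustrate, a sketch based on the private `_element` helper added in this diff (internal API, shown only to clarify what the render dict now contains per cell):

```python
from pandas.io.formats.style_render import _element

# roughly what _translate_body builds for a single data cell
cell = _element(
    "td",              # html_element
    "data row0 col0",  # html_class
    2.5,               # value
    True,              # is_visible
    attributes="",
    display_value="2.50",  # from the cell's display function
)
assert cell == {
    "type": "td",
    "value": 2.5,
    "class": "data row0 col0",
    "is_visible": True,
    "attributes": "",
    "display_value": "2.50",
}
```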
https://api.github.com/repos/pandas-dev/pandas/pulls/40898
2021-04-12T16:08:10Z
2021-04-13T15:26:21Z
2021-04-13T15:26:21Z
2021-04-13T15:26:25Z
Backport PR #40878 on branch 1.2.x (REGR: ufunc with DataFrame input not passing all kwargs)
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 54652ecc4dceb..dd74091b64014 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) - Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`) - Fixed regression in repr of floats in an ``object`` column not respecting ``float_format`` when printed in the console or outputted through :meth:`DataFrame.to_string`, :meth:`DataFrame.to_html`, and :meth:`DataFrame.to_latex` (:issue:`40024`) +- Fixed regression in NumPy ufuncs such as ``np.add`` not passing through all arguments for :class:`DataFrame` (:issue:`40662`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index cb185dcf78f63..8d02ddef29593 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -351,15 +351,17 @@ def reconstruct(result): # * len(inputs) > 1 is doable when we know that we have # aligned blocks / dtypes. inputs = tuple(np.asarray(x) for x in inputs) - result = getattr(ufunc, method)(*inputs) + result = getattr(ufunc, method)(*inputs, **kwargs) elif self.ndim == 1: # ufunc(series, ...) inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) result = getattr(ufunc, method)(*inputs, **kwargs) else: # ufunc(dataframe) - if method == "__call__": + if method == "__call__" and not kwargs: # for np.<ufunc>(..) calls + # kwargs cannot necessarily be handled block-by-block, so only + # take this path if there are no kwargs mgr = inputs[0]._mgr result = mgr.apply(getattr(ufunc, method)) else: diff --git a/pandas/tests/frame/test_ufunc.py b/pandas/tests/frame/test_ufunc.py index 83fd3db72a90c..19ebae449ecc3 100644 --- a/pandas/tests/frame/test_ufunc.py +++ b/pandas/tests/frame/test_ufunc.py @@ -1,3 +1,5 @@ +from functools import partial + import numpy as np import pytest @@ -55,6 +57,42 @@ def test_binary_input_dispatch_binop(dtype): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize( + "func,arg,expected", + [ + (np.add, 1, [2, 3, 4, 5]), + ( + partial(np.add, where=[[False, True], [True, False]]), + np.array([[1, 1], [1, 1]]), + [0, 3, 4, 0], + ), + (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]), + (np.subtract, 2, [-1, 0, 1, 2]), + ( + partial(np.negative, where=np.array([[False, True], [True, False]])), + None, + [0, -2, -3, 0], + ), + ], +) +def test_ufunc_passes_args(func, arg, expected, request): + # GH#40662 + arr = np.array([[1, 2], [3, 4]]) + df = pd.DataFrame(arr) + result_inplace = np.zeros_like(arr) + # 1-argument ufunc + if arg is None: + result = func(df, out=result_inplace) + else: + result = func(df, arg, out=result_inplace) + + expected = np.array(expected).reshape(2, 2) + tm.assert_numpy_array_equal(result_inplace, expected) + + expected = pd.DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype_a", dtypes) @pytest.mark.parametrize("dtype_b", dtypes) def test_binary_input_aligns_columns(dtype_a, dtype_b):
Backport PR #40878: REGR: ufunc with DataFrame input not passing all kwargs
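For context, a small reproduction of the regression this fixes (adapted from the test added in the diff above):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]])
out = np.zeros_like(df.to_numpy())

# kwargs such as ``out`` are forwarded to the ufunc again
result = np.add(df, 1, out=out)
print(out)     # [[2 3] [4 5]]
print(result)  # a DataFrame with the same values
```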
https://api.github.com/repos/pandas-dev/pandas/pulls/40895
2021-04-12T14:29:47Z
2021-04-12T15:46:14Z
2021-04-12T15:46:14Z
2021-04-12T15:46:14Z
Backport PR #40880 on branch 1.2.x (DOC: 1.2.4 release date)
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index fffdf333178fc..54652ecc4dceb 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -1,7 +1,7 @@ .. _whatsnew_124: -What's new in 1.2.4 (April ??, 2021) ---------------------------------------- +What's new in 1.2.4 (April 12, 2021) +------------------------------------ These are the changes in pandas 1.2.4. See :ref:`release` for a full changelog including other versions of pandas. @@ -24,26 +24,6 @@ Fixed regressions .. --------------------------------------------------------------------------- -.. _whatsnew_124.bug_fixes: - -Bug fixes -~~~~~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - -.. _whatsnew_124.other: - -Other -~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - .. _whatsnew_124.contributors: Contributors
Backport PR #40880: DOC: 1.2.4 release date
https://api.github.com/repos/pandas-dev/pandas/pulls/40894
2021-04-12T12:17:57Z
2021-04-12T13:30:57Z
2021-04-12T13:30:57Z
2021-04-12T13:30:57Z
⬆️ UPGRADE: Autoupdate pre-commit config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b6df108a3166c..fa5bc10f979ed 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v2.11.0 + rev: v2.12.0 hooks: - id: pyupgrade args: [--py37-plus]
<!-- START pr-commits --> <!-- END pr-commits --> ## Base PullRequest default branch (https://github.com/pandas-dev/pandas/tree/master) ## Command results <details> <summary>Details: </summary> <details> <summary><em>add path</em></summary> ```Shell /home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin ``` </details> <details> <summary><em>pip install pre-commit</em></summary> ```Shell Collecting pre-commit Downloading pre_commit-2.12.0-py2.py3-none-any.whl (189 kB) Collecting toml Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB) Collecting nodeenv>=0.11.1 Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.4.3-py2.py3-none-any.whl (7.2 MB) Collecting cfgv>=2.0.0 Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB) Collecting identify>=1.0.0 Downloading identify-2.2.3-py2.py3-none-any.whl (98 kB) Collecting pyyaml>=5.1 Downloading PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB) Collecting distlib<1,>=0.3.1 Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB) Collecting six<2,>=1.9.0 Using cached six-1.15.0-py2.py3-none-any.whl (10 kB) Collecting appdirs<2,>=1.4.3 Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB) Collecting filelock<4,>=3.0.0 Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB) Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-2.2.3 nodeenv-1.6.0 pre-commit-2.12.0 pyyaml-5.4.1 six-1.15.0 toml-0.10.2 virtualenv-20.4.3 ``` </details> <details> <summary><em>pre-commit autoupdate || (exit 0);</em></summary> ```Shell Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports. already up to date. Updating https://github.com/python/black ... already up to date. Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell. already up to date. Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. already up to date. Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint. =====> /home/runner/.cache/pre-commit/repo2ir9bkt3/.pre-commit-hooks.yaml does not exist Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. already up to date. Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort. already up to date. Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade. updating v2.11.0 -> v2.12.0. Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks. already up to date. Updating https://github.com/asottile/yesqa ... already up to date. ``` </details> <details> <summary><em>pre-commit run -a || (exit 0);</em></summary> ```Shell [INFO] Initializing environment for https://github.com/cpplint/cpplint. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-bugbear>=21.3.2,flake8-comprehensions>=3.1.0. [INFO] Initializing environment for https://github.com/asottile/yesqa:flake8==3.9.0. 
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/python/black. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/codespell-project/codespell. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/cpplint/cpplint. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/PyCQA/isort. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/pyupgrade. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/yesqa. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for local. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... 
absolufy-imports....................................................................................Passed black...............................................................................................Passed codespell...........................................................................................Passed Fix End of Files....................................................................................Passed Trim Trailing Whitespace............................................................................Passed cpplint.............................................................................................Passed flake8..............................................................................................Passed flake8 (cython).....................................................................................Passed flake8 (cython template)............................................................................Passed isort...............................................................................................Passed pyupgrade...........................................................................................Passed rst ``code`` is two backticks.......................................................................Passed rst directives end with two colons..................................................................Passed rst ``inline code`` next to normal text.............................................................Passed Strip unnecessary `# noqa`s.........................................................................Passed flake8-rst..........................................................................................Passed Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias.......................Passed Check for inconsistent use of pandas namespace......................................................Passed Check code for instances of os.remove...............................................................Passed Unwanted patterns...................................................................................Passed Unwanted patterns in tests..........................................................................Passed Generate pip dependency from conda..................................................................Passed Check flake8 version is synced across flake8, yesqa, and environment.yml............................Passed Validate correct capitalization among titles in documentation.......................................Passed Check for use of bare pytest raises.................................................................Passed Check for use of private functions across modules...................................................Passed Check for import of private attributes across modules...............................................Passed Check for use of not concatenated strings...........................................................Passed Check for strings with wrong placed spaces..........................................................Passed Import pandas.array as pd_array in core.............................................................Passed Use bool_t instead of bool in pandas/core/generic.py................................................Passed ``` </details> </details> ## Changed files <details> <summary>Changed file: </summary> - .pre-commit-config.yaml </details> <hr> [:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: 
Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action)
https://api.github.com/repos/pandas-dev/pandas/pulls/40890
2021-04-12T07:20:11Z
2021-04-12T08:25:46Z
2021-04-12T08:25:45Z
2021-04-12T08:25:50Z
TYP overload fillna #40737
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 045776c3f5c50..37fc5de95b3d2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5007,6 +5007,121 @@ def rename( errors=errors, ) + @overload + def fillna( + self, + value=..., + method: str | None = ..., + axis: Axis | None = ..., + inplace: Literal[False] = ..., + limit=..., + downcast=..., + ) -> DataFrame: + ... + + @overload + def fillna( + self, + value, + method: str | None, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + method: str | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + method: str | None, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + *, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + method: str | None, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value=..., + method: str | None = ..., + axis: Axis | None = ..., + inplace: bool = ..., + limit=..., + downcast=..., + ) -> DataFrame | None: + ... + @doc(NDFrame.fillna, **_shared_doc_kwargs) def fillna( self, diff --git a/pandas/core/series.py b/pandas/core/series.py index 4b36b846ef9c4..5c605a6b441c6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4581,6 +4581,121 @@ def drop( errors=errors, ) + @overload + def fillna( + self, + value=..., + method: str | None = ..., + axis: Axis | None = ..., + inplace: Literal[False] = ..., + limit=..., + downcast=..., + ) -> Series: + ... + + @overload + def fillna( + self, + value, + method: str | None, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + method: str | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + *, + method: str | None, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + *, + axis: Axis | None, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value, + method: str | None, + *, + inplace: Literal[True], + limit=..., + downcast=..., + ) -> None: + ... + + @overload + def fillna( + self, + value=..., + method: str | None = ..., + axis: Axis | None = ..., + inplace: bool = ..., + limit=..., + downcast=..., + ) -> Series | None: + ... 
+ @doc(NDFrame.fillna, **_shared_doc_kwargs) def fillna( self, diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index f2027f2707a8b..dc4550484fa3b 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -5,7 +5,6 @@ Pattern, Set, Union, - cast, ) import unicodedata import warnings @@ -371,9 +370,7 @@ def _str_get_dummies(self, sep="|"): try: arr = sep + arr + sep except TypeError: - arr = cast(Series, arr) arr = sep + arr.astype(str) + sep - arr = cast(Series, arr) tags: Set[str] = set() for ts in Series(arr).str.split(sep):
- [x] closes #40737 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
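For illustration, a sketch of what the added overloads let a type checker infer (assuming a checker that understands `Literal` types; runtime behavior is unchanged):

```python
from pandas import DataFrame

df = DataFrame({"a": [1.0, None]})

res = df.fillna(0.0)                # inferred as DataFrame
ret = df.fillna(0.0, inplace=True)  # inferred as None

flag: bool = True
maybe = df.fillna(0.0, inplace=flag)  # plain bool falls back to DataFrame | None
```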
https://api.github.com/repos/pandas-dev/pandas/pulls/40887
2021-04-11T23:15:42Z
2021-04-15T21:21:57Z
2021-04-15T21:21:56Z
2021-04-15T21:21:57Z
Parametrize PeriodIndex tests
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 81171920f635f..5f8f9533e9c44 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -11,7 +11,81 @@ import pandas.core.indexes.period as period +_common_mismatch = [pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute()] + + +@pytest.fixture(params=[timedelta(minutes=30), + np.timedelta64(30, 's'), + Timedelta(seconds=30)] + _common_mismatch) +def not_hourly(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Hourly frequencies. + """ + return request.param + + +@pytest.fixture(params=[np.timedelta64(4, 'h'), + timedelta(hours=23), + Timedelta('23:00:00')] + _common_mismatch) +def not_daily(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Daily frequencies. + """ + return request.param + + +@pytest.fixture(params=[np.timedelta64(365, 'D'), + timedelta(365), + Timedelta(days=365)] + _common_mismatch) +def mismatched(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Monthly or Annual frequencies. + """ + return request.param + + +@pytest.fixture(params=[pd.offsets.Day(3), + timedelta(days=3), + np.timedelta64(3, 'D'), + pd.offsets.Hour(72), + timedelta(minutes=60 * 24 * 3), + np.timedelta64(72, 'h'), + Timedelta('72:00:00')]) +def three_days(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 3-day timedelta + """ + return request.param + + +@pytest.fixture(params=[pd.offsets.Hour(2), + timedelta(hours=2), + np.timedelta64(2, 'h'), + pd.offsets.Minute(120), + timedelta(minutes=120), + np.timedelta64(120, 'm')]) +def two_hours(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 2-hour timedelta + """ + return request.param + + class TestPeriodIndexComparisons(object): + def test_pi_cmp_period(self): + idx = period_range('2007-01', periods=20, freq='M') + + result = idx < idx[10] + exp = idx.values < idx.values[10] + tm.assert_numpy_array_equal(result, exp) + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) def test_pi_cmp_pi(self, freq): base = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], @@ -148,32 +222,35 @@ def test_pi_cmp_nat_mismatched_freq_raises(self, freq): idx1 == diff # TODO: De-duplicate with test_pi_cmp_nat - def test_comp_nat(self): + @pytest.mark.parametrize('dtype', [object, None]) + def test_comp_nat(self, dtype): left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, pd.Period('2011-01-03')]) right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) - for lhs, rhs in [(left, right), - (left.astype(object), right.astype(object))]: - result = lhs == rhs - expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) + if dtype is not None: + left = left.astype(dtype) + right = right.astype(dtype) - result = lhs != rhs - expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) + result = left == right + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = left != right + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + expected = np.array([False, 
False, False]) + tm.assert_numpy_array_equal(left == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == right, expected) - expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(left != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != left, expected) - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > left, expected) class TestPeriodIndexArithmetic(object): @@ -203,7 +280,7 @@ def test_pi_radd_offset_array(self): expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')]) tm.assert_index_equal(res, expected) - def test_add_iadd(self): + def test_pi_add_iadd_pi_raises(self): rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5) @@ -214,89 +291,7 @@ def test_add_iadd(self): with pytest.raises(TypeError): rng += other - # offset - # DateOffset - rng = pd.period_range('2014', '2024', freq='A') - result = rng + pd.offsets.YearEnd(5) - expected = pd.period_range('2019', '2029', freq='A') - tm.assert_index_equal(result, expected) - rng += pd.offsets.YearEnd(5) - tm.assert_index_equal(rng, expected) - - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(365, 'D'), - timedelta(365), Timedelta(days=365)]: - msg = ('Input has different freq(=.+)? ' - 'from PeriodIndex\\(freq=A-DEC\\)') - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng + o - - rng = pd.period_range('2014-01', '2016-12', freq='M') - result = rng + pd.offsets.MonthEnd(5) - expected = pd.period_range('2014-06', '2017-05', freq='M') - tm.assert_index_equal(result, expected) - rng += pd.offsets.MonthEnd(5) - tm.assert_index_equal(rng, expected) - - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(365, 'D'), - timedelta(365), Timedelta(days=365)]: - rng = pd.period_range('2014-01', '2016-12', freq='M') - msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng + o - - # Tick - offsets = [pd.offsets.Day(3), timedelta(days=3), - np.timedelta64(3, 'D'), pd.offsets.Hour(72), - timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'), - Timedelta('72:00:00')] - for delta in offsets: - rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') - result = rng + delta - expected = pd.period_range('2014-05-04', '2014-05-18', freq='D') - tm.assert_index_equal(result, expected) - rng += delta - tm.assert_index_equal(rng, expected) - - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(4, 'h'), - timedelta(hours=23), Timedelta('23:00:00')]: - rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') - msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=D\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng + o - - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), pd.offsets.Minute(120), - timedelta(minutes=120), np.timedelta64(120, 'm'), - Timedelta(minutes=120)] - for delta in offsets: - rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', - freq='H') - result = rng + delta - expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', - freq='H') - tm.assert_index_equal(result, expected) - rng += delta - tm.assert_index_equal(rng, expected) - - for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), - np.timedelta64(30, 's'), Timedelta(seconds=30)]: - rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', - freq='H') - msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng + delta - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng += delta - - def test_pi_add_int(self, one): + def test_pi_add_iadd_int(self, one): # Variants of `one` for #19012 rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) result = rng + one @@ -305,16 +300,27 @@ def test_pi_add_int(self, one): rng += one tm.assert_index_equal(rng, expected) + def test_pi_sub_isub_int(self, one): + """ + PeriodIndex.__sub__ and __isub__ with several representations of + the integer 1, e.g. int, long, np.int64, np.uint8, ... + """ + rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) + result = rng - one + expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10) + tm.assert_index_equal(result, expected) + rng -= one + tm.assert_index_equal(rng, expected) + @pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)]) - def test_sub(self, five): + def test_pi_sub_intlike(self, five): rng = period_range('2007-01', periods=50) result = rng - five exp = rng + (-five) tm.assert_index_equal(result, exp) - def test_sub_isub(self): - + def test_pi_sub_isub_pi_raises(self): # previously performed setop, now raises TypeError (GH14164) # TODO needs to wait on #13077 for decision on result type rng = pd.period_range('1/1/2000', freq='D', periods=5) @@ -326,6 +332,7 @@ def test_sub_isub(self): with pytest.raises(TypeError): rng -= other + def test_pi_sub_isub_offset(self): # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') @@ -335,102 +342,165 @@ def test_sub_isub(self): rng -= pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(365, 'D'), - timedelta(365)]: - rng = pd.period_range('2014', '2024', freq='A') - msg = ('Input has different freq(=.+)? ' - 'from PeriodIndex\\(freq=A-DEC\\)') - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng - o - rng = pd.period_range('2014-01', '2016-12', freq='M') result = rng - pd.offsets.MonthEnd(5) expected = pd.period_range('2013-08', '2016-07', freq='M') tm.assert_index_equal(result, expected) + rng -= pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(365, 'D'), - timedelta(365)]: - rng = pd.period_range('2014-01', '2016-12', freq='M') - msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=M\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng - o + # --------------------------------------------------------------- + # Timedelta-like (timedelta, timedelta64, Timedelta, Tick) + # TODO: Some of these are misnomers because of non-Tick DateOffsets + def test_pi_add_iadd_timedeltalike_daily(self, three_days): # Tick - offsets = [pd.offsets.Day(3), timedelta(days=3), - np.timedelta64(3, 'D'), pd.offsets.Hour(72), - timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')] - for delta in offsets: - rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') - result = rng - delta - expected = pd.period_range('2014-04-28', '2014-05-12', freq='D') - tm.assert_index_equal(result, expected) - rng -= delta - tm.assert_index_equal(rng, expected) - - for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), - pd.offsets.Minute(), np.timedelta64(4, 'h'), - timedelta(hours=23)]: - rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') - msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng - o - - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), pd.offsets.Minute(120), - timedelta(minutes=120), np.timedelta64(120, 'm')] - for delta in offsets: - rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', - freq='H') - result = rng - delta - expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', - freq='H') - tm.assert_index_equal(result, expected) - rng -= delta - tm.assert_index_equal(rng, expected) - - for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), - np.timedelta64(30, 's')]: - rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', - freq='H') - msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)' - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng + delta - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - rng += delta - - # int - rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) - result = rng - 1 - expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10) + other = three_days + rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') + expected = pd.period_range('2014-05-04', '2014-05-18', freq='D') + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_isub_timedeltalike_daily(self, three_days): + # Tick-like 3 Days + other = three_days + rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') + expected = pd.period_range('2014-04-28', '2014-05-12', freq='D') + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_pi_add_iadd_timedeltalike_freq_mismatch_daily(self, not_daily): + other = not_daily + rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') + msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)' + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng + other + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng += other + + def test_pi_sub_timedeltalike_freq_mismatch_daily(self, not_daily): + other = not_daily + rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') + msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=D\\)' + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng - other + + def test_pi_add_iadd_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') + expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', + freq='H') + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly): + other = not_hourly + rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') + msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)' + + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng + other + + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng += other + + def test_pi_sub_isub_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') + expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', + freq='H') + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_add_iadd_timedeltalike_annual(self): + # offset + # DateOffset + rng = pd.period_range('2014', '2024', freq='A') + result = rng + pd.offsets.YearEnd(5) + expected = pd.period_range('2019', '2029', freq='A') + tm.assert_index_equal(result, expected) + rng += pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + def test_pi_add_iadd_timedeltalike_freq_mismatch_annual(self, mismatched): + other = mismatched + rng = pd.period_range('2014', '2024', freq='A') + msg = ('Input has different freq(=.+)? ' + 'from PeriodIndex\\(freq=A-DEC\\)') + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng + other + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng += other + + def test_pi_sub_isub_timedeltalike_freq_mismatch_annual(self, mismatched): + other = mismatched + rng = pd.period_range('2014', '2024', freq='A') + msg = ('Input has different freq(=.+)? ' + 'from PeriodIndex\\(freq=A-DEC\\)') + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng - other + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng -= other + + def test_pi_add_iadd_timedeltalike_M(self): + rng = pd.period_range('2014-01', '2016-12', freq='M') + expected = pd.period_range('2014-06', '2017-05', freq='M') + + result = rng + pd.offsets.MonthEnd(5) tm.assert_index_equal(result, expected) - rng -= 1 + + rng += pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) + def test_pi_add_iadd_timedeltalike_freq_mismatch_monthly(self, mismatched): + other = mismatched + rng = pd.period_range('2014-01', '2016-12', freq='M') + msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)' + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng + other + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng += other + + def test_pi_sub_isub_timedeltalike_freq_mismatch_monthly(self, mismatched): + other = mismatched + rng = pd.period_range('2014-01', '2016-12', freq='M') + msg = 'Input has different freq(=.+)? 
from PeriodIndex\\(freq=M\\)' + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng - other + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + rng -= other + # --------------------------------------------------------------- # PeriodIndex.shift is used by __add__ and __sub__ def test_pi_shift_ndarray(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + freq='M', name='idx') result = idx.shift(np.array([1, 2, 3, 4])) - expected = PeriodIndex(['2011-02', '2011-04', 'NaT', - '2011-08'], freq='M', name='idx') + expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], + freq='M', name='idx') tm.assert_index_equal(result, expected) - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') result = idx.shift(np.array([1, -2, 3, -4])) - expected = PeriodIndex(['2011-02', '2010-12', 'NaT', - '2010-12'], freq='M', name='idx') + expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'], + freq='M', name='idx') tm.assert_index_equal(result, expected) def test_shift(self): @@ -489,11 +559,11 @@ def test_shift_corner_cases(self): tm.assert_index_equal(idx.shift(-3), exp) def test_shift_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + freq='M', name='idx') result = idx.shift(1) - expected = PeriodIndex(['2011-02', '2011-03', 'NaT', - '2011-05'], freq='M', name='idx') + expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], + freq='M', name='idx') tm.assert_index_equal(result, expected) assert result.name == expected.name @@ -519,18 +589,18 @@ def _check(self, values, func, expected): # comp op results in bool tm.assert_numpy_array_equal(result, expected) - s = pd.Series(values) - result = func(s) + ser = pd.Series(values) + result = func(ser) exp = pd.Series(expected, name=values.name) tm.assert_series_equal(result, exp) def test_pi_ops(self): - idx = PeriodIndex(['2011-01', '2011-02', '2011-03', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], + freq='M', name='idx') - expected = PeriodIndex(['2011-03', '2011-04', - '2011-05', '2011-06'], freq='M', name='idx') + expected = PeriodIndex(['2011-03', '2011-04', '2011-05', '2011-06'], + freq='M', name='idx') self._check(idx, lambda x: x + 2, expected) self._check(idx, lambda x: 2 + x, expected) @@ -544,13 +614,13 @@ def test_pi_ops(self): tm.assert_index_equal(result, exp) def test_pi_ops_errors(self): - idx = PeriodIndex(['2011-01', '2011-02', '2011-03', - '2011-04'], freq='M', name='idx') - s = pd.Series(idx) + idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], + freq='M', name='idx') + ser = pd.Series(idx) msg = r"unsupported operand type\(s\)" - for obj in [idx, s]: + for obj in [idx, ser]: for ng in ["str", 1.5]: with tm.assert_raises_regex(TypeError, msg): obj + ng @@ -581,10 +651,10 @@ def test_pi_ops_errors(self): np.subtract(ng, obj) def test_pi_ops_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') - expected = PeriodIndex(['2011-03', '2011-04', - 'NaT', '2011-06'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + freq='M', name='idx') + expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], + freq='M', name='idx') self._check(idx, lambda x: x + 2, expected) self._check(idx, lambda x: 2 + x, 
expected) self._check(idx, lambda x: np.add(x, 2), expected) @@ -593,10 +663,10 @@ def test_pi_ops_nat(self): self._check(idx + 2, lambda x: np.subtract(x, 2), idx) # freq with mult - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='2M', name='idx') - expected = PeriodIndex(['2011-07', '2011-08', - 'NaT', '2011-10'], freq='2M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + freq='2M', name='idx') + expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'], + freq='2M', name='idx') self._check(idx, lambda x: x + 3, expected) self._check(idx, lambda x: 3 + x, expected) self._check(idx, lambda x: np.add(x, 3), expected) @@ -605,26 +675,26 @@ def test_pi_ops_nat(self): self._check(idx + 3, lambda x: np.subtract(x, 3), idx) def test_pi_ops_array_int(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + freq='M', name='idx') f = lambda x: x + np.array([1, 2, 3, 4]) - exp = PeriodIndex(['2011-02', '2011-04', 'NaT', - '2011-08'], freq='M', name='idx') + exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], + freq='M', name='idx') self._check(idx, f, exp) f = lambda x: np.add(x, np.array([4, -1, 1, 2])) - exp = PeriodIndex(['2011-05', '2011-01', 'NaT', - '2011-06'], freq='M', name='idx') + exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'], + freq='M', name='idx') self._check(idx, f, exp) f = lambda x: x - np.array([1, 2, 3, 4]) - exp = PeriodIndex(['2010-12', '2010-12', 'NaT', - '2010-12'], freq='M', name='idx') + exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'], + freq='M', name='idx') self._check(idx, f, exp) f = lambda x: np.subtract(x, np.array([3, 2, 3, -2])) - exp = PeriodIndex(['2010-10', '2010-12', 'NaT', - '2011-06'], freq='M', name='idx') + exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'], + freq='M', name='idx') self._check(idx, f, exp) def test_pi_ops_offset(self): @@ -648,29 +718,26 @@ def test_pi_ops_offset(self): def test_pi_offset_errors(self): idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01'], freq='D', name='idx') - s = pd.Series(idx) + ser = pd.Series(idx) # Series op is applied per Period instance, thus error is raised # from Period msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)" msg_s = r"Input cannot be converted to Period\(freq=D\)" - for obj, msg in [(idx, msg_idx), (s, msg_s)]: - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): + for obj, msg in [(idx, msg_idx), (ser, msg_s)]: + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): obj + pd.offsets.Hour(2) - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): pd.offsets.Hour(2) + obj - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): obj - pd.offsets.Hour(2) def test_pi_sub_period(self): # GH 13071 - idx = PeriodIndex(['2011-01', '2011-02', '2011-03', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], + freq='M', name='idx') result = idx - pd.Period('2012-01', freq='M') exp = pd.Index([-12, -11, -10, -9], name='idx') @@ -695,16 +762,16 @@ def test_pi_sub_period(self): def test_pi_sub_pdnat(self): # GH 13071 - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], + 
freq='M', name='idx') exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx') tm.assert_index_equal(pd.NaT - idx, exp) tm.assert_index_equal(idx - pd.NaT, exp) def test_pi_sub_period_nat(self): # GH 13071 - idx = PeriodIndex(['2011-01', 'NaT', '2011-03', - '2011-04'], freq='M', name='idx') + idx = PeriodIndex(['2011-01', 'NaT', '2011-03', '2011-04'], + freq='M', name='idx') result = idx - pd.Period('2012-01', freq='M') exp = pd.Index([-12, np.nan, -10, -9], name='idx') diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py new file mode 100644 index 0000000000000..f2126487496c4 --- /dev/null +++ b/pandas/tests/indexes/period/test_astype.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas import NaT, Period, PeriodIndex, Int64Index, Index, period_range + + +class TestPeriodIndexAsType(object): + @pytest.mark.parametrize('dtype', [ + float, 'timedelta64', 'timedelta64[ns]']) + def test_astype_raises(self, dtype): + # GH#13149, GH#13209 + idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') + msg = 'Cannot cast PeriodIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + idx.astype(dtype) + + def test_astype_conversion(self): + # GH#13149, GH#13209 + idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') + + result = idx.astype(object) + expected = Index([Period('2016-05-16', freq='D')] + + [Period(NaT, freq='D')] * 3, dtype='object') + tm.assert_index_equal(result, expected) + + result = idx.astype(int) + expected = Int64Index([16937] + [-9223372036854775808] * 3, + dtype=np.int64) + tm.assert_index_equal(result, expected) + + result = idx.astype(str) + expected = Index(str(x) for x in idx) + tm.assert_index_equal(result, expected) + + idx = period_range('1990', '2009', freq='A') + result = idx.astype('i8') + tm.assert_index_equal(result, Index(idx.asi8)) + tm.assert_numpy_array_equal(result.values, idx.asi8) + + def test_astype_object(self): + idx = pd.PeriodIndex([], freq='M') + + exp = np.array([], dtype=object) + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M') + + exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object) + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT], + dtype=object) + idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D') + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + # TODO: de-duplicate this version (from test_ops) with the one above + # (from test_period) + def test_astype_object2(self): + idx = pd.period_range(start='2013-01-01', periods=4, freq='M', + name='idx') + expected_list = [pd.Period('2013-01-31', freq='M'), + pd.Period('2013-02-28', freq='M'), + pd.Period('2013-03-31', freq='M'), + pd.Period('2013-04-30', freq='M')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.astype(object) + assert isinstance(result, Index) + assert result.dtype == object + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert idx.tolist() == expected_list + + idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', + '2013-01-04'], freq='D', name='idx') + expected_list = [pd.Period('2013-01-01', freq='D'), + 
pd.Period('2013-01-02', freq='D'), + pd.Period('NaT', freq='D'), + pd.Period('2013-01-04', freq='D')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.astype(object) + assert isinstance(result, Index) + assert result.dtype == object + tm.assert_index_equal(result, expected) + for i in [0, 1, 3]: + assert result[i] == expected[i] + assert result[2] is pd.NaT + assert result.name == expected.name + + result_list = idx.tolist() + for i in [0, 1, 3]: + assert result_list[i] == expected_list[i] + assert result_list[2] is pd.NaT diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 8745de0c2a7aa..6c272864e0026 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -25,42 +25,6 @@ def test_ops_properties(self): self.check_ops_properties(PeriodIndex._object_ops, f) self.check_ops_properties(PeriodIndex._bool_ops, f) - def test_astype_object(self): - idx = pd.period_range(start='2013-01-01', periods=4, freq='M', - name='idx') - expected_list = [pd.Period('2013-01-31', freq='M'), - pd.Period('2013-02-28', freq='M'), - pd.Period('2013-03-31', freq='M'), - pd.Period('2013-04-30', freq='M')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - - idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', - '2013-01-04'], freq='D', name='idx') - expected_list = [pd.Period('2013-01-01', freq='D'), - pd.Period('2013-01-02', freq='D'), - pd.Period('NaT', freq='D'), - pd.Period('2013-01-04', freq='D')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - assert result.dtype == object - tm.assert_index_equal(result, expected) - for i in [0, 1, 3]: - assert result[i] == expected[i] - assert result[2] is pd.NaT - assert result.name == expected.name - - result_list = idx.tolist() - for i in [0, 1, 3]: - assert result_list[i] == expected_list[i] - assert result_list[2] is pd.NaT - def test_minmax(self): # monotonic diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index f3469b829f8a3..b1fa9e27fd5b2 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td from pandas.util import testing as tm from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT, - Index, Period, Int64Index, Series, DataFrame, date_range, + Index, Period, Series, DataFrame, date_range, offsets) from ..datetimelike import DatetimeLike @@ -24,38 +24,6 @@ def setup_method(self, method): def create_index(self): return period_range('20130101', periods=5, freq='D') - def test_astype_conversion(self): - # GH 13149, GH 13209 - idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') - - result = idx.astype(object) - expected = Index([Period('2016-05-16', freq='D')] + - [Period(NaT, freq='D')] * 3, dtype='object') - tm.assert_index_equal(result, expected) - - result = idx.astype(int) - expected = Int64Index([16937] + [-9223372036854775808] * 3, - dtype=np.int64) - tm.assert_index_equal(result, expected) - - result = idx.astype(str) - expected = Index(str(x) for x in idx) - tm.assert_index_equal(result, expected) - - idx = period_range('1990', '2009', freq='A') - 
result = idx.astype('i8') - tm.assert_index_equal(result, Index(idx.asi8)) - tm.assert_numpy_array_equal(result.values, idx.asi8) - - @pytest.mark.parametrize('dtype', [ - float, 'timedelta64', 'timedelta64[ns]']) - def test_astype_raises(self, dtype): - # GH 13149, GH 13209 - idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') - msg = 'Cannot cast PeriodIndex to dtype' - with tm.assert_raises_regex(TypeError, msg): - idx.astype(dtype) - def test_pickle_compat_construction(self): pass @@ -384,25 +352,6 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) - def test_astype_object(self): - idx = pd.PeriodIndex([], freq='M') - - exp = np.array([], dtype=object) - tm.assert_numpy_array_equal(idx.astype(object).values, exp) - tm.assert_numpy_array_equal(idx._mpl_repr(), exp) - - idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M') - - exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object) - tm.assert_numpy_array_equal(idx.astype(object).values, exp) - tm.assert_numpy_array_equal(idx._mpl_repr(), exp) - - exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT], - dtype=object) - idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D') - tm.assert_numpy_array_equal(idx.astype(object).values, exp) - tm.assert_numpy_array_equal(idx._mpl_repr(), exp) - def test_is_(self): create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') @@ -421,13 +370,6 @@ def test_is_(self): assert not index.is_(index - 2) assert not index.is_(index - 0) - def test_comp_period(self): - idx = period_range('2007-01', periods=20, freq='M') - - result = idx < idx[10] - exp = idx.values < idx.values[10] - tm.assert_numpy_array_equal(result, exp) - def test_contains(self): rng = period_range('2007-01', freq='M', periods=10)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
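This refactor replaces the old monolithic `test_add_iadd` / `test_sub_isub` loops with pytest fixtures (`three_days`, `two_hours`, `not_daily`, `not_hourly`, `mismatched`) so that each timedelta-like / DateOffset case runs as its own parametrized test. A minimal, self-contained sketch of the pattern (the fixture and test names match those added in the diff; the fixture params are trimmed here for brevity):

```python
from datetime import timedelta

import numpy as np
import pytest

import pandas as pd
import pandas.util.testing as tm


@pytest.fixture(params=[pd.offsets.Day(3),
                        timedelta(days=3),
                        np.timedelta64(3, 'D')])
def three_days(request):
    """Timedelta-like objects that each represent a 3-day offset."""
    return request.param


def test_pi_add_iadd_timedeltalike_daily(three_days):
    # each fixture param becomes its own collected test case,
    # so a failure reports exactly which offset representation broke
    rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
    expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
    tm.assert_index_equal(rng + three_days, expected)
```

Compared with the removed `for delta in offsets:` loops, this stops at the first failing offset no longer masking the rest, and keeps each test body to a single add/sub assertion.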
https://api.github.com/repos/pandas-dev/pandas/pulls/19659
2018-02-12T16:53:15Z
2018-02-17T20:47:19Z
2018-02-17T20:47:19Z
2018-02-18T15:42:06Z
DOC: removed vendored IPython.sphinxext - take 2
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt index 65357ce2018d2..e8cfcdf80f2e8 100644 --- a/ci/requirements-optional-conda.txt +++ b/ci/requirements-optional-conda.txt @@ -4,7 +4,7 @@ bottleneck fastparquet feather-format html5lib -ipython +ipython>=5.6.0 ipykernel jinja2 lxml diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 43c7d47892095..877c52fa0b4fd 100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -6,7 +6,7 @@ bottleneck fastparquet feather-format html5lib -ipython +ipython>=5.6.0 ipykernel jinja2 lxml diff --git a/doc/source/conf.py b/doc/source/conf.py index 965b537c15ce5..e410b8da67b32 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -59,9 +59,7 @@ 'sphinx.ext.extlinks', 'sphinx.ext.todo', 'numpydoc', - 'ipython_sphinxext.ipython_directive', - 'ipython_sphinxext.ipython_console_highlighting', - # lowercase didn't work + 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.intersphinx', diff --git a/doc/sphinxext/ipython_sphinxext/__init__.py b/doc/sphinxext/ipython_sphinxext/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py deleted file mode 100644 index b93a151fb3cb0..0000000000000 --- a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -reST directive for syntax-highlighting ipython interactive sessions. - -""" - -from sphinx import highlighting -from IPython.lib.lexers import IPyLexer - -def setup(app): - """Setup as a sphinx extension.""" - - # This is only a lexer, so adding it below to pygments appears sufficient. - # But if somebody knows what the right API usage should be to do that via - # sphinx, by all means fix it here. At least having this setup.py - # suppresses the sphinx warning we'd get without it. - metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} - return metadata - -# Register the extension as a valid pygments lexer. -# Alternatively, we could register the lexer with pygments instead. This would -# require using setuptools entrypoints: http://pygments.org/docs/plugins - -ipy2 = IPyLexer(python3=False) -ipy3 = IPyLexer(python3=True) - -highlighting.lexers['ipython'] = ipy2 -highlighting.lexers['ipython2'] = ipy2 -highlighting.lexers['ipython3'] = ipy3 diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py deleted file mode 100644 index a0e6728861b66..0000000000000 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ /dev/null @@ -1,1170 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Sphinx directive to support embedded IPython code. - -This directive allows pasting of entire interactive IPython sessions, prompts -and all, and their code will actually get re-executed at doc build time, with -all prompts renumbered sequentially. It also allows you to input code as a pure -python input by giving the argument python to the directive. The output looks -like an interactive ipython section. - -To enable this directive, simply list it in your Sphinx ``conf.py`` file -(making sure the directory where you placed it is visible to sphinx, as is -needed for all Sphinx directives). 
For example, to enable syntax highlighting -and the IPython directive:: - - extensions = ['IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive'] - -The IPython directive outputs code-blocks with the language 'ipython'. So -if you do not have the syntax highlighting extension enabled as well, then -all rendered code-blocks will be uncolored. By default this directive assumes -that your prompts are unchanged IPython ones, but this can be customized. -The configurable options that can be placed in conf.py are: - -ipython_savefig_dir: - The directory in which to save the figures. This is relative to the - Sphinx source directory. The default is `html_static_path`. -ipython_rgxin: - The compiled regular expression to denote the start of IPython input - lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You - shouldn't need to change this. -ipython_rgxout: - The compiled regular expression to denote the start of IPython output - lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You - shouldn't need to change this. -ipython_promptin: - The string to represent the IPython input prompt in the generated ReST. - The default is 'In [%d]:'. This expects that the line numbers are used - in the prompt. -ipython_promptout: - The string to represent the IPython prompt in the generated ReST. The - default is 'Out [%d]:'. This expects that the line numbers are used - in the prompt. -ipython_mplbackend: - The string which specifies if the embedded Sphinx shell should import - Matplotlib and set the backend. The value specifies a backend that is - passed to `matplotlib.use()` before any lines in `ipython_execlines` are - executed. If not specified in conf.py, then the default value of 'agg' is - used. To use the IPython directive without matplotlib as a dependency, set - the value to `None`. It may end up that matplotlib is still imported - if the user specifies so in `ipython_execlines` or makes use of the - @savefig pseudo decorator. -ipython_execlines: - A list of strings to be exec'd in the embedded Sphinx shell. Typical - usage is to make certain packages always available. Set this to an empty - list if you wish to have no imports always available. If specified in - conf.py as `None`, then it has the effect of making no imports available. - If omitted from conf.py altogether, then the default value of - ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. -ipython_holdcount - When the @suppress pseudo-decorator is used, the execution count can be - incremented or not. The default behavior is to hold the execution count, - corresponding to a value of `True`. Set this to `False` to increment - the execution count after each suppressed command. - -As an example, to use the IPython directive when `matplotlib` is not available, -one sets the backend to `None`:: - - ipython_mplbackend = None - -An example usage of the directive is: - -.. code-block:: rst - - .. ipython:: - - In [1]: x = 1 - - In [2]: y = x**2 - - In [3]: print(y) - -See http://matplotlib.org/sampledoc/ipython_directive.html for additional -documentation. - -Pseudo-Decorators -================= - -Note: Only one decorator is supported per input. If more than one decorator -is specified, then only the last one is used. - -In addition to the Pseudo-Decorators/options described at the above link, -several enhancements have been made. The directive will emit a message to the -console at build-time if code-execution resulted in an exception or warning. 
-You can suppress these on a per-block basis by specifying the :okexcept: -or :okwarning: options: - -.. code-block:: rst - - .. ipython:: - :okexcept: - :okwarning: - - In [1]: 1/0 - In [2]: # raise warning. - -To Do ------ - -- Turn the ad-hoc test() function into a real test suite. -- Break up ipython-specific functionality from matplotlib stuff into better - separated code. - -Authors -------- - -- John D Hunter: original author. -- Fernando Perez: refactoring, documentation, cleanups, port to 0.11. -- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. -- Skipper Seabold, refactoring, cleanups, pure python addition -""" - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -# Stdlib -import atexit -import errno -import os -import re -import sys -import tempfile -import ast -import warnings -import shutil -from io import StringIO - -# Third-party -from docutils.parsers.rst import directives -from docutils.parsers.rst import Directive - -# Our own -from traitlets.config import Config -from IPython import InteractiveShell -from IPython.core.profiledir import ProfileDir - -#----------------------------------------------------------------------------- -# Globals -#----------------------------------------------------------------------------- -# for tokenizing blocks -COMMENT, INPUT, OUTPUT = range(3) - -#----------------------------------------------------------------------------- -# Functions and class declarations -#----------------------------------------------------------------------------- - -def block_parser(part, rgxin, rgxout, fmtin, fmtout): - """ - part is a string of ipython text, comprised of at most one - input, one output, comments, and blank lines. The block parser - parses the text into a list of:: - - blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] - - where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and - data is, depending on the type of token:: - - COMMENT : the comment string - - INPUT: the (DECORATOR, INPUT_LINE, REST) where - DECORATOR: the input decorator (or None) - INPUT_LINE: the input as string (possibly multi-line) - REST : any stdout generated by the input line (not OUTPUT) - - OUTPUT: the output string, possibly multi-line - - """ - block = [] - lines = part.split('\n') - N = len(lines) - i = 0 - decorator = None - while 1: - - if i==N: - # nothing left to parse -- the last line - break - - line = lines[i] - i += 1 - line_stripped = line.strip() - if line_stripped.startswith('#'): - block.append((COMMENT, line)) - continue - - if line_stripped.startswith('@'): - # Here is where we assume there is, at most, one decorator. - # Might need to rethink this. - decorator = line_stripped - continue - - # does this look like an input line? - matchin = rgxin.match(line) - if matchin: - lineno, inputline = int(matchin.group(1)), matchin.group(2) - - # the ....: continuation string - continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) - Nc = len(continuation) - # input lines can continue on for more than one line, if - # we have a '\' line continuation char or a function call - # echo line 'print'. 
The input line can only be - # terminated by the end of the block or an output line, so - # we parse out the rest of the input line if it is - # multiline as well as any echo text - - rest = [] - while i<N: - - # look ahead; if the next line is blank, or a comment, or - # an output line, we're done - - nextline = lines[i] - matchout = rgxout.match(nextline) - #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation)) - if matchout or nextline.startswith('#'): - break - elif nextline.startswith(continuation): - # The default ipython_rgx* treat the space following the colon as optional. - # However, If the space is there we must consume it or code - # employing the cython_magic extension will fail to execute. - # - # This works with the default ipython_rgx* patterns, - # If you modify them, YMMV. - nextline = nextline[Nc:] - if nextline and nextline[0] == ' ': - nextline = nextline[1:] - - inputline += '\n' + nextline - else: - rest.append(nextline) - i+= 1 - - block.append((INPUT, (decorator, inputline, '\n'.join(rest)))) - continue - - # if it looks like an output line grab all the text to the end - # of the block - matchout = rgxout.match(line) - if matchout: - lineno, output = int(matchout.group(1)), matchout.group(2) - if i<N-1: - output = '\n'.join([output] + lines[i:]) - - block.append((OUTPUT, output)) - break - - return block - - -class EmbeddedSphinxShell(object): - """An embedded IPython instance to run inside Sphinx""" - - def __init__(self, exec_lines=None): - - self.cout = StringIO() - - if exec_lines is None: - exec_lines = [] - - # Create config object for IPython - config = Config() - config.HistoryManager.hist_file = ':memory:' - config.InteractiveShell.autocall = False - config.InteractiveShell.autoindent = False - config.InteractiveShell.colors = 'NoColor' - - # create a profile so instance history isn't saved - tmp_profile_dir = tempfile.mkdtemp(prefix='profile_') - profname = 'auto_profile_sphinx_build' - pdir = os.path.join(tmp_profile_dir,profname) - profile = ProfileDir.create_profile_dir(pdir) - - # Create and initialize global ipython, but don't start its mainloop. - # This will persist across different EmbeddedSphinxShell instances. - IP = InteractiveShell.instance(config=config, profile_dir=profile) - atexit.register(self.cleanup) - - # Store a few parts of IPython we'll need. - self.IP = IP - self.user_ns = self.IP.user_ns - self.user_global_ns = self.IP.user_global_ns - - self.input = '' - self.output = '' - self.tmp_profile_dir = tmp_profile_dir - - self.is_verbatim = False - self.is_doctest = False - self.is_suppress = False - - # Optionally, provide more detailed information to shell. - # this is assigned by the SetUp method of IPythonDirective - # to point at itself. - # - # So, you can access handy things at self.directive.state - self.directive = None - - # on the first call to the savefig decorator, we'll import - # pyplot as plt so we can make a call to the plt.gcf().savefig - self._pyplot_imported = False - - # Prepopulate the namespace. 
- for line in exec_lines: - self.process_input_line(line, store_history=False) - - def cleanup(self): - shutil.rmtree(self.tmp_profile_dir, ignore_errors=True) - - def clear_cout(self): - self.cout.seek(0) - self.cout.truncate(0) - - def process_input_line(self, line, store_history=True): - """process the input, capturing stdout""" - - stdout = sys.stdout - splitter = self.IP.input_splitter - try: - sys.stdout = self.cout - splitter.push(line) - more = splitter.push_accepts_more() - if not more: - source_raw = splitter.raw_reset() - self.IP.run_cell(source_raw, store_history=store_history) - finally: - sys.stdout = stdout - - def process_image(self, decorator): - """ - # build out an image directive like - # .. image:: somefile.png - # :width 4in - # - # from an input like - # savefig somefile.png width=4in - """ - savefig_dir = self.savefig_dir - source_dir = self.source_dir - saveargs = decorator.split(' ') - filename = saveargs[1] - # insert relative path to image file in source (as absolute path for Sphinx) - outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename), - source_dir) - - imagerows = ['.. image:: %s'%outfile] - - for kwarg in saveargs[2:]: - arg, val = kwarg.split('=') - arg = arg.strip() - val = val.strip() - imagerows.append(' :%s: %s'%(arg, val)) - - image_file = os.path.basename(outfile) # only return file name - image_directive = '\n'.join(imagerows) - return image_file, image_directive - - # Callbacks for each type of token - def process_input(self, data, input_prompt, lineno): - """ - Process data block for INPUT token. - - """ - decorator, input, rest = data - image_file = None - image_directive = None - - is_verbatim = decorator=='@verbatim' or self.is_verbatim - is_doctest = (decorator is not None and \ - decorator.startswith('@doctest')) or self.is_doctest - is_suppress = decorator=='@suppress' or self.is_suppress - is_okexcept = decorator=='@okexcept' or self.is_okexcept - is_okwarning = decorator=='@okwarning' or self.is_okwarning - is_savefig = decorator is not None and \ - decorator.startswith('@savefig') - - input_lines = input.split('\n') - if len(input_lines) > 1: - if input_lines[-1] != "": - input_lines.append('') # make sure there's a blank line - # so splitter buffer gets reset - - continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) - - if is_savefig: - image_file, image_directive = self.process_image(decorator) - - ret = [] - is_semicolon = False - - # Hold the execution count, if requested to do so. - if is_suppress and self.hold_count: - store_history = False - else: - store_history = True - - # Note: catch_warnings is not thread safe - with warnings.catch_warnings(record=True) as ws: - for i, line in enumerate(input_lines): - if line.endswith(';'): - is_semicolon = True - - if i == 0: - # process the first input line - if is_verbatim: - self.process_input_line('') - self.IP.execution_count += 1 # increment it anyway - else: - # only submit the line in non-verbatim mode - self.process_input_line(line, store_history=store_history) - formatted_line = '%s %s'%(input_prompt, line) - else: - # process a continuation line - if not is_verbatim: - self.process_input_line(line, store_history=store_history) - - formatted_line = '%s %s'%(continuation, line) - - if not is_suppress: - ret.append(formatted_line) - - if not is_suppress and len(rest.strip()) and is_verbatim: - # The "rest" is the standard output of the input. This needs to be - # added when in verbatim mode. 
If there is no "rest", then we don't - # add it, as the new line will be added by the processed output. - ret.append(rest) - - # Fetch the processed output. (This is not the submitted output.) - self.cout.seek(0) - processed_output = self.cout.read() - if not is_suppress and not is_semicolon: - # - # In IPythonDirective.run, the elements of `ret` are eventually - # combined such that '' entries correspond to newlines. So if - # `processed_output` is equal to '', then the adding it to `ret` - # ensures that there is a blank line between consecutive inputs - # that have no outputs, as in: - # - # In [1]: x = 4 - # - # In [2]: x = 5 - # - # When there is processed output, it has a '\n' at the tail end. So - # adding the output to `ret` will provide the necessary spacing - # between consecutive input/output blocks, as in: - # - # In [1]: x - # Out[1]: 5 - # - # In [2]: x - # Out[2]: 5 - # - # When there is stdout from the input, it also has a '\n' at the - # tail end, and so this ensures proper spacing as well. E.g.: - # - # In [1]: print x - # 5 - # - # In [2]: x = 5 - # - # When in verbatim mode, `processed_output` is empty (because - # nothing was passed to IP. Sometimes the submitted code block has - # an Out[] portion and sometimes it does not. When it does not, we - # need to ensure proper spacing, so we have to add '' to `ret`. - # However, if there is an Out[] in the submitted code, then we do - # not want to add a newline as `process_output` has stuff to add. - # The difficulty is that `process_input` doesn't know if - # `process_output` will be called---so it doesn't know if there is - # Out[] in the code block. The requires that we include a hack in - # `process_block`. See the comments there. - # - ret.append(processed_output) - elif is_semicolon: - # Make sure there is a newline after the semicolon. - ret.append('') - - # context information - filename = "Unknown" - lineno = 0 - if self.directive.state: - filename = self.directive.state.document.current_source - lineno = self.directive.state.document.current_line - - # output any exceptions raised during execution to stdout - # unless :okexcept: has been specified. - if not is_okexcept and "Traceback" in processed_output: - s = "\nException in %s at block ending on line %s\n" % (filename, lineno) - s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" - sys.stdout.write('\n\n>>>' + ('-' * 73)) - sys.stdout.write(s) - sys.stdout.write(processed_output) - sys.stdout.write('<<<' + ('-' * 73) + '\n\n') - - # output any warning raised during execution to stdout - # unless :okwarning: has been specified. - if not is_okwarning: - for w in ws: - s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno) - s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" - sys.stdout.write('\n\n>>>' + ('-' * 73)) - sys.stdout.write(s) - sys.stdout.write(('-' * 76) + '\n') - s=warnings.formatwarning(w.message, w.category, - w.filename, w.lineno, w.line) - sys.stdout.write(s) - sys.stdout.write('<<<' + ('-' * 73) + '\n') - - self.cout.truncate(0) - - return (ret, input_lines, processed_output, - is_doctest, decorator, image_file, image_directive) - - - def process_output(self, data, output_prompt, input_lines, output, - is_doctest, decorator, image_file): - """ - Process data block for OUTPUT token. - - """ - # Recall: `data` is the submitted output, and `output` is the processed - # output from `input_lines`. 
- - TAB = ' ' * 4 - - if is_doctest and output is not None: - - found = output # This is the processed output - found = found.strip() - submitted = data.strip() - - if self.directive is None: - source = 'Unavailable' - content = 'Unavailable' - else: - source = self.directive.state.document.current_source - content = self.directive.content - # Add tabs and join into a single string. - content = '\n'.join([TAB + line for line in content]) - - # Make sure the output contains the output prompt. - ind = found.find(output_prompt) - if ind < 0: - e = ('output does not contain output prompt\n\n' - 'Document source: {0}\n\n' - 'Raw content: \n{1}\n\n' - 'Input line(s):\n{TAB}{2}\n\n' - 'Output line(s):\n{TAB}{3}\n\n') - e = e.format(source, content, '\n'.join(input_lines), - repr(found), TAB=TAB) - raise RuntimeError(e) - found = found[len(output_prompt):].strip() - - # Handle the actual doctest comparison. - if decorator.strip() == '@doctest': - # Standard doctest - if found != submitted: - e = ('doctest failure\n\n' - 'Document source: {0}\n\n' - 'Raw content: \n{1}\n\n' - 'On input line(s):\n{TAB}{2}\n\n' - 'we found output:\n{TAB}{3}\n\n' - 'instead of the expected:\n{TAB}{4}\n\n') - e = e.format(source, content, '\n'.join(input_lines), - repr(found), repr(submitted), TAB=TAB) - raise RuntimeError(e) - else: - self.custom_doctest(decorator, input_lines, found, submitted) - - # When in verbatim mode, this holds additional submitted output - # to be written in the final Sphinx output. - # https://github.com/ipython/ipython/issues/5776 - out_data = [] - - is_verbatim = decorator=='@verbatim' or self.is_verbatim - if is_verbatim and data.strip(): - # Note that `ret` in `process_block` has '' as its last element if - # the code block was in verbatim mode. So if there is no submitted - # output, then we will have proper spacing only if we do not add - # an additional '' to `out_data`. This is why we condition on - # `and data.strip()`. - - # The submitted output has no output prompt. If we want the - # prompt and the code to appear, we need to join them now - # instead of adding them separately---as this would create an - # undesired newline. How we do this ultimately depends on the - # format of the output regex. I'll do what works for the default - # prompt for now, and we might have to adjust if it doesn't work - # in other cases. Finally, the submitted output does not have - # a trailing newline, so we must add it manually. - out_data.append("{0} {1}\n".format(output_prompt, data)) - - return out_data - - def process_comment(self, data): - """Process data fPblock for COMMENT token.""" - if not self.is_suppress: - return [data] - - def save_image(self, image_file): - """ - Saves the image file to disk. 
- """ - self.ensure_pyplot() - command = 'plt.gcf().savefig("%s")'%image_file - #print 'SAVEFIG', command # dbg - self.process_input_line('bookmark ipy_thisdir', store_history=False) - self.process_input_line('cd -b ipy_savedir', store_history=False) - self.process_input_line(command, store_history=False) - self.process_input_line('cd -b ipy_thisdir', store_history=False) - self.process_input_line('bookmark -d ipy_thisdir', store_history=False) - self.clear_cout() - - def process_block(self, block): - """ - process block from the block_parser and return a list of processed lines - """ - ret = [] - output = None - input_lines = None - lineno = self.IP.execution_count - - input_prompt = self.promptin % lineno - output_prompt = self.promptout % lineno - image_file = None - image_directive = None - - found_input = False - for token, data in block: - if token == COMMENT: - out_data = self.process_comment(data) - elif token == INPUT: - found_input = True - (out_data, input_lines, output, is_doctest, - decorator, image_file, image_directive) = \ - self.process_input(data, input_prompt, lineno) - elif token == OUTPUT: - if not found_input: - - TAB = ' ' * 4 - linenumber = 0 - source = 'Unavailable' - content = 'Unavailable' - if self.directive: - linenumber = self.directive.state.document.current_line - source = self.directive.state.document.current_source - content = self.directive.content - # Add tabs and join into a single string. - content = '\n'.join([TAB + line for line in content]) - - e = ('\n\nInvalid block: Block contains an output prompt ' - 'without an input prompt.\n\n' - 'Document source: {0}\n\n' - 'Content begins at line {1}: \n\n{2}\n\n' - 'Problematic block within content: \n\n{TAB}{3}\n\n') - e = e.format(source, linenumber, content, block, TAB=TAB) - - # Write, rather than include in exception, since Sphinx - # will truncate tracebacks. - sys.stdout.write(e) - raise RuntimeError('An invalid block was detected.') - - out_data = \ - self.process_output(data, output_prompt, input_lines, - output, is_doctest, decorator, - image_file) - if out_data: - # Then there was user submitted output in verbatim mode. - # We need to remove the last element of `ret` that was - # added in `process_input`, as it is '' and would introduce - # an undesirable newline. - assert(ret[-1] == '') - del ret[-1] - - if out_data: - ret.extend(out_data) - - # save the image files - if image_file is not None: - self.save_image(image_file) - - return ret, image_directive - - def ensure_pyplot(self): - """ - Ensures that pyplot has been imported into the embedded IPython shell. - - Also, makes sure to set the backend appropriately if not set already. - - """ - # We are here if the @figure pseudo decorator was used. Thus, it's - # possible that we could be here even if python_mplbackend were set to - # `None`. That's also strange and perhaps worthy of raising an - # exception, but for now, we just set the backend to 'agg'. - - if not self._pyplot_imported: - if 'matplotlib.backends' not in sys.modules: - # Then ipython_matplotlib was set to None but there was a - # call to the @figure decorator (and ipython_execlines did - # not set a backend). - #raise Exception("No backend was set, but @figure was used!") - import matplotlib - matplotlib.use('agg') - - # Always import pyplot into embedded shell. - self.process_input_line('import matplotlib.pyplot as plt', - store_history=False) - self._pyplot_imported = True - - def process_pure_python(self, content): - """ - content is a list of strings. 
it is unedited directive content - - This runs it line by line in the InteractiveShell, prepends - prompts as needed capturing stderr and stdout, then returns - the content as a list as if it were ipython code - """ - output = [] - savefig = False # keep up with this to clear figure - multiline = False # to handle line continuation - multiline_start = None - fmtin = self.promptin - - ct = 0 - - for lineno, line in enumerate(content): - - line_stripped = line.strip() - if not len(line): - output.append(line) - continue - - # handle decorators - if line_stripped.startswith('@'): - output.extend([line]) - if 'savefig' in line: - savefig = True # and need to clear figure - continue - - # handle comments - if line_stripped.startswith('#'): - output.extend([line]) - continue - - # deal with lines checking for multiline - continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2)) - if not multiline: - modified = u"%s %s" % (fmtin % ct, line_stripped) - output.append(modified) - ct += 1 - try: - ast.parse(line_stripped) - output.append(u'') - except Exception: # on a multiline - multiline = True - multiline_start = lineno - else: # still on a multiline - modified = u'%s %s' % (continuation, line) - output.append(modified) - - # if the next line is indented, it should be part of multiline - if len(content) > lineno + 1: - nextline = content[lineno + 1] - if len(nextline) - len(nextline.lstrip()) > 3: - continue - try: - mod = ast.parse( - '\n'.join(content[multiline_start:lineno+1])) - if isinstance(mod.body[0], ast.FunctionDef): - # check to see if we have the whole function - for element in mod.body[0].body: - if isinstance(element, ast.Return): - multiline = False - else: - output.append(u'') - multiline = False - except Exception: - pass - - if savefig: # clear figure if plotted - self.ensure_pyplot() - self.process_input_line('plt.clf()', store_history=False) - self.clear_cout() - savefig = False - - return output - - def custom_doctest(self, decorator, input_lines, found, submitted): - """ - Perform a specialized doctest. 
- - """ - from .custom_doctests import doctests - - args = decorator.split() - doctest_type = args[1] - if doctest_type in doctests: - doctests[doctest_type](self, args, input_lines, found, submitted) - else: - e = "Invalid option to @doctest: {0}".format(doctest_type) - raise Exception(e) - - -class IPythonDirective(Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 4 # python, suppress, verbatim, doctest - final_argumuent_whitespace = True - option_spec = { 'python': directives.unchanged, - 'suppress' : directives.flag, - 'verbatim' : directives.flag, - 'doctest' : directives.flag, - 'okexcept': directives.flag, - 'okwarning': directives.flag - } - - shell = None - - seen_docs = set() - - def get_config_options(self): - # contains sphinx configuration variables - config = self.state.document.settings.env.config - - # get config variables to set figure output directory - savefig_dir = config.ipython_savefig_dir - source_dir = self.state.document.settings.env.srcdir - savefig_dir = os.path.join(source_dir, savefig_dir) - - # get regex and prompt stuff - rgxin = config.ipython_rgxin - rgxout = config.ipython_rgxout - promptin = config.ipython_promptin - promptout = config.ipython_promptout - mplbackend = config.ipython_mplbackend - exec_lines = config.ipython_execlines - hold_count = config.ipython_holdcount - - return (savefig_dir, source_dir, rgxin, rgxout, - promptin, promptout, mplbackend, exec_lines, hold_count) - - def setup(self): - # Get configuration values. - (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, - mplbackend, exec_lines, hold_count) = self.get_config_options() - - try: - os.makedirs(savefig_dir) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - if self.shell is None: - # We will be here many times. However, when the - # EmbeddedSphinxShell is created, its interactive shell member - # is the same for each instance. - - if mplbackend and 'matplotlib.backends' not in sys.modules: - import matplotlib - matplotlib.use(mplbackend) - - # Must be called after (potentially) importing matplotlib and - # setting its backend since exec_lines might import pylab. - self.shell = EmbeddedSphinxShell(exec_lines) - - # Store IPython directive to enable better error messages - self.shell.directive = self - - # reset the execution count if we haven't processed this doc - #NOTE: this may be borked if there are multiple seen_doc tmp files - #check time stamp? 
- if not self.state.document.current_source in self.seen_docs: - self.shell.IP.history_manager.reset() - self.shell.IP.execution_count = 1 - self.seen_docs.add(self.state.document.current_source) - - # and attach to shell so we don't have to pass them around - self.shell.rgxin = rgxin - self.shell.rgxout = rgxout - self.shell.promptin = promptin - self.shell.promptout = promptout - self.shell.savefig_dir = savefig_dir - self.shell.source_dir = source_dir - self.shell.hold_count = hold_count - - # setup bookmark for saving figures directory - self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir, - store_history=False) - self.shell.clear_cout() - - return rgxin, rgxout, promptin, promptout - - def teardown(self): - # delete last bookmark - self.shell.process_input_line('bookmark -d ipy_savedir', - store_history=False) - self.shell.clear_cout() - - def run(self): - debug = False - - #TODO, any reason block_parser can't be a method of embeddable shell - # then we wouldn't have to carry these around - rgxin, rgxout, promptin, promptout = self.setup() - - options = self.options - self.shell.is_suppress = 'suppress' in options - self.shell.is_doctest = 'doctest' in options - self.shell.is_verbatim = 'verbatim' in options - self.shell.is_okexcept = 'okexcept' in options - self.shell.is_okwarning = 'okwarning' in options - - # handle pure python code - if 'python' in self.arguments: - content = self.content - self.content = self.shell.process_pure_python(content) - - # parts consists of all text within the ipython-block. - # Each part is an input/output block. - parts = '\n'.join(self.content).split('\n\n') - - lines = ['.. code-block:: ipython', ''] - figures = [] - - for part in parts: - block = block_parser(part, rgxin, rgxout, promptin, promptout) - if len(block): - rows, figure = self.shell.process_block(block) - for row in rows: - lines.extend([' {0}'.format(line) - for line in row.split('\n')]) - - if figure is not None: - figures.append(figure) - - for figure in figures: - lines.append('') - lines.extend(figure.split('\n')) - lines.append('') - - if len(lines) > 2: - if debug: - print('\n'.join(lines)) - else: - # This has to do with input, not output. But if we comment - # these lines out, then no IPython code will appear in the - # final output. - self.state_machine.insert_input( - lines, self.state_machine.input_lines.source(0)) - - # cleanup - self.teardown() - - return [] - -# Enable as a proper Sphinx directive -def setup(app): - setup.app = app - - app.add_directive('ipython', IPythonDirective) - app.add_config_value('ipython_savefig_dir', 'savefig', 'env') - app.add_config_value('ipython_rgxin', - re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env') - app.add_config_value('ipython_rgxout', - re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env') - app.add_config_value('ipython_promptin', 'In [%d]:', 'env') - app.add_config_value('ipython_promptout', 'Out[%d]:', 'env') - - # We could just let matplotlib pick whatever is specified as the default - # backend in the matplotlibrc file, but this would cause issues if the - # backend didn't work in headless environments. For this reason, 'agg' - # is a good default backend choice. - app.add_config_value('ipython_mplbackend', 'agg', 'env') - - # If the user sets this config value to `None`, then EmbeddedSphinxShell's - # __init__ method will treat it as []. 
- execlines = ['import numpy as np', 'import matplotlib.pyplot as plt'] - app.add_config_value('ipython_execlines', execlines, 'env') - - app.add_config_value('ipython_holdcount', True, 'env') - - metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} - return metadata - -# Simple smoke test, needs to be converted to a proper automatic test. -def test(): - - examples = [ - r""" -In [9]: pwd -Out[9]: '/home/jdhunter/py4science/book' - -In [10]: cd bookdata/ -/home/jdhunter/py4science/book/bookdata - -In [2]: from pylab import * - -In [2]: ion() - -In [3]: im = imread('stinkbug.png') - -@savefig mystinkbug.png width=4in -In [4]: imshow(im) -Out[4]: <matplotlib.image.AxesImage object at 0x39ea850> - -""", - r""" - -In [1]: x = 'hello world' - -# string methods can be -# used to alter the string -@doctest -In [2]: x.upper() -Out[2]: 'HELLO WORLD' - -@verbatim -In [3]: x.st<TAB> -x.startswith x.strip -""", - r""" - -In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ - .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' - -In [131]: print url.split('&') -['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] - -In [60]: import urllib - -""", - r"""\ - -In [133]: import numpy.random - -@suppress -In [134]: numpy.random.seed(2358) - -@doctest -In [135]: numpy.random.rand(10,2) -Out[135]: -array([[ 0.64524308, 0.59943846], - [ 0.47102322, 0.8715456 ], - [ 0.29370834, 0.74776844], - [ 0.99539577, 0.1313423 ], - [ 0.16250302, 0.21103583], - [ 0.81626524, 0.1312433 ], - [ 0.67338089, 0.72302393], - [ 0.7566368 , 0.07033696], - [ 0.22591016, 0.77731835], - [ 0.0072729 , 0.34273127]]) - -""", - - r""" -In [106]: print x -jdh - -In [109]: for i in range(10): - .....: print i - .....: - .....: -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -""", - - r""" - -In [144]: from pylab import * - -In [145]: ion() - -# use a semicolon to suppress the output -@savefig test_hist.png width=4in -In [151]: hist(np.random.randn(10000), 100); - - -@savefig test_plot.png width=4in -In [151]: plot(np.random.randn(10000), 'o'); - """, - - r""" -# use a semicolon to suppress the output -In [151]: plt.clf() - -@savefig plot_simple.png width=4in -In [151]: plot([1,2,3]) - -@savefig hist_simple.png width=4in -In [151]: hist(np.random.randn(10000), 100); - -""", - r""" -# update the current fig -In [151]: ylabel('number') - -In [152]: title('normal distribution') - - -@savefig hist_with_text.png -In [153]: grid(True) - -@doctest float -In [154]: 0.1 + 0.2 -Out[154]: 0.3 - -@doctest float -In [155]: np.arange(16).reshape(4,4) -Out[155]: -array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - -In [1]: x = np.arange(16, dtype=float).reshape(4,4) - -In [2]: x[0,0] = np.inf - -In [3]: x[0,1] = np.nan - -@doctest float -In [4]: x -Out[4]: -array([[ inf, nan, 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - - - """, - ] - # skip local-file depending first example: - examples = examples[1:] - - #ipython_directive.DEBUG = True # dbg - #options = dict(suppress=True) # dbg - options = {} - for example in examples: - content = example.split('\n') - IPythonDirective('debug', arguments=None, options=options, - content=content, lineno=0, - content_offset=None, block_text=None, - state=None, state_machine=None, - ) - -# Run test suite as a script -if __name__=='__main__': - if not os.path.isdir('_static'): - os.mkdir('_static') - test() - print('All OK? Check figures in _static/')
This reverts commit d6049a0efb7c04996bc374f3747bab8fe4c84bac. xref https://github.com/pandas-dev/pandas/issues/18147; this re-does https://github.com/pandas-dev/pandas/pull/18193 (which was reverted in https://github.com/pandas-dev/pandas/pull/18320 because there was still a bug in IPython; that bug is now fixed in the released version: https://github.com/ipython/ipython/pull/10907).
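With the vendored copy gone, the docs build picks the directive up from IPython itself. A minimal sketch of what that looks like in a Sphinx conf.py (the module paths are IPython's shipped sphinxext; the exact pandas conf.py change is not part of this diff):

```python
# doc/source/conf.py (sketch): rely on the sphinxext shipped with IPython
# instead of a vendored ipython_directive.py
extensions = [
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',
    # ... other extensions unchanged ...
]

# the directive's config values keep working as before, e.g.:
ipython_mplbackend = 'agg'
ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
```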
https://api.github.com/repos/pandas-dev/pandas/pulls/19657
2018-02-12T12:49:53Z
2018-04-23T13:59:17Z
2018-04-23T13:59:17Z
2018-04-23T14:00:47Z
BUG: fix Period.asfreq conversion near datetime(1, 1, 1)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 879b245af49cd..7cecad338101b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -741,8 +741,9 @@ Timedelta - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (:issue:`19042`) - Bug in :func:`Timedelta.__add__`, :func:`Timedelta.__sub__` where adding or subtracting a ``np.timedelta64`` object would return another ``np.timedelta64`` instead of a ``Timedelta`` (:issue:`19738`) - Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) +- Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`) - Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. ``Timedelta('30S').total_seconds()==30.000000000000004`` (:issue:`19458`) - +- Timezones ^^^^^^^^^ diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 7c4de8e42e73b..a812ed2e7e2b3 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -138,7 +138,7 @@ PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal, } static npy_int64 DtoB_weekday(npy_int64 absdate) { - return (((absdate) / 7) * 5) + (absdate) % 7 - BDAY_OFFSET; + return floordiv(absdate, 7) * 5 + mod_compat(absdate, 7) - BDAY_OFFSET; } static npy_int64 DtoB(struct date_info *dinfo, @@ -245,7 +245,8 @@ static npy_int64 asfreq_UpsampleWithinDay(npy_int64 ordinal, static npy_int64 asfreq_BtoDT(npy_int64 ordinal, asfreq_info *af_info) { ordinal += BDAY_OFFSET; ordinal = - (((ordinal - 1) / 5) * 7 + mod_compat(ordinal - 1, 5) + 1 - ORD_OFFSET); + (floordiv(ordinal - 1, 5) * 7 + mod_compat(ordinal - 1, 5) + 1 - + ORD_OFFSET); return upsample_daytime(ordinal, af_info); } diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 32ffe4e6d0453..e1c783ac9fa54 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -154,12 +154,32 @@ cdef inline int get_freq_group(int freq) nogil: return (freq // 1000) * 1000 -@cython.cdivision +# specifically _dont_ use cdvision or else ordinals near -1 are assigned to +# incorrect dates GH#19643 +@cython.cdivision(False) cdef int64_t get_period_ordinal(int year, int month, int day, int hour, int minute, int second, int microseconds, int picoseconds, int freq) nogil: - """generate an ordinal in period space""" + """ + Generate an ordinal in period space + + Parameters + ---------- + year : int + month : int + day : int + hour : int + minute : int + second : int + microseconds : int + picoseconds : int + freq : int + + Returns + ------- + period_ordinal : int64_t + """ cdef: int64_t absdays, unix_date, seconds, delta int64_t weeks @@ -190,7 +210,7 @@ cdef int64_t get_period_ordinal(int year, int month, int day, if month >= fmonth: mdiff += 12 - return (year - 1970) * 4 + (mdiff - 1) / 3 + return (year - 1970) * 4 + (mdiff - 1) // 3 elif freq == FR_MTH: return (year - 1970) * 12 + month - 1 @@ -202,14 +222,14 @@ cdef int64_t get_period_ordinal(int year, int month, int day, seconds = unix_date * 86400 + hour * 3600 + minute * 60 + second if freq == FR_MS: - return seconds * 1000 + microseconds / 1000 + return seconds * 1000 + microseconds // 1000 elif freq == FR_US: return seconds * 1000000 + microseconds elif freq == FR_NS: return (seconds * 
1000000000 + - microseconds * 1000 + picoseconds / 1000) + microseconds * 1000 + picoseconds // 1000) else: return seconds @@ -229,7 +249,7 @@ cdef int64_t get_period_ordinal(int year, int month, int day, elif freq == FR_BUS: # calculate the current week assuming sunday as last day of a week # Jan 1 0001 is a Monday, so subtract 1 to get to end-of-week - weeks = (unix_date + ORD_OFFSET - 1) / 7 + weeks = (unix_date + ORD_OFFSET - 1) // 7 # calculate the current weekday (in range 1 .. 7) delta = (unix_date + ORD_OFFSET - 1) % 7 + 1 # return the number of business days in full weeks plus the business @@ -241,12 +261,12 @@ cdef int64_t get_period_ordinal(int year, int month, int day, elif freq_group == FR_WK: day_adj = freq - FR_WK - return (unix_date + ORD_OFFSET - (1 + day_adj)) / 7 + 1 - WEEK_OFFSET + return (unix_date + ORD_OFFSET - (1 + day_adj)) // 7 + 1 - WEEK_OFFSET # raise ValueError -cdef int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil: +cdef void get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil: cdef: int64_t absdate double abstime @@ -263,7 +283,6 @@ cdef int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil: absdate += 1 dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime) - return 0 cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: @@ -272,6 +291,15 @@ cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: This corresponds to the number of days since Jan., 1st, 1AD. When the instance has a frequency less than daily, the proleptic date is calculated for the last day of the period. + + Parameters + ---------- + period_ordinal : int64_t + freq : int + + Returns + ------- + absdate : int64_t number of days since datetime(1, 1, 1) """ cdef: asfreq_info af_info @@ -285,11 +313,23 @@ cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: return toDaily(period_ordinal, &af_info) + ORD_OFFSET -cdef int dInfoCalc_SetFromAbsDateTime(date_info *dinfo, - int64_t absdate, double abstime) nogil: +cdef void dInfoCalc_SetFromAbsDateTime(date_info *dinfo, + int64_t absdate, double abstime) nogil: """ Set the instance's value using the given date and time. Assumes GREGORIAN_CALENDAR. + + Parameters + ---------- + dinfo : date_info* + absdate : int64_t + days elapsed since datetime(1, 1, 1) + abstime : double + seconds elapsed since beginning of day described by absdate + + Notes + ----- + Updates dinfo inplace """ # Bounds check # The calling function is responsible for ensuring that @@ -300,13 +340,21 @@ cdef int dInfoCalc_SetFromAbsDateTime(date_info *dinfo, # Calculate the time dInfoCalc_SetFromAbsTime(dinfo, abstime) - return 0 -cdef int dInfoCalc_SetFromAbsDate(date_info *dinfo, int64_t absdate) nogil: +cdef void dInfoCalc_SetFromAbsDate(date_info *dinfo, int64_t absdate) nogil: """ Sets the date part of the date_info struct Assumes GREGORIAN_CALENDAR + + Parameters + ---------- + dinfo : date_info* + unix_date : int64_t + + Notes + ----- + Updates dinfo inplace """ cdef: pandas_datetimestruct dts @@ -315,13 +363,22 @@ cdef int dInfoCalc_SetFromAbsDate(date_info *dinfo, int64_t absdate) nogil: dinfo.year = dts.year dinfo.month = dts.month dinfo.day = dts.day - return 0 @cython.cdivision -cdef int dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: +cdef void dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: """ Sets the time part of the DateTime object. 
+ + Parameters + ---------- + dinfo : date_info* + abstime : double + seconds elapsed since beginning of day described by absdate + + Notes + ----- + Updates dinfo inplace """ cdef: int inttime @@ -336,7 +393,6 @@ cdef int dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: dinfo.hour = hour dinfo.minute = minute dinfo.second = second - return 0 @cython.cdivision @@ -370,7 +426,19 @@ cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: Find the absdate (days elapsed since datetime(1, 1, 1) for the given year/month/day. Assumes GREGORIAN_CALENDAR + + Parameters + ---------- + year : int + month : int + day : int + + Returns + ------- + absdate : int + days elapsed since datetime(1, 1, 1) """ + # /* Calculate the absolute date cdef: pandas_datetimestruct dts @@ -385,6 +453,25 @@ cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): + """ + Find the year and quarter of a Period with the given ordinal and frequency + + Parameters + ---------- + ordinal : int64_t + freq : int + quarter : *int + year : *int + + Returns + ------- + qtr_freq : int + describes the implied quarterly frequency associated with `freq` + + Notes + ----- + Sets quarter and year inplace + """ cdef: asfreq_info af_info int qtr_freq @@ -403,8 +490,8 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): return qtr_freq -cdef int64_t DtoQ_yq(int64_t ordinal, asfreq_info *af_info, - int *year, int *quarter): +cdef void DtoQ_yq(int64_t ordinal, asfreq_info *af_info, + int *year, int *quarter): cdef: date_info dinfo @@ -419,7 +506,6 @@ cdef int64_t DtoQ_yq(int64_t ordinal, asfreq_info *af_info, year[0] = dinfo.year quarter[0] = monthToQuarter(dinfo.month) - return 0 cdef inline int monthToQuarter(int month): diff --git a/pandas/tests/scalar/period/test_period_asfreq.py b/pandas/tests/scalar/period/test_period_asfreq.py index a2819a3478f79..9f8b2562e9e20 100644 --- a/pandas/tests/scalar/period/test_period_asfreq.py +++ b/pandas/tests/scalar/period/test_period_asfreq.py @@ -1,3 +1,7 @@ +import pytest + +from pandas.errors import OutOfBoundsDatetime + import pandas as pd from pandas import Period, offsets from pandas.util import testing as tm @@ -6,6 +10,24 @@ class TestFreqConversion(object): """Test frequency conversion of date objects""" + @pytest.mark.parametrize('freq', ['A', 'Q', 'M', 'W', 'B', 'D']) + def test_asfreq_near_zero(self, freq): + # GH#19643, GH#19650 + per = Period('0001-01-01', freq=freq) + tup1 = (per.year, per.hour, per.day) + + prev = per - 1 + assert (per - 1).ordinal == per.ordinal - 1 + tup2 = (prev.year, prev.month, prev.day) + assert tup2 < tup1 + + @pytest.mark.xfail(reason='GH#19643 period_helper asfreq functions fail ' + 'to check for overflows') + def test_to_timestamp_out_of_bounds(self): + # GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848') + per = Period('0001-01-01', freq='B') + with pytest.raises(OutOfBoundsDatetime): + per.to_timestamp() def test_asfreq_corner(self): val = Period(freq='A', year=2007) diff --git a/pandas/tests/tslibs/test_period_asfreq.py b/pandas/tests/tslibs/test_period_asfreq.py index 98959adf6fda4..61737083e22ea 100644 --- a/pandas/tests/tslibs/test_period_asfreq.py +++ b/pandas/tests/tslibs/test_period_asfreq.py @@ -5,6 +5,7 @@ class TestPeriodFreqConversion(object): + def test_intraday_conversion_factors(self): assert period_asfreq(1, get_freq('D'), get_freq('H'), False) == 24 assert period_asfreq(1, get_freq('D'), 
get_freq('T'), False) == 1440
- [x] closes #19643
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
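The heart of the fix, illustrated standalone (a sketch, not code from the patch): C's integer division truncates toward zero, while Python/Cython floor division rounds toward negative infinity, and the two disagree exactly in the near-zero-ordinal regime around `datetime(1, 1, 1)`:

```python
def c_div(a, b):
    # emulate C integer division, which truncates toward zero
    q = abs(a) // abs(b)
    return q if (a < 0) == (b < 0) else -q

def c_mod(a, b):
    # C's % satisfies a == c_div(a, b) * b + c_mod(a, b)
    return a - c_div(a, b) * b

absdate = -1  # one day before datetime(1, 1, 1)
print(c_div(absdate, 7), c_mod(absdate, 7))  # 0 -1  (C semantics: wrong week/weekday)
print(absdate // 7, absdate % 7)             # -1 6  (floor semantics used by the fix)
```

This is why the Cython module now spells out `@cython.cdivision(False)` and the C helper switches `DtoB_weekday` over to `floordiv`/`mod_compat`.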
https://api.github.com/repos/pandas-dev/pandas/pulls/19650
2018-02-11T21:53:56Z
2018-02-21T10:31:56Z
2018-02-21T10:31:56Z
2018-02-22T05:45:43Z
Reduce redirection in ops
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index dff2b6844af94..da65f1f31ed2a 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -208,6 +208,78 @@ def _get_frame_op_default_axis(name): return 'columns' +def _get_opstr(op, cls): + """ + Find the operation string, if any, to pass to numexpr for this + operation. + + Parameters + ---------- + op : binary operator + cls : class + + Returns + ------- + op_str : string or None + """ + # numexpr is available for non-sparse classes + subtyp = getattr(cls, '_subtyp', '') + use_numexpr = 'sparse' not in subtyp + + if not use_numexpr: + # if we're not using numexpr, then don't pass a str_rep + return None + + return {operator.add: '+', + radd: '+', + operator.mul: '*', + rmul: '*', + operator.sub: '-', + rsub: '-', + operator.truediv: '/', + rtruediv: '/', + operator.floordiv: '//', + rfloordiv: '//', + operator.mod: None, # TODO: Why None for mod but '%' for rmod? + rmod: '%', + operator.pow: '**', + rpow: '**', + operator.eq: '==', + operator.ne: '!=', + operator.le: '<=', + operator.lt: '<', + operator.ge: '>=', + operator.gt: '>', + operator.and_: '&', + rand_: '&', + operator.or_: '|', + ror_: '|', + operator.xor: '^', + rxor: '^', + divmod: None, + rdivmod: None}[op] + + +def _get_op_name(op, special): + """ + Find the name to attach to this method according to conventions + for special and non-special methods. + + Parameters + ---------- + op : binary operator + special : bool + + Returns + ------- + op_name : str + """ + opname = op.__name__.strip('_') + if special: + opname = '__{opname}__'.format(opname=opname) + return opname + + # ----------------------------------------------------------------------------- # Docstring Generation and Templates @@ -501,48 +573,29 @@ def _create_methods(cls, arith_method, comp_method, bool_method, # creates actual methods based upon arithmetic, comp and bool method # constructors. 
- # numexpr is available for non-sparse classes - subtyp = getattr(cls, '_subtyp', '') - use_numexpr = 'sparse' not in subtyp - have_divmod = issubclass(cls, ABCSeries) # divmod is available for Series and SparseSeries - # if we're not using numexpr, then don't pass a str_rep - if use_numexpr: - op = lambda x: x - else: - op = lambda x: None - if special: - - def names(x): - if x[-1] == "_": - return "__{name}_".format(name=x) - else: - return "__{name}__".format(name=x) - else: - names = lambda x: x - # yapf: disable new_methods = dict( - add=arith_method(operator.add, names('add'), op('+')), - radd=arith_method(radd, names('radd'), op('+')), - sub=arith_method(operator.sub, names('sub'), op('-')), - mul=arith_method(operator.mul, names('mul'), op('*')), - truediv=arith_method(operator.truediv, names('truediv'), op('/')), - floordiv=arith_method(operator.floordiv, names('floordiv'), op('//')), + add=arith_method(cls, operator.add, special), + radd=arith_method(cls, radd, special), + sub=arith_method(cls, operator.sub, special), + mul=arith_method(cls, operator.mul, special), + truediv=arith_method(cls, operator.truediv, special), + floordiv=arith_method(cls, operator.floordiv, special), # Causes a floating point exception in the tests when numexpr enabled, # so for now no speedup - mod=arith_method(operator.mod, names('mod'), None), - pow=arith_method(operator.pow, names('pow'), op('**')), + mod=arith_method(cls, operator.mod, special), + pow=arith_method(cls, operator.pow, special), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility - rmul=arith_method(operator.mul, names('rmul'), op('*')), - rsub=arith_method(rsub, names('rsub'), op('-')), - rtruediv=arith_method(rtruediv, names('rtruediv'), op('/')), - rfloordiv=arith_method(rfloordiv, names('rfloordiv'), op('//')), - rpow=arith_method(rpow, names('rpow'), op('**')), - rmod=arith_method(rmod, names('rmod'), op('%'))) + rmul=arith_method(cls, rmul, special), + rsub=arith_method(cls, rsub, special), + rtruediv=arith_method(cls, rtruediv, special), + rfloordiv=arith_method(cls, rfloordiv, special), + rpow=arith_method(cls, rpow, special), + rmod=arith_method(cls, rmod, special)) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] @@ -550,26 +603,30 @@ def names(x): # Comp methods never had a default axis set if comp_method: new_methods.update(dict( - eq=comp_method(operator.eq, names('eq'), op('==')), - ne=comp_method(operator.ne, names('ne'), op('!=')), - lt=comp_method(operator.lt, names('lt'), op('<')), - gt=comp_method(operator.gt, names('gt'), op('>')), - le=comp_method(operator.le, names('le'), op('<=')), - ge=comp_method(operator.ge, names('ge'), op('>=')))) + eq=comp_method(cls, operator.eq, special), + ne=comp_method(cls, operator.ne, special), + lt=comp_method(cls, operator.lt, special), + gt=comp_method(cls, operator.gt, special), + le=comp_method(cls, operator.le, special), + ge=comp_method(cls, operator.ge, special))) if bool_method: new_methods.update( - dict(and_=bool_method(operator.and_, names('and_'), op('&')), - or_=bool_method(operator.or_, names('or_'), op('|')), + dict(and_=bool_method(cls, operator.and_, special), + or_=bool_method(cls, operator.or_, special), # For some reason ``^`` wasn't used in original. 
- xor=bool_method(operator.xor, names('xor'), op('^')), - rand_=bool_method(rand_, names('rand_'), op('&')), - ror_=bool_method(ror_, names('ror_'), op('|')), - rxor=bool_method(rxor, names('rxor'), op('^')))) + xor=bool_method(cls, operator.xor, special), + rand_=bool_method(cls, rand_, special), + ror_=bool_method(cls, ror_, special), + rxor=bool_method(cls, rxor, special))) if have_divmod: # divmod doesn't have an op that is supported by numexpr - new_methods['divmod'] = arith_method(divmod, names('divmod'), None) + new_methods['divmod'] = arith_method(cls, divmod, special) - new_methods = {names(k): v for k, v in new_methods.items()} + if special: + dunderize = lambda x: '__{name}__'.format(name=x.strip('_')) + else: + dunderize = lambda x: x + new_methods = {dunderize(k): v for k, v in new_methods.items()} return new_methods @@ -596,16 +653,15 @@ def add_special_arithmetic_methods(cls, arith_method=None, Parameters ---------- arith_method : function (optional) - factory for special arithmetic methods, with op string: - f(op, name, str_rep) + factory for special arithmetic methods: + f(cls, op, special) comp_method : function (optional) - factory for rich comparison - signature: f(op, name, str_rep) + factory for rich comparison - signature: f(cls, op, special) bool_method : function (optional) - factory for boolean methods - signature: f(op, name, str_rep) + factory for boolean methods - signature: f(cls, op, special) """ new_methods = _create_methods(cls, arith_method, comp_method, bool_method, special=True) - # inplace operators (I feel like these should get passed an `inplace=True` # or just be removed @@ -645,8 +701,7 @@ def f(self, other): add_methods(cls, new_methods=new_methods) -def add_flex_arithmetic_methods(cls, flex_arith_method, - flex_comp_method=None, flex_bool_method=None): +def add_flex_arithmetic_methods(cls, flex_arith_method, flex_comp_method=None): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. @@ -654,13 +709,13 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, Parameters ---------- flex_arith_method : function - factory for flex arithmetic methods, with op string: - f(op, name, str_rep) + factory for flex arithmetic methods: + f(cls, op, special) flex_comp_method : function, optional, - factory for rich comparison - signature: f(op, name, str_rep) + factory for rich comparison - signature: f(cls, op, special) """ new_methods = _create_methods(cls, flex_arith_method, - flex_comp_method, flex_bool_method, + flex_comp_method, bool_method=None, special=False) new_methods.update(dict(multiply=new_methods['mul'], subtract=new_methods['sub'], @@ -719,11 +774,13 @@ def _construct_divmod_result(left, result, index, name, dtype): ) -def _arith_method_SERIES(op, name, str_rep): +def _arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ + str_rep = _get_opstr(op, cls) + name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(name) fill_zeros = _gen_fill_zeros(name) construct_result = (_construct_divmod_result @@ -856,11 +913,12 @@ def _comp_method_OBJECT_ARRAY(op, x, y): return result -def _comp_method_SERIES(op, name, str_rep): +def _comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
""" + name = _get_op_name(op, special) masker = _gen_eval_kwargs(name).get('masker', False) def na_op(x, y): @@ -995,7 +1053,7 @@ def wrapper(self, other, axis=None): return wrapper -def _bool_method_SERIES(op, name, str_rep): +def _bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. @@ -1066,7 +1124,8 @@ def wrapper(self, other): return wrapper -def _flex_method_SERIES(op, name, str_rep): +def _flex_method_SERIES(cls, op, special): + name = _get_op_name(op, special) doc = _make_flex_doc(name, 'series') @Appender(doc) @@ -1192,7 +1251,9 @@ def to_series(right): return right -def _arith_method_FRAME(op, name, str_rep=None): +def _arith_method_FRAME(cls, op, special): + str_rep = _get_opstr(op, cls) + name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(name) fill_zeros = _gen_fill_zeros(name) default_axis = _get_frame_op_default_axis(name) @@ -1270,7 +1331,9 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return f -def _flex_comp_method_FRAME(op, name, str_rep=None): +def _flex_comp_method_FRAME(cls, op, special): + str_rep = _get_opstr(op, cls) + name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(name) def na_op(x, y): @@ -1306,7 +1369,10 @@ def f(self, other, axis=default_axis, level=None): return f -def _comp_method_FRAME(func, name, str_rep): +def _comp_method_FRAME(cls, func, special): + str_rep = _get_opstr(func, cls) + name = _get_op_name(func, special) + @Appender('Wrapper for comparison method {name}'.format(name=name)) def f(self, other): if isinstance(other, ABCDataFrame): @@ -1345,8 +1411,10 @@ def f(self, other): # ----------------------------------------------------------------------------- # Panel -def _arith_method_PANEL(op, name, str_rep=None): +def _arith_method_PANEL(cls, op, special): # work only for scalars + name = _get_op_name(op, special) + def f(self, other): if not is_scalar(other): raise ValueError('Simple arithmetic with {name} can only be ' @@ -1359,7 +1427,10 @@ def f(self, other): return f -def _comp_method_PANEL(op, name, str_rep=None): +def _comp_method_PANEL(cls, op, special): + str_rep = _get_opstr(op, cls) + name = _get_op_name(op, special) + def na_op(x, y): import pandas.core.computation.expressions as expressions @@ -1389,7 +1460,9 @@ def f(self, other, axis=None): return f -def _flex_method_PANEL(op, name, str_rep=None): +def _flex_method_PANEL(cls, op, special): + str_rep = _get_opstr(op, cls) + name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(name) fill_zeros = _gen_fill_zeros(name) @@ -1427,18 +1500,19 @@ def f(self, other, axis=0): comp_method=_comp_method_PANEL, bool_method=_arith_method_PANEL) +panel_flex_funcs = dict(flex_arith_method=_flex_method_PANEL, + flex_comp_method=_comp_method_PANEL) # ----------------------------------------------------------------------------- # Sparse -def _arith_method_SPARSE_SERIES(op, name, str_rep=None): +def _arith_method_SPARSE_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. - - str_rep is not used, but is present for compatibility. 
""" + name = _get_op_name(op, special) def wrapper(self, other): if isinstance(other, ABCDataFrame): @@ -1476,11 +1550,12 @@ def _sparse_series_op(left, right, op, name): return left._constructor(result, index=new_index, name=new_name) -def _arith_method_SPARSE_ARRAY(op, name, str_rep=None): +def _arith_method_SPARSE_ARRAY(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ + name = _get_op_name(op, special) def wrapper(self, other): from pandas.core.sparse.array import ( @@ -1508,3 +1583,12 @@ def wrapper(self, other): name = name[2:-2] wrapper.__name__ = name return wrapper + + +sparse_array_special_funcs = dict(arith_method=_arith_method_SPARSE_ARRAY, + comp_method=_arith_method_SPARSE_ARRAY, + bool_method=_arith_method_SPARSE_ARRAY) + +sparse_series_special_funcs = dict(arith_method=_arith_method_SPARSE_SERIES, + comp_method=_arith_method_SPARSE_SERIES, + bool_method=None) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 7f973992fb07f..3be1e3ef8734d 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1528,8 +1528,7 @@ def _extract_axis(self, data, axis=0, intersect=False): 'minor_axis': 'columns'}) ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs) -ops.add_flex_arithmetic_methods(Panel, ops._flex_method_PANEL, - flex_comp_method=ops._comp_method_PANEL) +ops.add_flex_arithmetic_methods(Panel, **ops.panel_flex_funcs) Panel._add_numeric_operations() diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 3cbae717d0e07..4f7152666f7bf 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -844,6 +844,4 @@ def _make_index(length, indices, kind): ops.add_special_arithmetic_methods(SparseArray, - arith_method=ops._arith_method_SPARSE_ARRAY, - comp_method=ops._arith_method_SPARSE_ARRAY, - bool_method=ops._arith_method_SPARSE_ARRAY) + **ops.sparse_array_special_funcs) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 62a467bec2683..335a4c80adc63 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -817,6 +817,4 @@ def from_coo(cls, A, dense_index=False): # overwrite basic arithmetic to use SparseSeries version # force methods to overwrite previous definitions. ops.add_special_arithmetic_methods(SparseSeries, - ops._arith_method_SPARSE_SERIES, - comp_method=ops._arith_method_SPARSE_SERIES, - bool_method=None) + **ops.sparse_series_special_funcs)
Among other things, this will make it easier to handle the sparse vs non-sparse Frame ops in one place.
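As a standalone illustration of the naming convention the new factories rely on (the helper body below mirrors `_get_op_name` from the diff):

```python
import operator

def _get_op_name(op, special):
    # strip surrounding underscores ('and_' -> 'and'), then dunderize if special
    opname = op.__name__.strip('_')
    if special:
        opname = '__{opname}__'.format(opname=opname)
    return opname

print(_get_op_name(operator.add, special=False))  # 'add'
print(_get_op_name(operator.add, special=True))   # '__add__'
print(_get_op_name(operator.and_, special=True))  # '__and__'
```

Each factory now derives both the method name and the numexpr string internally from `(cls, op, special)`, which is what removes the `f(op, name, str_rep)` plumbing at every call site.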
https://api.github.com/repos/pandas-dev/pandas/pulls/19649
2018-02-11T21:13:40Z
2018-02-19T23:19:02Z
2018-02-19T23:19:02Z
2018-02-21T16:49:04Z
TST: placement of network error catching in s3 tests
diff --git a/pandas/io/common.py b/pandas/io/common.py index 4ba969f0abac4..e312181f08512 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -183,7 +183,10 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, Returns ------- - a filepath_ or buffer or S3File instance, the encoding, the compression + tuple of ({a filepath_ or buffer or S3File instance}, + encoding, str, + compression, str, + should_close, bool) """ filepath_or_buffer = _stringify_path(filepath_or_buffer) @@ -194,7 +197,8 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, # Override compression based on Content-Encoding header compression = 'gzip' reader = BytesIO(req.read()) - return reader, encoding, compression + req.close() + return reader, encoding, compression, True if is_s3_url(filepath_or_buffer): from pandas.io import s3 @@ -206,13 +210,13 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, if isinstance(filepath_or_buffer, (compat.string_types, compat.binary_type, mmap.mmap)): - return _expand_user(filepath_or_buffer), None, compression + return _expand_user(filepath_or_buffer), None, compression, False if not is_file_like(filepath_or_buffer): msg = "Invalid file path or buffer object type: {_type}" raise ValueError(msg.format(_type=type(filepath_or_buffer))) - return filepath_or_buffer, None, compression + return filepath_or_buffer, None, compression, False def file_path_to_url(path): @@ -309,6 +313,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, is_text : boolean, default True whether file/buffer is in text format (csv, json, etc.), or in binary mode (pickle, etc.) + Returns ------- f : file-like diff --git a/pandas/io/excel.py b/pandas/io/excel.py index b03987e933bff..0d3d4286f5a3c 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -381,7 +381,7 @@ def __init__(self, io, **kwds): if _is_url(self._io): io = _urlopen(self._io) elif not isinstance(self.io, (ExcelFile, xlrd.Book)): - io, _, _ = get_filepath_or_buffer(self._io) + io, _, _, _ = get_filepath_or_buffer(self._io) if engine == 'xlrd' and isinstance(io, xlrd.Book): self.book = io diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index e3a1321336fb3..24364fe07405e 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -404,7 +404,7 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, """ compression = _infer_compression(path_or_buf, compression) - filepath_or_buffer, _, compression = get_filepath_or_buffer( + filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, ) @@ -419,7 +419,13 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, if chunksize: return json_reader - return json_reader.read() + result = json_reader.read() + if should_close: + try: + filepath_or_buffer.close() + except: # noqa: flake8 + pass + return result class JsonReader(BaseIterator): diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 9289853a1bbfd..d3e6f0cf4a1bc 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -180,7 +180,7 @@ def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs): obj : type of object stored in file """ - path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf) + path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf) if iterator: return Iterator(path_or_buf) @@ -188,6 +188,12 @@ def read(fh): l = list(unpack(fh, encoding=encoding, **kwargs)) if len(l) == 1: return l[0] + + if 
should_close: + try: + path_or_buf.close() + except: # noqa: flake8 + pass return l # see if we have an actual file diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6e1b6e14861c3..1c22a305c089d 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -107,7 +107,7 @@ def write(self, df, path, compression='snappy', self.validate_dataframe(df) if self._pyarrow_lt_070: self._validate_write_lt_070(df) - path, _, _ = get_filepath_or_buffer(path, mode='wb') + path, _, _, _ = get_filepath_or_buffer(path, mode='wb') if self._pyarrow_lt_060: table = self.api.Table.from_pandas(df, timestamps_to_ms=True) @@ -121,13 +121,21 @@ def write(self, df, path, compression='snappy', coerce_timestamps=coerce_timestamps, **kwargs) def read(self, path, columns=None, **kwargs): - path, _, _ = get_filepath_or_buffer(path) + path, _, _, should_close = get_filepath_or_buffer(path) if self._pyarrow_lt_070: - return self.api.parquet.read_pandas(path, columns=columns, - **kwargs).to_pandas() - kwargs['use_pandas_metadata'] = True - return self.api.parquet.read_table(path, columns=columns, - **kwargs).to_pandas() + result = self.api.parquet.read_pandas(path, columns=columns, + **kwargs).to_pandas() + else: + kwargs['use_pandas_metadata'] = True + result = self.api.parquet.read_table(path, columns=columns, + **kwargs).to_pandas() + if should_close: + try: + path.close() + except: # noqa: flake8 + pass + + return result def _validate_write_lt_070(self, df): # Compatibility shim for pyarrow < 0.7.0 @@ -199,11 +207,11 @@ def write(self, df, path, compression='snappy', **kwargs): # path is s3:// so we need to open the s3file in 'wb' mode. # TODO: Support 'ab' - path, _, _ = get_filepath_or_buffer(path, mode='wb') + path, _, _, _ = get_filepath_or_buffer(path, mode='wb') # And pass the opened s3file to the fastparquet internal impl. kwargs['open_with'] = lambda path, _: path else: - path, _, _ = get_filepath_or_buffer(path) + path, _, _, _ = get_filepath_or_buffer(path) with catch_warnings(record=True): self.api.write(path, df, @@ -214,13 +222,13 @@ def read(self, path, columns=None, **kwargs): # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. 
- s3, _, _ = get_filepath_or_buffer(path) + s3, _, _, should_close = get_filepath_or_buffer(path) try: parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) finally: s3.close() else: - path, _, _ = get_filepath_or_buffer(path) + path, _, _, _ = get_filepath_or_buffer(path) parquet_file = self.api.ParquetFile(path) return parquet_file.to_pandas(columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index af1441f4a0fc9..7ea6d321e0fdd 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -413,7 +413,7 @@ def _read(filepath_or_buffer, kwds): compression = kwds.get('compression') compression = _infer_compression(filepath_or_buffer, compression) - filepath_or_buffer, _, compression = get_filepath_or_buffer( + filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( filepath_or_buffer, encoding, compression) kwds['compression'] = compression @@ -439,6 +439,13 @@ def _read(filepath_or_buffer, kwds): data = parser.read(nrows) finally: parser.close() + + if should_close: + try: + filepath_or_buffer.close() + except: # noqa: flake8 + pass + return data diff --git a/pandas/io/s3.py b/pandas/io/s3.py index e2650e29c0db3..bd2286c5c8569 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -27,7 +27,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, fs = s3fs.S3FileSystem(anon=False) try: filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - except (OSError, NoCredentialsError): + except (compat.FileNotFoundError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... # An OSError is raised if you have credentials, but they @@ -36,4 +36,4 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, # for that bucket. 
fs = s3fs.S3FileSystem(anon=True) filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - return filepath_or_buffer, None, compression + return filepath_or_buffer, None, compression, True diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 26e39f0df8b29..806cbddaa2ee2 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -90,7 +90,7 @@ def __init__(self, path_or_buf, index=None, convert_dates=True, self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 - self._path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf) + self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) if isinstance(self._path_or_buf, compat.string_types): self._path_or_buf = open(self._path_or_buf, 'rb') self.handle = self._path_or_buf diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index c14524f7d7cd6..7994517b9f303 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -236,7 +236,8 @@ def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1', self._chunksize = chunksize if isinstance(filepath_or_buffer, str): - filepath_or_buffer, encoding, compression = get_filepath_or_buffer( + (filepath_or_buffer, encoding, + compression, should_close) = get_filepath_or_buffer( filepath_or_buffer, encoding=encoding) if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ee6975ea1d938..9646831cb612c 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -988,7 +988,7 @@ def __init__(self, path_or_buf, convert_dates=True, self._native_byteorder = _set_endianness(sys.byteorder) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): - path_or_buf, encoding, _ = get_filepath_or_buffer( + path_or_buf, encoding, _, should_close = get_filepath_or_buffer( path_or_buf, encoding=self._default_encoding ) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 57e72da2fd3f4..8deb51e190bab 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -2,30 +2,34 @@ import pytest from pandas.io.parsers import read_table +from pandas.util import testing as tm -HERE = os.path.dirname(__file__) +@pytest.fixture +def parser_data(request): + return os.path.join(tm.get_data_path(), '..', 'parser', 'data') -@pytest.fixture(scope='module') -def tips_file(): + +@pytest.fixture +def tips_file(parser_data): """Path to the tips dataset""" - return os.path.join(HERE, 'parser', 'data', 'tips.csv') + return os.path.join(parser_data, 'tips.csv') -@pytest.fixture(scope='module') -def jsonl_file(): +@pytest.fixture +def jsonl_file(parser_data): """Path a JSONL dataset""" - return os.path.join(HERE, 'parser', 'data', 'items.jsonl') + return os.path.join(parser_data, 'items.jsonl') -@pytest.fixture(scope='module') -def salaries_table(): +@pytest.fixture +def salaries_table(parser_data): """DataFrame with the salaries dataset""" - path = os.path.join(HERE, 'parser', 'data', 'salaries.csv') + path = os.path.join(parser_data, 'salaries.csv') return read_table(path) -@pytest.fixture(scope='module') +@pytest.fixture def s3_resource(tips_file, jsonl_file): """Fixture for mocking S3 interaction. @@ -41,8 +45,8 @@ def s3_resource(tips_file, jsonl_file): is yielded by the fixture. 
""" pytest.importorskip('s3fs') + boto3 = pytest.importorskip('boto3') moto = pytest.importorskip('moto') - moto.mock_s3().start() test_s3_files = [ ('tips.csv', tips_file), @@ -58,17 +62,22 @@ def add_tips_files(bucket_name): Key=s3_key, Body=f) - boto3 = pytest.importorskip('boto3') - # see gh-16135 - bucket = 'pandas-test' + try: - conn = boto3.resource("s3", region_name="us-east-1") - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) + s3 = moto.mock_s3() + s3.start() - conn.create_bucket(Bucket='cant_get_it', ACL='private') - add_tips_files('cant_get_it') + # see gh-16135 + bucket = 'pandas-test' + conn = boto3.resource("s3", region_name="us-east-1") - yield conn + conn.create_bucket(Bucket=bucket) + add_tips_files(bucket) - moto.mock_s3().stop() + conn.create_bucket(Bucket='cant_get_it', ACL='private') + add_tips_files('cant_get_it') + yield conn + except: # noqa: flake8 + pytest.skip("failure to use s3 resource") + finally: + s3.stop() diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 10139eb07a925..a72744e08fa7c 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1039,7 +1039,6 @@ def test_read_inline_jsonl(self): assert_frame_equal(result, expected) def test_read_s3_jsonl(self, s3_resource): - pytest.importorskip('s3fs') # GH17200 result = read_json('s3n://pandas-test/items.jsonl', lines=True) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 10f6cef04b593..f16338fda6245 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -46,6 +46,7 @@ def check_compressed_urls(salaries_table, compression, extension, mode, class TestS3(object): + @tm.network def test_parse_public_s3_bucket(self): pytest.importorskip('s3fs') @@ -65,7 +66,8 @@ def test_parse_public_s3_bucket(self): assert not df.empty tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df) - def test_parse_public_s3n_bucket(self, s3_resource): + @tm.network + def test_parse_public_s3n_bucket(self): # Read from AWS s3 as "s3n" URL df = read_csv('s3n://pandas-test/tips.csv', nrows=10) @@ -74,7 +76,8 @@ def test_parse_public_s3n_bucket(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')).iloc[:10], df) - def test_parse_public_s3a_bucket(self, s3_resource): + @tm.network + def test_parse_public_s3a_bucket(self): # Read from AWS s3 as "s3a" URL df = read_csv('s3a://pandas-test/tips.csv', nrows=10) assert isinstance(df, DataFrame) @@ -82,7 +85,8 @@ def test_parse_public_s3a_bucket(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')).iloc[:10], df) - def test_parse_public_s3_bucket_nrows(self, s3_resource): + @tm.network + def test_parse_public_s3_bucket_nrows(self): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, nrows=10, compression=comp) @@ -91,7 +95,8 @@ def test_parse_public_s3_bucket_nrows(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')).iloc[:10], df) - def test_parse_public_s3_bucket_chunked(self, s3_resource): + @tm.network + def test_parse_public_s3_bucket_chunked(self): # Read with a chunksize chunksize = 5 local_tips = read_csv(tm.get_data_path('tips.csv')) @@ -109,7 +114,8 @@ def test_parse_public_s3_bucket_chunked(self, s3_resource): chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_chunked_python(self, 
s3_resource): + @tm.network + def test_parse_public_s3_bucket_chunked_python(self): # Read with a chunksize using the Python parser chunksize = 5 local_tips = read_csv(tm.get_data_path('tips.csv')) @@ -127,7 +133,8 @@ def test_parse_public_s3_bucket_chunked_python(self, s3_resource): chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_python(self, s3_resource): + @tm.network + def test_parse_public_s3_bucket_python(self): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression=comp) @@ -136,7 +143,8 @@ def test_parse_public_s3_bucket_python(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')), df) - def test_infer_s3_compression(self, s3_resource): + @tm.network + def test_infer_s3_compression(self): for ext in ['', '.gz', '.bz2']: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression='infer') @@ -145,7 +153,8 @@ def test_infer_s3_compression(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')), df) - def test_parse_public_s3_bucket_nrows_python(self, s3_resource): + @tm.network + def test_parse_public_s3_bucket_nrows_python(self): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', nrows=10, compression=comp) @@ -154,7 +163,8 @@ def test_parse_public_s3_bucket_nrows_python(self, s3_resource): tm.assert_frame_equal(read_csv( tm.get_data_path('tips.csv')).iloc[:10], df) - def test_s3_fails(self, s3_resource): + @tm.network + def test_s3_fails(self): with pytest.raises(IOError): read_csv('s3://nyqpug/asdf.csv') diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index a0070dce6a7f1..a89156db38ae3 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -102,15 +102,19 @@ def test_infer_compression_from_path(self, extension, expected, path_type): def test_get_filepath_or_buffer_with_path(self): filename = '~/sometest' - filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename) + filepath_or_buffer, _, _, should_close = common.get_filepath_or_buffer( + filename) assert filepath_or_buffer != filename assert isabs(filepath_or_buffer) assert os.path.expanduser(filename) == filepath_or_buffer + assert not should_close def test_get_filepath_or_buffer_with_buffer(self): input_buffer = StringIO() - filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer) + filepath_or_buffer, _, _, should_close = common.get_filepath_or_buffer( + input_buffer) assert filepath_or_buffer == input_buffer + assert not should_close def test_iterator(self): reader = read_csv(StringIO(self.data1), chunksize=1)
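The caller-side pattern the new fourth return value enables looks roughly like this (a sketch; `reader` is a placeholder for whichever parser consumes the buffer, and the wrapper function is hypothetical, not part of the diff):

```python
from pandas.io.common import get_filepath_or_buffer

def read_with_cleanup(path_or_buf, reader, encoding=None, compression=None):
    # reader: placeholder callable; the real callers are read_csv/read_json/etc.
    fp_or_buf, encoding, compression, should_close = get_filepath_or_buffer(
        path_or_buf, encoding=encoding, compression=compression)
    try:
        return reader(fp_or_buf)
    finally:
        # only close handles this layer opened itself (URLs, s3 files)
        if should_close:
            try:
                fp_or_buf.close()
            except Exception:
                pass
```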
https://api.github.com/repos/pandas-dev/pandas/pulls/19645
2018-02-11T14:43:33Z
2018-02-13T00:12:56Z
2018-02-13T00:12:56Z
2018-09-27T18:13:49Z
tests for tslibs.conversion and tslibs.timezones
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 565e735c14c80..97326dc04a522 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -2,15 +2,10 @@ import pytest import pytz -import dateutil -import numpy as np from datetime import datetime -import pandas.util.testing as tm -from pandas.core.indexes.datetimes import date_range -from pandas._libs import tslib -from pandas._libs.tslibs import timezones, conversion +from pandas._libs.tslibs import timezones from pandas import Timestamp @@ -111,82 +106,3 @@ def localize(self, tz, x): def normalize(self, ts): # no-op for dateutil return ts - - def test_tzlocal(self): - # GH 13583 - ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal()) - assert ts.tz == dateutil.tz.tzlocal() - assert "tz='tzlocal()')" in repr(ts) - - tz = timezones.maybe_get_tz('tzlocal()') - assert tz == dateutil.tz.tzlocal() - - # get offset using normal datetime for test - offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) - offset = offset.total_seconds() * 1000000000 - assert ts.value + offset == Timestamp('2011-01-01').value - - -class TestTimeZoneCacheKey(object): - - @pytest.mark.parametrize('tz_name', list(pytz.common_timezones)) - def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self, tz_name): - if tz_name == 'UTC': - # skip utc as it's a special case in dateutil - return - tz_p = timezones.maybe_get_tz(tz_name) - tz_d = timezones.maybe_get_tz('dateutil/' + tz_name) - if tz_d is None: - # skip timezones that dateutil doesn't know about. - return - assert (timezones._p_tz_cache_key(tz_p) != - timezones._p_tz_cache_key(tz_d)) - - -class TestTslib(object): - - def test_tslib_tz_convert(self): - def compare_utc_to_local(tz_didx, utc_didx): - f = lambda x: conversion.tz_convert_single(x, 'UTC', tz_didx.tz) - result = conversion.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz) - result_single = np.vectorize(f)(tz_didx.asi8) - tm.assert_numpy_array_equal(result, result_single) - - def compare_local_to_utc(tz_didx, utc_didx): - f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, 'UTC') - result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC') - result_single = np.vectorize(f)(utc_didx.asi8) - tm.assert_numpy_array_equal(result, result_single) - - for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']: - # US: 2014-03-09 - 2014-11-11 - # MOSCOW: 2014-10-26 / 2014-12-31 - tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz) - utc_didx = date_range('2014-03-01', '2015-01-10', freq='H') - compare_utc_to_local(tz_didx, utc_didx) - # local tz to UTC can be differ in hourly (or higher) freqs because - # of DST - compare_local_to_utc(tz_didx, utc_didx) - - tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz) - utc_didx = date_range('2000-01-01', '2020-01-01', freq='D') - compare_utc_to_local(tz_didx, utc_didx) - compare_local_to_utc(tz_didx, utc_didx) - - tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz) - utc_didx = date_range('2000-01-01', '2100-01-01', freq='A') - compare_utc_to_local(tz_didx, utc_didx) - compare_local_to_utc(tz_didx, utc_didx) - - # Check empty array - result = conversion.tz_convert(np.array([], dtype=np.int64), - timezones.maybe_get_tz('US/Eastern'), - timezones.maybe_get_tz('Asia/Tokyo')) - tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) - - # Check all-NaT array - result = conversion.tz_convert(np.array([tslib.iNaT], dtype=np.int64), - timezones.maybe_get_tz('US/Eastern'), 
- timezones.maybe_get_tz('Asia/Tokyo')) - tm.assert_numpy_array_equal(result, np.array( - [tslib.iNaT], dtype=np.int64)) diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py new file mode 100644 index 0000000000000..76038136c26cb --- /dev/null +++ b/pandas/tests/tslibs/test_conversion.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +import pandas.util.testing as tm +from pandas import date_range +from pandas._libs.tslib import iNaT +from pandas._libs.tslibs import conversion, timezones + + +def compare_utc_to_local(tz_didx, utc_didx): + f = lambda x: conversion.tz_convert_single(x, 'UTC', tz_didx.tz) + result = conversion.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz) + result_single = np.vectorize(f)(tz_didx.asi8) + tm.assert_numpy_array_equal(result, result_single) + + +def compare_local_to_utc(tz_didx, utc_didx): + f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, 'UTC') + result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC') + result_single = np.vectorize(f)(utc_didx.asi8) + tm.assert_numpy_array_equal(result, result_single) + + +class TestTZConvert(object): + + @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', + 'US/Eastern', 'Europe/Moscow']) + def test_tz_convert_single_matches_tz_convert_hourly(self, tz): + # US: 2014-03-09 - 2014-11-11 + # MOSCOW: 2014-10-26 / 2014-12-31 + tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz) + utc_didx = date_range('2014-03-01', '2015-01-10', freq='H') + compare_utc_to_local(tz_didx, utc_didx) + + # local tz to UTC can be differ in hourly (or higher) freqs because + # of DST + compare_local_to_utc(tz_didx, utc_didx) + + @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', + 'US/Eastern', 'Europe/Moscow']) + @pytest.mark.parametrize('freq', ['D', 'A']) + def test_tz_convert_single_matches_tz_convert(self, tz, freq): + tz_didx = date_range('2000-01-01', '2020-01-01', freq=freq, tz=tz) + utc_didx = date_range('2000-01-01', '2020-01-01', freq=freq) + compare_utc_to_local(tz_didx, utc_didx) + compare_local_to_utc(tz_didx, utc_didx) + + @pytest.mark.parametrize('arr', [ + pytest.param(np.array([], dtype=np.int64), id='empty'), + pytest.param(np.array([iNaT], dtype=np.int64), id='all_nat')]) + def test_tz_convert_corner(self, arr): + result = conversion.tz_convert(arr, + timezones.maybe_get_tz('US/Eastern'), + timezones.maybe_get_tz('Asia/Tokyo')) + tm.assert_numpy_array_equal(result, arr) diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py new file mode 100644 index 0000000000000..603c5e3fea26f --- /dev/null +++ b/pandas/tests/tslibs/test_timezones.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +from datetime import datetime + +import pytest +import pytz +import dateutil.tz + +from pandas._libs.tslibs import timezones +from pandas import Timestamp + + +@pytest.mark.parametrize('tz_name', list(pytz.common_timezones)) +def test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name): + if tz_name == 'UTC': + # skip utc as it's a special case in dateutil + return + tz_p = timezones.maybe_get_tz(tz_name) + tz_d = timezones.maybe_get_tz('dateutil/' + tz_name) + if tz_d is None: + # skip timezones that dateutil doesn't know about. 
+ return + assert timezones._p_tz_cache_key(tz_p) != timezones._p_tz_cache_key(tz_d) + + +def test_tzlocal(): + # GH#13583 + ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal()) + assert ts.tz == dateutil.tz.tzlocal() + assert "tz='tzlocal()')" in repr(ts) + + tz = timezones.maybe_get_tz('tzlocal()') + assert tz == dateutil.tz.tzlocal() + + # get offset using normal datetime for test + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) + offset = offset.total_seconds() * 1000000000 + assert ts.value + offset == Timestamp('2011-01-01').value
This is the last one.
https://api.github.com/repos/pandas-dev/pandas/pulls/19642
2018-02-10T19:49:07Z
2018-02-14T11:26:51Z
2018-02-14T11:26:51Z
2018-02-14T15:00:17Z
Organize PeriodIndex tests
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 356ea5fc656de..81171920f635f 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -11,6 +11,171 @@ import pandas.core.indexes.period as period +class TestPeriodIndexComparisons(object): + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) + def test_pi_cmp_pi(self, freq): + base = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], + freq=freq) + per = Period('2011-02', freq=freq) + + exp = np.array([False, True, False, False]) + tm.assert_numpy_array_equal(base == per, exp) + tm.assert_numpy_array_equal(per == base, exp) + + exp = np.array([True, False, True, True]) + tm.assert_numpy_array_equal(base != per, exp) + tm.assert_numpy_array_equal(per != base, exp) + + exp = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(base > per, exp) + tm.assert_numpy_array_equal(per < base, exp) + + exp = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(base < per, exp) + tm.assert_numpy_array_equal(per > base, exp) + + exp = np.array([False, True, True, True]) + tm.assert_numpy_array_equal(base >= per, exp) + tm.assert_numpy_array_equal(per <= base, exp) + + exp = np.array([True, True, False, False]) + tm.assert_numpy_array_equal(base <= per, exp) + tm.assert_numpy_array_equal(per >= base, exp) + + idx = PeriodIndex(['2011-02', '2011-01', '2011-03', '2011-05'], + freq=freq) + + exp = np.array([False, False, True, False]) + tm.assert_numpy_array_equal(base == idx, exp) + + exp = np.array([True, True, False, True]) + tm.assert_numpy_array_equal(base != idx, exp) + + exp = np.array([False, True, False, False]) + tm.assert_numpy_array_equal(base > idx, exp) + + exp = np.array([True, False, False, True]) + tm.assert_numpy_array_equal(base < idx, exp) + + exp = np.array([False, True, True, False]) + tm.assert_numpy_array_equal(base >= idx, exp) + + exp = np.array([True, False, True, True]) + tm.assert_numpy_array_equal(base <= idx, exp) + + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) + def test_pi_cmp_pi_mismatched_freq_raises(self, freq): + # different base freq + base = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], + freq=freq) + + msg = "Input has different freq=A-DEC from PeriodIndex" + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + base <= Period('2011', freq='A') + + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + Period('2011', freq='A') >= base + + idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A') + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + base <= idx + + # Different frequency + msg = "Input has different freq=4M from PeriodIndex" + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + base <= Period('2011', freq='4M') + + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + Period('2011', freq='4M') >= base + + idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M') + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + base <= idx + + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) + def test_pi_cmp_nat(self, freq): + idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq) + + result = idx1 > Period('2011-02', freq=freq) + exp = np.array([False, False, False, True]) + tm.assert_numpy_array_equal(result, exp) + result = Period('2011-02', freq=freq) < idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == 
Period('NaT', freq=freq) + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + result = Period('NaT', freq=freq) == idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != Period('NaT', freq=freq) + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + result = Period('NaT', freq=freq) != idx1 + tm.assert_numpy_array_equal(result, exp) + + idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq=freq) + result = idx1 < idx2 + exp = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx2 + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx2 + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx1 + exp = np.array([True, True, False, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx1 + exp = np.array([False, False, True, False]) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) + def test_pi_cmp_nat_mismatched_freq_raises(self, freq): + idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq) + + diff = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='4M') + msg = "Input has different freq=4M from PeriodIndex" + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + idx1 > diff + + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + idx1 == diff + + # TODO: De-duplicate with test_pi_cmp_nat + def test_comp_nat(self): + left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, + pd.Period('2011-01-03')]) + right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) + + for lhs, rhs in [(left, right), + (left.astype(object), right.astype(object))]: + result = lhs == rhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = lhs != rhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > lhs, expected) + + class TestPeriodIndexArithmetic(object): def test_pi_add_offset_array(self): # GH#18849 @@ -250,6 +415,97 @@ def test_sub_isub(self): rng -= 1 tm.assert_index_equal(rng, expected) + # --------------------------------------------------------------- + # PeriodIndex.shift is used by __add__ and __sub__ + + def test_pi_shift_ndarray(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', + '2011-04'], freq='M', name='idx') + result = idx.shift(np.array([1, 2, 3, 4])) + expected = PeriodIndex(['2011-02', '2011-04', 'NaT', + '2011-08'], freq='M', name='idx') + tm.assert_index_equal(result, expected) + + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', + '2011-04'], freq='M', name='idx') + result = idx.shift(np.array([1, -2, 3, -4])) + expected = PeriodIndex(['2011-02', '2010-12', 'NaT', + '2010-12'], freq='M', name='idx') + tm.assert_index_equal(result, expected) + + def test_shift(self): + pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='A', 
start='1/1/2002', end='12/1/2010') + + tm.assert_index_equal(pi1.shift(0), pi1) + + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008') + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010') + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009') + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009') + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') + pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009') + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + def test_shift_corner_cases(self): + # GH#9903 + idx = pd.PeriodIndex([], name='xxx', freq='H') + + with pytest.raises(TypeError): + # period shift doesn't accept freq + idx.shift(1, freq='H') + + tm.assert_index_equal(idx.shift(0), idx) + tm.assert_index_equal(idx.shift(3), idx) + + idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00' + '2011-01-01 12:00'], name='xxx', freq='H') + tm.assert_index_equal(idx.shift(0), idx) + exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00' + '2011-01-01 15:00'], name='xxx', freq='H') + tm.assert_index_equal(idx.shift(3), exp) + exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00' + '2011-01-01 09:00'], name='xxx', freq='H') + tm.assert_index_equal(idx.shift(-3), exp) + + def test_shift_nat(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', + '2011-04'], freq='M', name='idx') + result = idx.shift(1) + expected = PeriodIndex(['2011-02', '2011-03', 'NaT', + '2011-05'], freq='M', name='idx') + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + def test_shift_gh8083(self): + # test shift for PeriodIndex + # GH#8083 + drange = pd.period_range('20130101', periods=5, freq='D') + result = drange.shift(1) + expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04', + '2013-01-05', '2013-01-06'], freq='D') + tm.assert_index_equal(result, expected) + class TestPeriodIndexSeriesMethods(object): """ Test PeriodIndex and Period Series Ops consistency """ diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 21a9ffdde3444..8745de0c2a7aa 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -1,11 +1,9 @@ -import pytest import numpy as np import pandas as pd import pandas._libs.tslib as tslib import pandas.util.testing as tm -import pandas.core.indexes.period as period from pandas import (DatetimeIndex, PeriodIndex, Series, Period, _np_version_under1p10, Index) @@ -521,25 +519,8 @@ def test_nat_new(self): tm.assert_numpy_array_equal(result, exp) def test_shift(self): - # GH 9903 - idx = pd.PeriodIndex([], name='xxx', freq='H') - - with pytest.raises(TypeError): - # period shift doesn't accept freq - idx.shift(1, freq='H') - - tm.assert_index_equal(idx.shift(0), idx) - tm.assert_index_equal(idx.shift(3), idx) - - idx = 
pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00' - '2011-01-01 12:00'], name='xxx', freq='H') - tm.assert_index_equal(idx.shift(0), idx) - exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00' - '2011-01-01 15:00'], name='xxx', freq='H') - tm.assert_index_equal(idx.shift(3), exp) - exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00' - '2011-01-01 09:00'], name='xxx', freq='H') - tm.assert_index_equal(idx.shift(-3), exp) + # This is tested in test_arithmetic + pass def test_repeat(self): index = pd.period_range('2001-01-01', periods=2, freq='D') @@ -703,172 +684,3 @@ def test_pi_comp_period_nat(self): f = lambda x: tslib.NaT >= x exp = np.array([False, False, False, False], dtype=np.bool) self._check(idx, f, exp) - - -class TestPeriodIndexComparisons(object): - - def test_pi_pi_comp(self): - - for freq in ['M', '2M', '3M']: - base = PeriodIndex(['2011-01', '2011-02', - '2011-03', '2011-04'], freq=freq) - p = Period('2011-02', freq=freq) - - exp = np.array([False, True, False, False]) - tm.assert_numpy_array_equal(base == p, exp) - tm.assert_numpy_array_equal(p == base, exp) - - exp = np.array([True, False, True, True]) - tm.assert_numpy_array_equal(base != p, exp) - tm.assert_numpy_array_equal(p != base, exp) - - exp = np.array([False, False, True, True]) - tm.assert_numpy_array_equal(base > p, exp) - tm.assert_numpy_array_equal(p < base, exp) - - exp = np.array([True, False, False, False]) - tm.assert_numpy_array_equal(base < p, exp) - tm.assert_numpy_array_equal(p > base, exp) - - exp = np.array([False, True, True, True]) - tm.assert_numpy_array_equal(base >= p, exp) - tm.assert_numpy_array_equal(p <= base, exp) - - exp = np.array([True, True, False, False]) - tm.assert_numpy_array_equal(base <= p, exp) - tm.assert_numpy_array_equal(p >= base, exp) - - idx = PeriodIndex(['2011-02', '2011-01', '2011-03', - '2011-05'], freq=freq) - - exp = np.array([False, False, True, False]) - tm.assert_numpy_array_equal(base == idx, exp) - - exp = np.array([True, True, False, True]) - tm.assert_numpy_array_equal(base != idx, exp) - - exp = np.array([False, True, False, False]) - tm.assert_numpy_array_equal(base > idx, exp) - - exp = np.array([True, False, False, True]) - tm.assert_numpy_array_equal(base < idx, exp) - - exp = np.array([False, True, True, False]) - tm.assert_numpy_array_equal(base >= idx, exp) - - exp = np.array([True, False, True, True]) - tm.assert_numpy_array_equal(base <= idx, exp) - - # different base freq - msg = "Input has different freq=A-DEC from PeriodIndex" - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - base <= Period('2011', freq='A') - - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - Period('2011', freq='A') >= base - - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A') - base <= idx - - # Different frequency - msg = "Input has different freq=4M from PeriodIndex" - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - base <= Period('2011', freq='4M') - - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - Period('2011', freq='4M') >= base - - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M') - base <= idx - - def test_pi_nat_comp(self): - for freq in ['M', '2M', '3M']: - idx1 = PeriodIndex( - ['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq) - - result = idx1 > Period('2011-02', freq=freq) - exp = np.array([False, 
False, False, True]) - tm.assert_numpy_array_equal(result, exp) - result = Period('2011-02', freq=freq) < idx1 - tm.assert_numpy_array_equal(result, exp) - - result = idx1 == Period('NaT', freq=freq) - exp = np.array([False, False, False, False]) - tm.assert_numpy_array_equal(result, exp) - result = Period('NaT', freq=freq) == idx1 - tm.assert_numpy_array_equal(result, exp) - - result = idx1 != Period('NaT', freq=freq) - exp = np.array([True, True, True, True]) - tm.assert_numpy_array_equal(result, exp) - result = Period('NaT', freq=freq) != idx1 - tm.assert_numpy_array_equal(result, exp) - - idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', - 'NaT'], freq=freq) - result = idx1 < idx2 - exp = np.array([True, False, False, False]) - tm.assert_numpy_array_equal(result, exp) - - result = idx1 == idx2 - exp = np.array([False, False, False, False]) - tm.assert_numpy_array_equal(result, exp) - - result = idx1 != idx2 - exp = np.array([True, True, True, True]) - tm.assert_numpy_array_equal(result, exp) - - result = idx1 == idx1 - exp = np.array([True, True, False, True]) - tm.assert_numpy_array_equal(result, exp) - - result = idx1 != idx1 - exp = np.array([False, False, True, False]) - tm.assert_numpy_array_equal(result, exp) - - diff = PeriodIndex(['2011-02', '2011-01', '2011-04', - 'NaT'], freq='4M') - msg = "Input has different freq=4M from PeriodIndex" - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - idx1 > diff - - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - idx1 == diff - - # TODO: De-duplicate with test_pi_nat_comp - def test_comp_nat(self): - left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, - pd.Period('2011-01-03')]) - right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) - - for lhs, rhs in [(left, right), - (left.astype(object), right.astype(object))]: - result = lhs == rhs - expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = lhs != rhs - expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) - - expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 6fc7fa5486f82..f3469b829f8a3 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -453,16 +453,6 @@ def test_periods_number_check(self): with pytest.raises(ValueError): period_range('2011-1-1', '2012-1-1', 'B') - def test_start_time(self): - index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') - expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS') - tm.assert_index_equal(index.start_time, expected_index) - - def test_end_time(self): - index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') - expected_index = date_range('2016-01-01', end='2016-05-31', freq='M') - tm.assert_index_equal(index.end_time, expected_index) - def test_index_duplicate_periods(self): # monotonic idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN') @@ -495,78 +485,14 @@ def test_index_unique(self): 
tm.assert_index_equal(idx.unique(), expected) assert idx.nunique() == 3 - def test_shift_gh8083(self): - - # test shift for PeriodIndex - # GH8083 - drange = self.create_index() - result = drange.shift(1) - expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04', - '2013-01-05', '2013-01-06'], freq='D') - tm.assert_index_equal(result, expected) - def test_shift(self): - pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010') - - tm.assert_index_equal(pi1.shift(0), pi1) - - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(1), pi2) - - pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008') - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(-1), pi2) - - pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010') - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(1), pi2) - - pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009') - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(-1), pi2) - - pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009') - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(1), pi2) - - pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009') - assert len(pi1) == len(pi2) - tm.assert_index_equal(pi1.shift(-1), pi2) - - def test_shift_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') - result = idx.shift(1) - expected = PeriodIndex(['2011-02', '2011-03', 'NaT', - '2011-05'], freq='M', name='idx') - tm.assert_index_equal(result, expected) - assert result.name == expected.name + # This is tested in test_arithmetic + pass @td.skip_if_32bit def test_ndarray_compat_properties(self): super(TestPeriodIndex, self).test_ndarray_compat_properties() - def test_shift_ndarray(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') - result = idx.shift(np.array([1, 2, 3, 4])) - expected = PeriodIndex(['2011-02', '2011-04', 'NaT', - '2011-08'], freq='M', name='idx') - tm.assert_index_equal(result, expected) - - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', - '2011-04'], freq='M', name='idx') - result = idx.shift(np.array([1, -2, 3, -4])) - expected = PeriodIndex(['2011-02', '2010-12', 'NaT', - '2010-12'], freq='M', name='idx') - tm.assert_index_equal(result, expected) - def test_negative_ordinals(self): Period(ordinal=-1000, freq='A') Period(ordinal=0, freq='A') diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py new file mode 100644 index 0000000000000..56bd2adf58719 --- /dev/null +++ b/pandas/tests/indexes/period/test_scalar_compat.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +"""Tests for PeriodIndex behaving like a vectorized Period scalar""" + +from pandas import PeriodIndex, date_range +import pandas.util.testing as tm + + +class TestPeriodIndexOps(object): + def test_start_time(self): + index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') + expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS') + tm.assert_index_equal(index.start_time, expected_index) + + def test_end_time(self): + index = 
PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') + expected_index = date_range('2016-01-01', end='2016-05-31', freq='M') + tm.assert_index_equal(index.end_time, expected_index) diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 0e72cadb5d494..f5e7c8269dc4f 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -6,8 +6,6 @@ import pandas.core.indexes.period as period from pandas.compat import lrange -from pandas._libs.tslibs.frequencies import get_freq -from pandas._libs.tslibs.period import period_ordinal, period_asfreq from pandas._libs.tslibs.ccalendar import MONTHS from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series, @@ -76,83 +74,6 @@ def test_negone_ordinals(self): repr(period) -class TestTslib(object): - def test_intraday_conversion_factors(self): - assert period_asfreq(1, get_freq('D'), get_freq('H'), False) == 24 - assert period_asfreq(1, get_freq('D'), get_freq('T'), False) == 1440 - assert period_asfreq(1, get_freq('D'), get_freq('S'), False) == 86400 - assert period_asfreq(1, get_freq('D'), - get_freq('L'), False) == 86400000 - assert period_asfreq(1, get_freq('D'), - get_freq('U'), False) == 86400000000 - assert period_asfreq(1, get_freq('D'), - get_freq('N'), False) == 86400000000000 - - assert period_asfreq(1, get_freq('H'), get_freq('T'), False) == 60 - assert period_asfreq(1, get_freq('H'), get_freq('S'), False) == 3600 - assert period_asfreq(1, get_freq('H'), - get_freq('L'), False) == 3600000 - assert period_asfreq(1, get_freq('H'), - get_freq('U'), False) == 3600000000 - assert period_asfreq(1, get_freq('H'), - get_freq('N'), False) == 3600000000000 - - assert period_asfreq(1, get_freq('T'), get_freq('S'), False) == 60 - assert period_asfreq(1, get_freq('T'), get_freq('L'), False) == 60000 - assert period_asfreq(1, get_freq('T'), - get_freq('U'), False) == 60000000 - assert period_asfreq(1, get_freq('T'), - get_freq('N'), False) == 60000000000 - - assert period_asfreq(1, get_freq('S'), get_freq('L'), False) == 1000 - assert period_asfreq(1, get_freq('S'), - get_freq('U'), False) == 1000000 - assert period_asfreq(1, get_freq('S'), - get_freq('N'), False) == 1000000000 - - assert period_asfreq(1, get_freq('L'), get_freq('U'), False) == 1000 - assert period_asfreq(1, get_freq('L'), - get_freq('N'), False) == 1000000 - - assert period_asfreq(1, get_freq('U'), get_freq('N'), False) == 1000 - - def test_period_ordinal_start_values(self): - # information for 1.1.1970 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('A')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')) == 1 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')) == 0 - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')) == 0 - - def test_period_ordinal_week(self): - assert period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')) == 1 - assert period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')) == 2 - assert period_ordinal(2013, 10, 6, 0, - 0, 0, 0, 0, get_freq('W')) == 2284 - assert period_ordinal(2013, 10, 7, 0, - 0, 0, 0, 0, get_freq('W')) == 2285 - - def test_period_ordinal_business_day(self): - # Thursday - assert period_ordinal(2013, 10, 3, 0, - 0, 0, 0, 0, get_freq('B')) == 11415 - # Friday - assert period_ordinal(2013, 10, 4, 0, - 0, 0, 0, 0, get_freq('B')) == 11416 - # Saturday - assert period_ordinal(2013, 10, 5, 0, - 0, 0, 0, 0, 
get_freq('B')) == 11417 - # Sunday - assert period_ordinal(2013, 10, 6, 0, - 0, 0, 0, 0, get_freq('B')) == 11417 - # Monday - assert period_ordinal(2013, 10, 7, 0, - 0, 0, 0, 0, get_freq('B')) == 11417 - # Tuesday - assert period_ordinal(2013, 10, 8, 0, - 0, 0, 0, 0, get_freq('B')) == 11418 - - class TestPeriodIndex(object): def setup_method(self, method): diff --git a/pandas/tests/tslibs/test_period_asfreq.py b/pandas/tests/tslibs/test_period_asfreq.py new file mode 100644 index 0000000000000..98959adf6fda4 --- /dev/null +++ b/pandas/tests/tslibs/test_period_asfreq.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +from pandas._libs.tslibs.frequencies import get_freq +from pandas._libs.tslibs.period import period_ordinal, period_asfreq + + +class TestPeriodFreqConversion(object): + def test_intraday_conversion_factors(self): + assert period_asfreq(1, get_freq('D'), get_freq('H'), False) == 24 + assert period_asfreq(1, get_freq('D'), get_freq('T'), False) == 1440 + assert period_asfreq(1, get_freq('D'), get_freq('S'), False) == 86400 + assert period_asfreq(1, get_freq('D'), + get_freq('L'), False) == 86400000 + assert period_asfreq(1, get_freq('D'), + get_freq('U'), False) == 86400000000 + assert period_asfreq(1, get_freq('D'), + get_freq('N'), False) == 86400000000000 + + assert period_asfreq(1, get_freq('H'), get_freq('T'), False) == 60 + assert period_asfreq(1, get_freq('H'), get_freq('S'), False) == 3600 + assert period_asfreq(1, get_freq('H'), + get_freq('L'), False) == 3600000 + assert period_asfreq(1, get_freq('H'), + get_freq('U'), False) == 3600000000 + assert period_asfreq(1, get_freq('H'), + get_freq('N'), False) == 3600000000000 + + assert period_asfreq(1, get_freq('T'), get_freq('S'), False) == 60 + assert period_asfreq(1, get_freq('T'), get_freq('L'), False) == 60000 + assert period_asfreq(1, get_freq('T'), + get_freq('U'), False) == 60000000 + assert period_asfreq(1, get_freq('T'), + get_freq('N'), False) == 60000000000 + + assert period_asfreq(1, get_freq('S'), get_freq('L'), False) == 1000 + assert period_asfreq(1, get_freq('S'), + get_freq('U'), False) == 1000000 + assert period_asfreq(1, get_freq('S'), + get_freq('N'), False) == 1000000000 + + assert period_asfreq(1, get_freq('L'), get_freq('U'), False) == 1000 + assert period_asfreq(1, get_freq('L'), + get_freq('N'), False) == 1000000 + + assert period_asfreq(1, get_freq('U'), get_freq('N'), False) == 1000 + + def test_period_ordinal_start_values(self): + # information for 1.1.1970 + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('A')) == 0 + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')) == 0 + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')) == 1 + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')) == 0 + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')) == 0 + + def test_period_ordinal_week(self): + assert period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')) == 1 + assert period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')) == 2 + assert period_ordinal(2013, 10, 6, 0, + 0, 0, 0, 0, get_freq('W')) == 2284 + assert period_ordinal(2013, 10, 7, 0, + 0, 0, 0, 0, get_freq('W')) == 2285 + + def test_period_ordinal_business_day(self): + # Thursday + assert period_ordinal(2013, 10, 3, 0, + 0, 0, 0, 0, get_freq('B')) == 11415 + # Friday + assert period_ordinal(2013, 10, 4, 0, + 0, 0, 0, 0, get_freq('B')) == 11416 + # Saturday + assert period_ordinal(2013, 10, 5, 0, + 0, 0, 0, 0, get_freq('B')) == 11417 + # Sunday + assert period_ordinal(2013, 10, 6, 0, 
+ 0, 0, 0, 0, get_freq('B')) == 11417 + # Monday + assert period_ordinal(2013, 10, 7, 0, + 0, 0, 0, 0, get_freq('B')) == 11417 + # Tuesday + assert period_ordinal(2013, 10, 8, 0, + 0, 0, 0, 0, get_freq('B')) == 11418
Crammed a lot of #19627 into this. Also did some new parametrization; a minimal sketch of the pattern follows.
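A minimal sketch of the parametrization pattern used here, with names and values taken from the diff above (the test name is illustrative): instead of looping over freqs inside a single test body, `pytest.mark.parametrize` generates one test case per freq, so a failure reports exactly which freq broke.

```python
import numpy as np
import pytest

import pandas.util.testing as tm
from pandas import Period, PeriodIndex


@pytest.mark.parametrize('freq', ['M', '2M', '3M'])
def test_pi_eq_period(freq):
    # collected as three separate tests, one per freq
    base = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
                       freq=freq)
    per = Period('2011-02', freq=freq)
    expected = np.array([False, True, False, False])
    tm.assert_numpy_array_equal(base == per, expected)
```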
https://api.github.com/repos/pandas-dev/pandas/pulls/19641
2018-02-10T19:44:34Z
2018-02-11T14:56:29Z
2018-02-11T14:56:29Z
2018-02-12T16:32:31Z
move array_to_datetime tests
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index bd3fa5e73cd11..b5926933544e8 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -8,7 +8,7 @@ import dateutil import numpy as np from dateutil.parser import parse -from datetime import datetime, date, time, timedelta +from datetime import datetime, date, time from distutils.version import LooseVersion import pandas as pd @@ -19,7 +19,6 @@ from pandas.errors import OutOfBoundsDatetime from pandas.compat import lmap, PY3 -from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.util import testing as tm import pandas.util._test_decorators as td @@ -803,6 +802,15 @@ def test_dataframe_dtypes(self, cache): class TestToDatetimeMisc(object): + def test_to_datetime_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object) + + with pytest.raises(OutOfBoundsDatetime): + to_datetime(arr) + @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_iso8601(self, cache): result = to_datetime(["2012-01-01 00:00:00"], cache=cache) @@ -1464,180 +1472,6 @@ def test_parsers_timezone_minute_offsets_roundtrip(self, cache): converted_time = dt_time.tz_localize('UTC').tz_convert(tz) assert dt_string_repr == repr(converted_time) - def test_parsers_iso8601(self): - # GH 12060 - # test only the iso parser - flexibility to different - # separators and leadings 0s - # Timestamp construction falls back to dateutil - cases = {'2011-01-02': datetime(2011, 1, 2), - '2011-1-2': datetime(2011, 1, 2), - '2011-01': datetime(2011, 1, 1), - '2011-1': datetime(2011, 1, 1), - '2011 01 02': datetime(2011, 1, 2), - '2011.01.02': datetime(2011, 1, 2), - '2011/01/02': datetime(2011, 1, 2), - '2011\\01\\02': datetime(2011, 1, 2), - '2013-01-01 05:30:00': datetime(2013, 1, 1, 5, 30), - '2013-1-1 5:30:00': datetime(2013, 1, 1, 5, 30)} - for date_str, exp in compat.iteritems(cases): - actual = tslib._test_parse_iso8601(date_str) - assert actual == exp - - # separators must all match - YYYYMM not valid - invalid_cases = ['2011-01/02', '2011^11^11', - '201401', '201111', '200101', - # mixed separated and unseparated - '2005-0101', '200501-01', - '20010101 12:3456', '20010101 1234:56', - # HHMMSS must have two digits in each component - # if unseparated - '20010101 1', '20010101 123', '20010101 12345', - '20010101 12345Z', - # wrong separator for HHMMSS - '2001-01-01 12-34-56'] - for date_str in invalid_cases: - with pytest.raises(ValueError): - tslib._test_parse_iso8601(date_str) - # If no ValueError raised, let me know which case failed. 
- raise Exception(date_str) - - -class TestArrayToDatetime(object): - def test_coerce_out_of_bounds_utc(self): - # GH#19612 - ts = Timestamp('1900-01-01', tz='US/Pacific') - dt = ts.to_pydatetime() - timedelta(days=365 * 300) # ~1600AD - arr = np.array([dt]) - result = tslib.array_to_datetime(arr, utc=True, errors='coerce') - expected = np.array(['NaT'], dtype='datetime64[ns]') - tm.assert_numpy_array_equal(result, expected) - - def test_parsing_valid_dates(self): - arr = np.array(['01-01-2013', '01-02-2013'], dtype=object) - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr), - np_array_datetime64_compat( - [ - '2013-01-01T00:00:00.000000000-0000', - '2013-01-02T00:00:00.000000000-0000' - ], - dtype='M8[ns]' - ) - ) - - arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object) - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr), - np_array_datetime64_compat( - [ - '2013-09-16T00:00:00.000000000-0000', - '2013-09-17T00:00:00.000000000-0000' - ], - dtype='M8[ns]' - ) - ) - - def test_parsing_timezone_offsets(self): - # All of these datetime strings with offsets are equivalent - # to the same datetime after the timezone offset is added - dt_strings = [ - '01-01-2013 08:00:00+08:00', - '2013-01-01T08:00:00.000000000+0800', - '2012-12-31T16:00:00.000000000-0800', - '12-31-2012 23:00:00-01:00' - ] - - expected_output = tslib.array_to_datetime(np.array( - ['01-01-2013 00:00:00'], dtype=object)) - - for dt_string in dt_strings: - tm.assert_numpy_array_equal( - tslib.array_to_datetime( - np.array([dt_string], dtype=object) - ), - expected_output - ) - - def test_number_looking_strings_not_into_datetime(self): - # #4601 - # These strings don't look like datetimes so they shouldn't be - # attempted to be converted - arr = np.array(['-352.737091', '183.575577'], dtype=object) - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr, errors='ignore'), arr) - - arr = np.array(['1', '2', '3', '4', '5'], dtype=object) - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr, errors='ignore'), arr) - - def test_coercing_dates_outside_of_datetime64_ns_bounds(self): - invalid_dates = [ - date(1000, 1, 1), - datetime(1000, 1, 1), - '1000-01-01', - 'Jan 1, 1000', - np.datetime64('1000-01-01'), - ] - - for invalid_date in invalid_dates: - pytest.raises(ValueError, - tslib.array_to_datetime, - np.array([invalid_date], dtype='object'), - errors='raise', ) - tm.assert_numpy_array_equal( - tslib.array_to_datetime( - np.array([invalid_date], dtype='object'), - errors='coerce'), - np.array([tslib.iNaT], dtype='M8[ns]') - ) - - arr = np.array(['1/1/1000', '1/1/2000'], dtype=object) - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr, errors='coerce'), - np_array_datetime64_compat( - [ - tslib.iNaT, - '2000-01-01T00:00:00.000000000-0000' - ], - dtype='M8[ns]' - ) - ) - - def test_coerce_of_invalid_datetimes(self): - arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object) - - # Without coercing, the presence of any invalid dates prevents - # any values from being converted - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr, errors='ignore'), arr) - - # With coercing, the invalid dates becomes iNaT - tm.assert_numpy_array_equal( - tslib.array_to_datetime(arr, errors='coerce'), - np_array_datetime64_compat( - [ - '2013-01-01T00:00:00.000000000-0000', - tslib.iNaT, - tslib.iNaT - ], - dtype='M8[ns]' - ) - ) - - def test_to_datetime_barely_out_of_bounds(self): - # GH#19529 - # GH#19382 close enough to bounds that dropping nanos would result - # in an 
in-bounds datetime - arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object) - - with pytest.raises(OutOfBoundsDatetime): - to_datetime(arr) - - with pytest.raises(OutOfBoundsDatetime): - # Essentially the same as above, but more directly calling - # the relevant function - tslib.array_to_datetime(arr) - def test_normalize_date(): value = date(2012, 9, 7) diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py new file mode 100644 index 0000000000000..eb77e52e7c91d --- /dev/null +++ b/pandas/tests/tslibs/test_array_to_datetime.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +from datetime import datetime, date + +import numpy as np +import pytest + +from pandas._libs import tslib +from pandas.compat.numpy import np_array_datetime64_compat +import pandas.util.testing as tm + + +class TestParseISO8601(object): + @pytest.mark.parametrize('date_str, exp', [ + ('2011-01-02', datetime(2011, 1, 2)), + ('2011-1-2', datetime(2011, 1, 2)), + ('2011-01', datetime(2011, 1, 1)), + ('2011-1', datetime(2011, 1, 1)), + ('2011 01 02', datetime(2011, 1, 2)), + ('2011.01.02', datetime(2011, 1, 2)), + ('2011/01/02', datetime(2011, 1, 2)), + ('2011\\01\\02', datetime(2011, 1, 2)), + ('2013-01-01 05:30:00', datetime(2013, 1, 1, 5, 30)), + ('2013-1-1 5:30:00', datetime(2013, 1, 1, 5, 30))]) + def test_parsers_iso8601(self, date_str, exp): + # GH#12060 + # test only the iso parser - flexibility to different + # separators and leadings 0s + # Timestamp construction falls back to dateutil + actual = tslib._test_parse_iso8601(date_str) + assert actual == exp + + @pytest.mark.parametrize( + 'date_str', + ['2011-01/02', '2011^11^11', + '201401', '201111', '200101', + # mixed separated and unseparated + '2005-0101', '200501-01', + '20010101 12:3456', + '20010101 1234:56', + # HHMMSS must have two digits in + # each component if unseparated + '20010101 1', '20010101 123', + '20010101 12345', '20010101 12345Z', + # wrong separator for HHMMSS + '2001-01-01 12-34-56']) + def test_parsers_iso8601_invalid(self, date_str): + # separators must all match - YYYYMM not valid + with pytest.raises(ValueError): + tslib._test_parse_iso8601(date_str) + + +class TestArrayToDatetime(object): + def test_parsing_valid_dates(self): + arr = np.array(['01-01-2013', '01-02-2013'], dtype=object) + result = tslib.array_to_datetime(arr) + expected = ['2013-01-01T00:00:00.000000000-0000', + '2013-01-02T00:00:00.000000000-0000'] + tm.assert_numpy_array_equal( + result, + np_array_datetime64_compat(expected, dtype='M8[ns]')) + + arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object) + result = tslib.array_to_datetime(arr) + expected = ['2013-09-16T00:00:00.000000000-0000', + '2013-09-17T00:00:00.000000000-0000'] + tm.assert_numpy_array_equal( + result, + np_array_datetime64_compat(expected, dtype='M8[ns]')) + + @pytest.mark.parametrize('dt_string', [ + '01-01-2013 08:00:00+08:00', + '2013-01-01T08:00:00.000000000+0800', + '2012-12-31T16:00:00.000000000-0800', + '12-31-2012 23:00:00-01:00']) + def test_parsing_timezone_offsets(self, dt_string): + # All of these datetime strings with offsets are equivalent + # to the same datetime after the timezone offset is added + arr = np.array(['01-01-2013 00:00:00'], dtype=object) + expected = tslib.array_to_datetime(arr) + + arr = np.array([dt_string], dtype=object) + result = tslib.array_to_datetime(arr) + tm.assert_numpy_array_equal(result, expected) + + def test_number_looking_strings_not_into_datetime(self): + # GH#4601 + # These 
strings don't look like datetimes so they shouldn't be + # attempted to be converted + arr = np.array(['-352.737091', '183.575577'], dtype=object) + result = tslib.array_to_datetime(arr, errors='ignore') + tm.assert_numpy_array_equal(result, arr) + + arr = np.array(['1', '2', '3', '4', '5'], dtype=object) + result = tslib.array_to_datetime(arr, errors='ignore') + tm.assert_numpy_array_equal(result, arr) + + @pytest.mark.parametrize('invalid_date', [ + date(1000, 1, 1), + datetime(1000, 1, 1), + '1000-01-01', + 'Jan 1, 1000', + np.datetime64('1000-01-01')]) + def test_coerce_outside_ns_bounds(self, invalid_date): + arr = np.array([invalid_date], dtype='object') + with pytest.raises(ValueError): + tslib.array_to_datetime(arr, errors='raise') + + result = tslib.array_to_datetime(arr, errors='coerce') + expected = np.array([tslib.iNaT], dtype='M8[ns]') + tm.assert_numpy_array_equal(result, expected) + + def test_coerce_outside_ns_bounds_one_valid(self): + arr = np.array(['1/1/1000', '1/1/2000'], dtype=object) + result = tslib.array_to_datetime(arr, errors='coerce') + expected = [tslib.iNaT, + '2000-01-01T00:00:00.000000000-0000'] + tm.assert_numpy_array_equal( + result, + np_array_datetime64_compat(expected, dtype='M8[ns]')) + + def test_coerce_of_invalid_datetimes(self): + arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object) + + # Without coercing, the presence of any invalid dates prevents + # any values from being converted + result = tslib.array_to_datetime(arr, errors='ignore') + tm.assert_numpy_array_equal(result, arr) + + # With coercing, the invalid dates becomes iNaT + result = tslib.array_to_datetime(arr, errors='coerce') + expected = ['2013-01-01T00:00:00.000000000-0000', + tslib.iNaT, + tslib.iNaT] + + tm.assert_numpy_array_equal( + result, + np_array_datetime64_compat(expected, dtype='M8[ns]')) + + def test_to_datetime_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object) + with pytest.raises(tslib.OutOfBoundsDatetime): + tslib.array_to_datetime(arr)
Almost done. With array_to_datetime there is some fuzziness about whether a given test exercises array_to_datetime itself or the public to_datetime wrapper. I'm trying to isolate the tests that are specifically about tslibs, so for now I am erring on the side of keeping to_datetime tests in test_tools.
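For illustration, a hedged sketch of the distinction being drawn, with the calls copied from the diff above (`array_to_datetime` is an internal API and not stable across pandas versions):

```python
import numpy as np

import pandas as pd
from pandas._libs import tslib

arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)

# tslibs layer: takes an object ndarray, returns a datetime64[ns] ndarray
low_level = tslib.array_to_datetime(arr)

# public API layer: accepts many input types, returns a DatetimeIndex
high_level = pd.to_datetime(arr)
```

Tests that only exercise the first call belong in tests/tslibs; tests of the second stay in test_tools.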
https://api.github.com/repos/pandas-dev/pandas/pulls/19640
2018-02-10T19:25:16Z
2018-02-12T11:33:33Z
2018-02-12T11:33:33Z
2018-06-22T03:32:35Z
move timedelta test_astype test
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index 6c644d239069a..329f0c2467e8b 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -1,3 +1,5 @@ +from datetime import timedelta + import pytest import numpy as np @@ -8,6 +10,24 @@ class TestTimedeltaIndex(object): + def test_astype_object(self): + idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') + expected_list = [Timedelta('1 days'), Timedelta('2 days'), + Timedelta('3 days'), Timedelta('4 days')] + result = idx.astype(object) + expected = Index(expected_list, dtype=object, name='idx') + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype_object_with_nat(self): + idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT, + timedelta(days=4)], name='idx') + expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT, + Timedelta('4 days')] + result = idx.astype(object) + expected = Index(expected_list, dtype=object, name='idx') + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list def test_astype(self): # GH 13149, GH 13209 diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 86d7dd4e1b117..d154aa2172ef7 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -8,7 +8,7 @@ from pandas import to_timedelta from pandas import (Series, Timedelta, Timestamp, TimedeltaIndex, timedelta_range, - _np_version_under1p10, Index) + _np_version_under1p10) from pandas._libs.tslib import iNaT from pandas.tests.test_base import Ops @@ -25,31 +25,6 @@ def test_ops_properties(self): self.check_ops_properties(TimedeltaIndex._field_ops, f) self.check_ops_properties(TimedeltaIndex._object_ops, f) - def test_astype_object(self): - idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') - expected_list = [Timedelta('1 days'), Timedelta('2 days'), - Timedelta('3 days'), Timedelta('4 days')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - - idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT, - timedelta(days=4)], name='idx') - expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT, - Timedelta('4 days')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - def test_minmax(self): # monotonic
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19639
2018-02-10T19:13:21Z
2018-02-11T14:51:48Z
2018-02-11T14:51:47Z
2018-02-11T21:37:03Z
Move liboffsets, libfreqs, libparsing tests to test_tslibs
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index b95ae07052ecb..35f34dc3a4974 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -187,6 +187,18 @@ def test_to_datetime_format_weeks(self, cache): class TestToDatetime(object): + def test_to_datetime_pydatetime(self): + actual = pd.to_datetime(datetime(2008, 1, 15)) + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_YYYYMMDD(self): + actual = pd.to_datetime('20080115') + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_unparseable_ignore(self): + # unparseable + s = 'Month 1, 1999' + assert pd.to_datetime(s, errors='ignore') == s @td.skip_if_windows # `tm.set_timezone` does not work in windows def test_to_datetime_now(self): diff --git a/pandas/tests/tseries/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py similarity index 100% rename from pandas/tests/tseries/test_libfrequencies.py rename to pandas/tests/tslibs/test_libfrequencies.py diff --git a/pandas/tests/tseries/offsets/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py similarity index 100% rename from pandas/tests/tseries/offsets/test_liboffsets.py rename to pandas/tests/tslibs/test_liboffsets.py diff --git a/pandas/tests/scalar/test_parsing.py b/pandas/tests/tslibs/test_parsing.py similarity index 96% rename from pandas/tests/scalar/test_parsing.py rename to pandas/tests/tslibs/test_parsing.py index bff0de649ac5e..34cce088a8b42 100644 --- a/pandas/tests/scalar/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -7,7 +7,6 @@ import pytest from dateutil.parser import parse -import pandas as pd import pandas.util._test_decorators as td from pandas.conftest import is_dateutil_le_261, is_dateutil_gt_261 from pandas import compat @@ -16,18 +15,6 @@ from pandas._libs.tslibs.parsing import parse_time_string -def test_to_datetime1(): - actual = pd.to_datetime(datetime(2008, 1, 15)) - assert actual == datetime(2008, 1, 15) - - actual = pd.to_datetime('20080115') - assert actual == datetime(2008, 1, 15) - - # unparseable - s = 'Month 1, 1999' - assert pd.to_datetime(s, errors='ignore') == s - - class TestParseQuarters(object): def test_parse_time_string(self):
libfreqs and liboffsets tests are moved but not edited. One libparsing test is moved to tests.indexes.datetimes.test_tools, since it exercised pd.to_datetime rather than the parsing code; that keeps the tslibs test module self-contained.
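As plain usage, this is the behavior the relocated test covers (taken directly from the diff above):

```python
from datetime import datetime

import pandas as pd

assert pd.to_datetime(datetime(2008, 1, 15)) == datetime(2008, 1, 15)
assert pd.to_datetime('20080115') == datetime(2008, 1, 15)

# an unparseable string is returned unchanged under errors='ignore'
s = 'Month 1, 1999'
assert pd.to_datetime(s, errors='ignore') == s
```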
https://api.github.com/repos/pandas-dev/pandas/pulls/19638
2018-02-10T19:07:04Z
2018-02-11T14:44:07Z
2018-02-11T14:44:07Z
2018-02-11T21:36:43Z
test_astype portion of #19627
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 4b989eb35e900..8acdd301f241a 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -138,6 +138,30 @@ def test_astype_object(self): tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) assert casted.tolist() == exp_values + @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo']) + def test_astype_object_tz(self, tz): + idx = pd.date_range(start='2013-01-01', periods=4, freq='M', + name='idx', tz=tz) + expected_list = [Timestamp('2013-01-31', tz=tz), + Timestamp('2013-02-28', tz=tz), + Timestamp('2013-03-31', tz=tz), + Timestamp('2013-04-30', tz=tz)] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype_object_with_nat(self): + idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), + pd.NaT, datetime(2013, 1, 4)], name='idx') + expected_list = [Timestamp('2013-01-01'), + Timestamp('2013-01-02'), pd.NaT, + Timestamp('2013-01-04')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + @pytest.mark.parametrize('dtype', [ float, 'timedelta64', 'timedelta64[ns]', 'datetime64', 'datetime64[D]']) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 4f386eb28cc0f..a0cc565564f35 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -51,49 +51,6 @@ def test_ops_properties_basic(self): assert s.day == 10 pytest.raises(AttributeError, lambda: s.weekday) - def test_astype_object(self): - idx = pd.date_range(start='2013-01-01', periods=4, freq='M', - name='idx') - expected_list = [Timestamp('2013-01-31'), - Timestamp('2013-02-28'), - Timestamp('2013-03-31'), - Timestamp('2013-04-30')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - - idx = pd.date_range(start='2013-01-01', periods=4, freq='M', - name='idx', tz='Asia/Tokyo') - expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'), - Timestamp('2013-02-28', tz='Asia/Tokyo'), - Timestamp('2013-03-31', tz='Asia/Tokyo'), - Timestamp('2013-04-30', tz='Asia/Tokyo')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - - idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), - pd.NaT, datetime(2013, 1, 4)], name='idx') - expected_list = [Timestamp('2013-01-01'), - Timestamp('2013-01-02'), pd.NaT, - Timestamp('2013-01-04')] - expected = pd.Index(expected_list, dtype=object, name='idx') - result = idx.astype(object) - assert isinstance(result, Index) - assert result.dtype == object - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert idx.tolist() == expected_list - def test_minmax(self): for tz in self.tz: # monotonic
Parametrized as suggested.
https://api.github.com/repos/pandas-dev/pandas/pulls/19637
2018-02-10T19:01:42Z
2018-02-11T14:50:47Z
2018-02-11T14:50:47Z
2018-02-11T21:36:56Z
move shift_months test to test_arithmetic
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index f6f8eccf4e30c..ddc97636ae0a8 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -15,6 +15,7 @@ DatetimeIndex, TimedeltaIndex, date_range) from pandas._libs import tslib +from pandas._libs.tslibs.offsets import shift_months @pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', @@ -933,3 +934,19 @@ def test_datetime64_with_DateOffset(klass, assert_func): Timestamp('2000-02-29', tz='US/Central')], name='a') assert_func(result, exp) assert_func(result2, exp) + + +@pytest.mark.parametrize('years', [-1, 0, 1]) +@pytest.mark.parametrize('months', [-2, 0, 2]) +def test_shift_months(years, months): + s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), + Timestamp('2000-01-31 00:23:00'), + Timestamp('2000-01-01'), + Timestamp('2000-02-29'), + Timestamp('2000-12-31')]) + actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months)) + + raw = [x + pd.offsets.DateOffset(years=years, months=months) + for x in s] + expected = DatetimeIndex(raw) + tm.assert_index_equal(actual, expected) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 4f386eb28cc0f..440478100ddd5 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -5,10 +5,8 @@ import numpy as np from datetime import datetime -from itertools import product import pandas as pd import pandas._libs.tslib as tslib -from pandas._libs.tslibs.offsets import shift_months import pandas.util.testing as tm from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, date_range, _np_version_under1p10, Index, @@ -568,19 +566,6 @@ def test_equals(self): assert not idx.equals(pd.Series(idx3)) -@pytest.mark.parametrize('years,months', product([-1, 0, 1], [-2, 0, 2])) -def test_shift_months(years, months): - s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), - Timestamp('2000-01-31 00:23:00'), - Timestamp('2000-01-01'), - Timestamp('2000-02-29'), - Timestamp('2000-12-31')]) - actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months)) - expected = DatetimeIndex([x + pd.offsets.DateOffset( - years=years, months=months) for x in s]) - tm.assert_index_equal(actual, expected) - - class TestBusinessDatetimeIndex(object): def setup_method(self, method):
Broken off of #19627
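A minimal sketch of the equivalence the moved test asserts, with the APIs as used in the diff above (`shift_months` is internal): shifting the underlying i8 values with the vectorized `shift_months` should match adding `DateOffset(years=..., months=...)` element-wise.

```python
import pandas as pd
import pandas.util.testing as tm
from pandas import DatetimeIndex, Timestamp
from pandas._libs.tslibs.offsets import shift_months

s = DatetimeIndex([Timestamp('2000-01-31'), Timestamp('2000-02-29')])

years, months = 1, 2
actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months))
expected = DatetimeIndex([x + pd.offsets.DateOffset(years=years,
                                                    months=months)
                          for x in s])
tm.assert_index_equal(actual, expected)
```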
https://api.github.com/repos/pandas-dev/pandas/pulls/19636
2018-02-10T18:54:17Z
2018-02-11T00:49:22Z
2018-02-11T00:49:22Z
2018-02-11T01:29:11Z
Consolidated Groupby nth / last object Templates
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index d75c3a71896e3..866683ce378ab 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -26,105 +26,6 @@ cdef double NaN = <double> np.NaN cdef double nan = NaN -# TODO: aggregate multiple columns in single pass -# ---------------------------------------------------------------------- -# first, nth, last - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_nth_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels, - int64_t rank, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - object val - float64_t count - ndarray[int64_t, ndim=2] nobs - ndarray[object, ndim=2] resx - - assert min_count == -1, "'min_count' only used in add and prod" - - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty((<object> out).shape, dtype=object) - - N, K = (<object> values).shape - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val - - for i in range(len(counts)): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = <object> nan - else: - out[i, j] = resx[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_last_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - object val - float64_t count - ndarray[object, ndim=2] resx - ndarray[int64_t, ndim=2] nobs - - assert min_count == -1, "'min_count' only used in add and prod" - - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty((<object> out).shape, dtype=object) - - N, K = (<object> values).shape - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val - - for i in range(len(counts)): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = nan - else: - out[i, j] = resx[i, j] - - cdef inline float64_t median_linear(float64_t* a, int n) nogil: cdef int i, j, na_count = 0 cdef float64_t result diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index b24444c422efa..48dac7bf10362 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -317,7 +317,7 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{endfor}} #---------------------------------------------------------------------- -# group_nth, group_last +# group_nth, group_last, group_rank #---------------------------------------------------------------------- {{py: @@ -325,7 +325,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, # name, c_type, dest_type2, nan_val dtypes = [('float64', 'float64_t', 'float64_t', 'NAN'), ('float32', 'float32_t', 'float32_t', 'NAN'), - ('int64', 'int64_t', 'int64_t', 'iNaT')] + ('int64', 'int64_t', 'int64_t', 'iNaT'), + ('object', 'object', 'object', 'NAN')] def get_dispatch(dtypes): @@ -350,7 +351,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count + {{dest_type2}} val ndarray[{{dest_type2}}, ndim=2] resx 
ndarray[int64_t, ndim=2] nobs @@ -360,11 +361,19 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object> out).shape, dtype=np.int64) + {{if name=='object'}} + resx = np.empty((<object> out).shape, dtype=object) + {{else}} resx = np.empty_like(out) + {{endif}} N, K = (<object> values).shape + {{if name == "object"}} + if True: # make templating happy + {{else}} with nogil: + {{endif}} for i in range(N): lab = labels[i] if lab < 0: @@ -375,11 +384,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} if val == val and val != {{nan_val}}: - {{endif}} nobs[lab, j] += 1 resx[lab, j] = val @@ -390,7 +395,6 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, else: out[i, j] = resx[i, j] - @cython.wraparound(False) @cython.boundscheck(False) def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @@ -403,7 +407,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count + {{dest_type2}} val ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs @@ -413,11 +417,19 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object> out).shape, dtype=np.int64) + {{if name=='object'}} + resx = np.empty((<object> out).shape, dtype=object) + {{else}} resx = np.empty_like(out) + {{endif}} N, K = (<object> values).shape + {{if name == "object"}} + if True: # make templating happy + {{else}} with nogil: + {{endif}} for i in range(N): lab = labels[i] if lab < 0: @@ -428,11 +440,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} if val == val and val != {{nan_val}}: - {{endif}} nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val @@ -445,6 +453,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, out[i, j] = resx[i, j] +{{if name != 'object'}} @cython.boundscheck(False) @cython.wraparound(False) def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, @@ -608,6 +617,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, if pct: for i in range(N): out[i, 0] = out[i, 0] / grp_sizes[i, 0] +{{endif}} {{endfor}} diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 2db772ac54369..6eacd45deb7bc 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2252,7 +2252,19 @@ def test_median_empty_bins(self): expected = df.groupby(bins).agg(lambda x: x.median()) assert_frame_equal(result, expected) - def test_groupby_non_arithmetic_agg_types(self): + @pytest.mark.parametrize("dtype", [ + 'int8', 'int16', 'int32', 'int64', 'float32', 'float64']) + @pytest.mark.parametrize("method,data", [ + ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), + ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), + ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), + ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), + ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], + 'args': [1]}), + ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], + 'out_type': 'int64'}) + ]) + def test_groupby_non_arithmetic_agg_types(self, dtype, method, data): # GH9311, GH6620 df = pd.DataFrame( [{'a': 1, 'b': 1}, @@ -2260,39 +2272,25 @@ def 
test_groupby_non_arithmetic_agg_types(self): {'a': 2, 'b': 3}, {'a': 2, 'b': 4}]) - dtypes = ['int8', 'int16', 'int32', 'int64', 'float32', 'float64'] - - grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, - 'last': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}, - 'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, - 'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}, - 'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], - 'args': [1]}, - 'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], - 'out_type': 'int64'}} + df['b'] = df.b.astype(dtype) - for dtype in dtypes: - df_in = df.copy() - df_in['b'] = df_in.b.astype(dtype) + if 'args' not in data: + data['args'] = [] - for method, data in compat.iteritems(grp_exp): - if 'args' not in data: - data['args'] = [] - - if 'out_type' in data: - out_type = data['out_type'] - else: - out_type = dtype + if 'out_type' in data: + out_type = data['out_type'] + else: + out_type = dtype - exp = data['df'] - df_out = pd.DataFrame(exp) + exp = data['df'] + df_out = pd.DataFrame(exp) - df_out['b'] = df_out.b.astype(out_type) - df_out.set_index('a', inplace=True) + df_out['b'] = df_out.b.astype(out_type) + df_out.set_index('a', inplace=True) - grpd = df_in.groupby('a') - t = getattr(grpd, method)(*data['args']) - assert_frame_equal(t, df_out) + grpd = df.groupby('a') + t = getattr(grpd, method)(*data['args']) + assert_frame_equal(t, df_out) def test_groupby_non_arithmetic_agg_intlike_precision(self): # GH9311, GH6620
- [X] closes #19569
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Updated to prevent the group_rank_object method from being templated after the merging of #19481.
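Not part of the diff, but for context, a minimal sketch of the public groupby operations these object-dtype kernels back; the frame and values here are made up for illustration:

```python
import numpy as np
import pandas as pd

# an object-dtype column with a missing value, grouped by 'key'
df = pd.DataFrame({'key': [1, 1, 2, 2],
                   'val': ['a', np.nan, 'b', 'c']})
grouped = df.groupby('key')

print(grouped.first())  # first non-null per group: 'a' and 'b'
print(grouped.last())   # last non-null per group: 'a' and 'c'
print(grouped.nth(1))   # second row of each group
```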
https://api.github.com/repos/pandas-dev/pandas/pulls/19635
2018-02-10T17:52:59Z
2018-02-10T18:48:16Z
2018-02-10T18:48:16Z
2018-02-10T18:52:45Z
DOC: Improving code quality of doc/make.py, PEP-8 and refactoring (#19631)
diff --git a/ci/lint.sh b/ci/lint.sh index 49bf9a690b990..b862a3bfcf29e 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -37,6 +37,13 @@ if [ "$LINT" ]; then fi echo "Linting scripts/*.py DONE" + echo "Linting doc script" + flake8 doc/make.py + if [ $? -ne "0" ]; then + RET=1 + fi + echo "Linting doc script DONE" + echo "Linting *.pyx" flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403 if [ $? -ne "0" ]; then diff --git a/doc/make.py b/doc/make.py index acef563f301e4..e3cb29aa3e086 100755 --- a/doc/make.py +++ b/doc/make.py @@ -1,128 +1,62 @@ #!/usr/bin/env python - """ Python script for building documentation. To build the docs you must have all optional dependencies for pandas installed. See the installation instructions for a list of these. -<del>Note: currently latex builds do not work because of table formats that are not -supported in the latex generation.</del> - -2014-01-30: Latex has some issues but 'latex_forced' works ok for 0.13.0-400 or so - Usage ----- -python make.py clean -python make.py html + $ python make.py clean + $ python make.py html + $ python make.py latex """ -from __future__ import print_function - -import io -import glob # noqa +import sys import os import shutil -import sys +import subprocess +import argparse from contextlib import contextmanager +import jinja2 -import sphinx # noqa -import argparse -import jinja2 # noqa -os.environ['PYTHONPATH'] = '..' +DOC_PATH = os.path.dirname(os.path.abspath(__file__)) +SOURCE_PATH = os.path.join(DOC_PATH, 'source') +BUILD_PATH = os.path.join(DOC_PATH, 'build') +BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates'] -SPHINX_BUILD = 'sphinxbuild' +def _generate_index(include_api, single_doc=None): + """Create index.rst file with the specified sections. -def _process_user(user): - if user is None or user is False: - user = '' - else: - user = user + '@' - return user - - -def upload_dev(user=None): - 'push a copy to the pydata dev directory' - user = _process_user(user) - if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org' - ':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)): - raise SystemExit('Upload to Pydata Dev failed') - - -def upload_dev_pdf(user=None): - 'push a copy to the pydata dev directory' - user = _process_user(user) - if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org' - ':/usr/share/nginx/pandas/pandas-docs/dev/'.format(user)): - raise SystemExit('PDF upload to Pydata Dev failed') - - -def upload_stable(user=None): - 'push a copy to the pydata stable directory' - user = _process_user(user) - if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org' - ':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(user)): - raise SystemExit('Upload to stable failed') - - -def upload_stable_pdf(user=None): - 'push a copy to the pydata dev directory' - user = _process_user(user) - if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org' - ':/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)): - raise SystemExit('PDF upload to stable failed') - - -def upload_prev(ver, doc_root='./', user=None): - 'push a copy of older release to appropriate version directory' - user = _process_user(user) - local_dir = doc_root + 'build/html' - remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver - cmd = 'cd %s; rsync -avz . 
%spandas.pydata.org:%s -essh' - cmd = cmd % (local_dir, user, remote_dir) - print(cmd) - if os.system(cmd): - raise SystemExit( - 'Upload to %s from %s failed' % (remote_dir, local_dir)) - - local_dir = doc_root + 'build/latex' - pdf_cmd = 'cd %s; scp pandas.pdf %spandas.pydata.org:%s' - pdf_cmd = pdf_cmd % (local_dir, user, remote_dir) - if os.system(pdf_cmd): - raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root)) - -def build_pandas(): - os.chdir('..') - os.system('python setup.py clean') - os.system('python setup.py build_ext --inplace') - os.chdir('doc') - -def build_prev(ver): - if os.system('git checkout v%s' % ver) != 1: - os.chdir('..') - os.system('python setup.py clean') - os.system('python setup.py build_ext --inplace') - os.chdir('doc') - os.system('python make.py clean') - os.system('python make.py html') - os.system('python make.py latex') - os.system('git checkout master') - - -def clean(): - if os.path.exists('build'): - shutil.rmtree('build') - - if os.path.exists('source/generated'): - shutil.rmtree('source/generated') + Parameters + ---------- + include_api : bool + Whether API documentation will be built. + single_doc : str or None + If provided, this single documentation page will be generated. + """ + if single_doc is not None: + single_doc = os.path.splitext(os.path.basename(single_doc))[0] + include_api = False + + with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f: + t = jinja2.Template(f.read()) + + with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f: + f.write(t.render(include_api=include_api, + single_doc=single_doc)) @contextmanager -def maybe_exclude_notebooks(): - """ - Skip building the notebooks if pandoc is not installed. +def _maybe_exclude_notebooks(): + """Skip building the notebooks if pandoc is not installed. + This assumes that nbsphinx is installed. + + Skip notebook conversion if: + 1. nbconvert isn't installed, or + 2. nbconvert is installed, but pandoc isn't """ base = os.path.dirname(__file__) notebooks = [os.path.join(base, 'source', nb) @@ -135,304 +69,175 @@ def _remove_notebooks(): contents[nb] = f.read() os.remove(nb) - # Skip notebook conversion if - # 1. nbconvert isn't installed, or - # 2. nbconvert is installed, but pandoc isn't try: import nbconvert except ImportError: - print("Warning: nbconvert not installed. Skipping notebooks.") + sys.stderr.write('Warning: nbconvert not installed. ' + 'Skipping notebooks.\n') _remove_notebooks() else: try: nbconvert.utils.pandoc.get_pandoc_version() except nbconvert.utils.pandoc.PandocMissing: - print("Warning: Pandoc is not installed. Skipping notebooks.") + sys.stderr.write('Warning: Pandoc is not installed. ' + 'Skipping notebooks.\n') _remove_notebooks() yield + for nb, content in contents.items(): with open(nb, 'wt') as f: f.write(content) -def html(): - check_build() - - with maybe_exclude_notebooks(): - if os.system('sphinx-build -P -b html -d build/doctrees ' - 'source build/html'): - raise SystemExit("Building HTML failed.") - try: - # remove stale file - os.remove('build/html/pandas.zip') - except: - pass - - -def zip_html(): - try: - print("\nZipping up HTML docs...") - # just in case the wonky build box doesn't have zip - # don't fail this. - os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ') - print("\n") - except: - pass - -def latex(): - check_build() - if sys.platform != 'win32': - # LaTeX format. 
- if os.system('sphinx-build -j 2 -b latex -d build/doctrees ' - 'source build/latex'): - raise SystemExit("Building LaTeX failed.") - # Produce pdf. - - os.chdir('build/latex') - - # Call the makefile produced by sphinx... - if os.system('make'): - print("Rendering LaTeX failed.") - print("You may still be able to get a usable PDF file by going into 'build/latex'") - print("and executing 'pdflatex pandas.tex' for the requisite number of passes.") - print("Or using the 'latex_forced' target") - raise SystemExit - - os.chdir('../..') - else: - print('latex build has not been tested on windows') - -def latex_forced(): - check_build() - if sys.platform != 'win32': - # LaTeX format. - if os.system('sphinx-build -j 2 -b latex -d build/doctrees ' - 'source build/latex'): - raise SystemExit("Building LaTeX failed.") - # Produce pdf. - - os.chdir('build/latex') - - # Manually call pdflatex, 3 passes should ensure latex fixes up - # all the required cross-references and such. - os.system('pdflatex -interaction=nonstopmode pandas.tex') - os.system('pdflatex -interaction=nonstopmode pandas.tex') - os.system('pdflatex -interaction=nonstopmode pandas.tex') - raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.") - - os.chdir('../..') - else: - print('latex build has not been tested on windows') - - -def check_build(): - build_dirs = [ - 'build', 'build/doctrees', 'build/html', - 'build/latex', 'build/plots', 'build/_static', - 'build/_templates'] - for d in build_dirs: - try: - os.mkdir(d) - except OSError: - pass - - -def all(): - # clean() - html() - - -def auto_dev_build(debug=False): - msg = '' - try: - step = 'clean' - clean() - step = 'html' - html() - step = 'upload dev' - upload_dev() - if not debug: - sendmail(step) - - step = 'latex' - latex() - step = 'upload pdf' - upload_dev_pdf() - if not debug: - sendmail(step) - except (Exception, SystemExit) as inst: - msg = str(inst) + '\n' - sendmail(step, '[ERROR] ' + msg) - - -def sendmail(step=None, err_msg=None): - from_name, to_name = _get_config() - - if step is None: - step = '' - - if err_msg is None or '[ERROR]' not in err_msg: - msgstr = 'Daily docs %s completed successfully' % step - subject = "DOC: %s successful" % step - else: - msgstr = err_msg - subject = "DOC: %s failed" % step - - import smtplib - from email.MIMEText import MIMEText - msg = MIMEText(msgstr) - msg['Subject'] = subject - msg['From'] = from_name - msg['To'] = to_name - - server_str, port, login, pwd = _get_credentials() - server = smtplib.SMTP(server_str, port) - server.ehlo() - server.starttls() - server.ehlo() - - server.login(login, pwd) - try: - server.sendmail(from_name, to_name, msg.as_string()) - finally: - server.close() - - -def _get_dir(subdir=None): - import getpass - USERNAME = getpass.getuser() - if sys.platform == 'darwin': - HOME = '/Users/%s' % USERNAME - else: - HOME = '/home/%s' % USERNAME - - if subdir is None: - subdir = '/code/scripts/config' - conf_dir = '%s/%s' % (HOME, subdir) - return conf_dir - - -def _get_credentials(): - tmp_dir = _get_dir() - cred = '%s/credentials' % tmp_dir - with open(cred, 'r') as fh: - server, port, un, domain = fh.read().split(',') - port = int(port) - login = un + '@' + domain + '.com' - - import base64 - with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh: - pwd = base64.b64decode(fh.read()) - - return server, port, login, pwd - - -def _get_config(): - tmp_dir = _get_dir() - with open('%s/addresses' % tmp_dir, 'r') as fh: - from_name, to_name = fh.read().split(',') - return from_name, 
to_name - -funcd = { - 'html': html, - 'zip_html': zip_html, - 'upload_dev': upload_dev, - 'upload_stable': upload_stable, - 'upload_dev_pdf': upload_dev_pdf, - 'upload_stable_pdf': upload_stable_pdf, - 'latex': latex, - 'latex_forced': latex_forced, - 'clean': clean, - 'auto_dev': auto_dev_build, - 'auto_debug': lambda: auto_dev_build(True), - 'build_pandas': build_pandas, - 'all': all, -} - -small_docs = False - -# current_dir = os.getcwd() -# os.chdir(os.path.dirname(os.path.join(current_dir, __file__))) - -import argparse -argparser = argparse.ArgumentParser(description=""" -pandas documentation builder -""".strip()) - -# argparser.add_argument('-arg_name', '--arg_name', -# metavar='label for arg help', -# type=str|etc, -# nargs='N|*|?|+|argparse.REMAINDER', -# required=False, -# #choices='abc', -# help='help string', -# action='store|store_true') - -# args = argparser.parse_args() - -#print args.accumulate(args.integers) - -def generate_index(api=True, single=False, **kwds): - from jinja2 import Template - with open("source/index.rst.template") as f: - t = Template(f.read()) +class DocBuilder: + """Class to wrap the different commands of this script. - with open("source/index.rst","w") as f: - f.write(t.render(api=api,single=single,**kwds)) + All public methods of this class can be called as parameters of the + script. + """ + def __init__(self, num_jobs=1): + self.num_jobs = num_jobs + + @staticmethod + def _create_build_structure(): + """Create directories required to build documentation.""" + for dirname in BUILD_DIRS: + try: + os.makedirs(os.path.join(BUILD_PATH, dirname)) + except OSError: + pass + + @staticmethod + def _run_os(*args): + """Execute a command as a OS terminal. + + Parameters + ---------- + *args : list of str + Command and parameters to be executed + + Examples + -------- + >>> DocBuilder()._run_os('python', '--version') + """ + subprocess.check_call(args, stderr=subprocess.STDOUT) + + def _sphinx_build(self, kind): + """Call sphinx to build documentation. + + Attribute `num_jobs` from the class is used. 
+ + Parameters + ---------- + kind : {'html', 'latex'} + + Examples + -------- + >>> DocBuilder(num_jobs=4)._sphinx_build('html') + """ + if kind not in ('html', 'latex'): + raise ValueError('kind must be html or latex, not {}'.format(kind)) + + self._run_os('sphinx-build', + '-j{}'.format(self.num_jobs), + '-b{}'.format(kind), + '-d{}'.format(os.path.join(BUILD_PATH, + 'doctrees')), + SOURCE_PATH, + os.path.join(BUILD_PATH, kind)) + + def html(self): + """Build HTML documentation.""" + self._create_build_structure() + with _maybe_exclude_notebooks(): + self._sphinx_build('html') + zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') + if os.path.exists(zip_fname): + os.remove(zip_fname) + + def latex(self, force=False): + """Build PDF documentation.""" + self._create_build_structure() + if sys.platform == 'win32': + sys.stderr.write('latex build has not been tested on windows\n') + else: + self._sphinx_build('latex') + os.chdir(os.path.join(BUILD_PATH, 'latex')) + if force: + for i in range(3): + self._run_os('pdflatex', + '-interaction=nonstopmode', + 'pandas.tex') + raise SystemExit('You should check the file ' + '"build/latex/pandas.pdf" for problems.') + else: + self._run_os('make') + + def latex_forced(self): + """Build PDF documentation with retries to find missing references.""" + self.latex(force=True) + + @staticmethod + def clean(): + """Clean documentation generated files.""" + shutil.rmtree(BUILD_PATH, ignore_errors=True) + shutil.rmtree(os.path.join(SOURCE_PATH, 'generated'), + ignore_errors=True) + + def zip_html(self): + """Compress HTML documentation into a zip file.""" + zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') + if os.path.exists(zip_fname): + os.remove(zip_fname) + dirname = os.path.join(BUILD_PATH, 'html') + fnames = os.listdir(dirname) + os.chdir(dirname) + self._run_os('zip', + zip_fname, + '-r', + '-q', + *fnames) -import argparse -argparser = argparse.ArgumentParser(description="pandas documentation builder", - epilog="Targets : %s" % funcd.keys()) - -argparser.add_argument('--no-api', - default=False, - help='Ommit api and autosummary', - action='store_true') -argparser.add_argument('--single', - metavar='FILENAME', - type=str, - default=False, - help='filename of section to compile, e.g. 
"indexing"') -argparser.add_argument('--user', - type=str, - default=False, - help='Username to connect to the pydata server') def main(): - args, unknown = argparser.parse_known_args() - sys.argv = [sys.argv[0]] + unknown - if args.single: - args.single = os.path.basename(args.single).split(".rst")[0] - - if 'clean' in unknown: - args.single=False - - generate_index(api=not args.no_api and not args.single, single=args.single) - - if len(sys.argv) > 2: - ftype = sys.argv[1] - ver = sys.argv[2] - - if ftype == 'build_previous': - build_prev(ver, user=args.user) - if ftype == 'upload_previous': - upload_prev(ver, user=args.user) - elif len(sys.argv) == 2: - for arg in sys.argv[1:]: - func = funcd.get(arg) - if func is None: - raise SystemExit('Do not know how to handle %s; valid args are %s' % ( - arg, list(funcd.keys()))) - if args.user: - func(user=args.user) - else: - func() - else: - small_docs = False - all() -# os.chdir(current_dir) + cmds = [method for method in dir(DocBuilder) if not method.startswith('_')] + + argparser = argparse.ArgumentParser( + description='pandas documentation builder', + epilog='Commands: {}'.format(','.join(cmds))) + argparser.add_argument('command', + nargs='?', + default='html', + help='command to run: {}'.format(', '.join(cmds))) + argparser.add_argument('--num-jobs', + type=int, + default=1, + help='number of jobs used by sphinx-build') + argparser.add_argument('--no-api', + default=False, + help='ommit api and autosummary', + action='store_true') + argparser.add_argument('--single', + metavar='FILENAME', + type=str, + default=None, + help=('filename of section to compile, ' + 'e.g. "indexing"')) + argparser.add_argument('--python-path', + type=str, + default=os.path.join(DOC_PATH, '..'), + help='path') + args = argparser.parse_args() + + if args.command not in cmds: + raise ValueError('Unknown command {}. Available options: {}'.format( + args.command, ', '.join(cmds))) + + os.environ['PYTHONPATH'] = args.python_path + _generate_index(not args.no_api, args.single) + getattr(DocBuilder(args.num_jobs), args.command)() + if __name__ == '__main__': - import sys sys.exit(main()) diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 7c7457df8ea93..eff1227e98994 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -109,10 +109,10 @@ See the package overview for more detail about what's in the library. .. toctree:: :maxdepth: 4 - {% if single -%} - {{ single }} + {% if single_doc -%} + {{ single_doc }} {% endif -%} - {%if not single -%} + {% if not single_doc -%} whatsnew install contributing @@ -146,10 +146,10 @@ See the package overview for more detail about what's in the library. comparison_with_sql comparison_with_sas {% endif -%} - {% if api -%} + {% if include_api -%} api {% endif -%} - {%if not single -%} + {% if not single_doc -%} developer internals release
- [X] closes #19631, closes #15591, closes #10340
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Summary of the PR:
- doc/make.py passes lint and respects PEP-8
- Makes the structure of the code more organized
- Adds command line arguments for the number of sphinx jobs (#15591) and for the PYTHONPATH (#10340)
- Works in both Python 2 and 3
- Tracebacks are shown if a command fails (previously only the output from the command was shown, with no traceback for where the exception happened); a sketch of the new command dispatch is below.
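A minimal, runnable sketch of the command-dispatch pattern the rewritten script uses, with stub methods standing in for the real builds (the actual `DocBuilder` methods shell out to sphinx-build):

```python
import argparse

class DocBuilder:
    """Every public method becomes a command-line command."""
    def __init__(self, num_jobs=1):
        self.num_jobs = num_jobs

    def html(self):
        print('building html with {} sphinx jobs'.format(self.num_jobs))

    def clean(self):
        print('removing generated files')

def main():
    cmds = [m for m in dir(DocBuilder) if not m.startswith('_')]
    parser = argparse.ArgumentParser(description='docs builder sketch')
    parser.add_argument('command', nargs='?', default='html',
                        help='command to run: {}'.format(', '.join(cmds)))
    parser.add_argument('--num-jobs', type=int, default=1,
                        help='number of jobs used by sphinx-build')
    args = parser.parse_args()
    if args.command not in cmds:
        raise SystemExit('Unknown command {}'.format(args.command))
    # dispatch by name: 'python make.py clean' calls DocBuilder().clean()
    getattr(DocBuilder(args.num_jobs), args.command)()

if __name__ == '__main__':
    main()
```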
https://api.github.com/repos/pandas-dev/pandas/pulls/19634
2018-02-10T17:46:41Z
2018-02-22T02:05:44Z
2018-02-22T02:05:43Z
2018-02-22T12:54:17Z
BUG: Do not round DatetimeIndex nanosecond precision when iterating
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 72f63a4da0f4d..b6316bd39f396 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -729,6 +729,7 @@ Timezones - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in the :class:`DataFrame` constructor, where tz-aware Datetimeindex and a given column name will result in an empty ``DataFrame`` (:issue:`19157`) - Bug in :func:`Timestamp.tz_localize` where localizing a timestamp near the minimum or maximum valid values could overflow and return a timestamp with an incorrect nanosecond value (:issue:`12677`) +- Bug when iterating over :class:`DatetimeIndex` that was localized with fixed timezone offset that rounded nanosecond precision to microseconds (:issue:`19603`) Offsets ^^^^^^^ diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 85e667521e5f2..fec7f21d6e6eb 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -46,7 +46,8 @@ from tslibs.timezones cimport (is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_pytz, get_dst_info) from tslibs.conversion cimport (tz_convert_single, _TSObject, convert_datetime_to_tsobject, - get_datetime64_nanos) + get_datetime64_nanos, + tz_convert_utc_to_tzlocal) from tslibs.conversion import tz_convert_single from tslibs.nattype import NaT, nat_strings, iNaT @@ -144,12 +145,12 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, if value == NPY_NAT: result[i] = NaT else: - dt64_to_dtstruct(value, &dts) - dt = create_datetime_from_ts(value, dts, tz, freq) - dt = dt + tz.utcoffset(dt) - if box: - dt = Timestamp(dt) - result[i] = dt + # Python datetime objects do not support nanosecond + # resolution (yet, PEP 564). Need to compute new value + # using the i8 representation. 
+ local_value = tz_convert_utc_to_tzlocal(value, tz) + dt64_to_dtstruct(local_value, &dts) + result[i] = func_create(value, dts, tz, freq) else: trans, deltas, typ = get_dst_info(tz) diff --git a/pandas/conftest.py b/pandas/conftest.py index 4fe66d4cf7e1f..37f0a2f818a3b 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -93,3 +93,9 @@ def compression_no_zip(request): except zip """ return request.param + + +@pytest.fixture(scope='module') +def datetime_tz_utc(): + from datetime import timezone + return timezone.utc diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 075d239df5f7a..62854676d43be 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -17,7 +17,7 @@ import pandas as pd from pandas._libs import tslib from pandas._libs.tslibs import timezones -from pandas.compat import lrange, zip +from pandas.compat import lrange, zip, PY3 from pandas import (DatetimeIndex, date_range, bdate_range, Timestamp, isna, to_datetime, Index) @@ -949,6 +949,17 @@ def test_dti_union_aware(self): result = rng.union(rng2) assert result.tz.zone == 'UTC' + @pytest.mark.parametrize('tz', [None, 'UTC', "US/Central", + dateutil.tz.tzoffset(None, -28800)]) + @pytest.mark.usefixtures("datetime_tz_utc") + @pytest.mark.skipif(not PY3, reason="datetime.timezone not in PY2") + def test_iteration_preserves_nanoseconds(self, tz): + # GH 19603 + index = DatetimeIndex(["2018-02-08 15:00:00.168456358", + "2018-02-08 15:00:00.168456359"], tz=tz) + for i, ts in enumerate(index): + assert ts == index[i] + class TestDateRange(object): """Tests for date_range with timezones"""
- [x] closes #19603
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

~~This issue was actually very specific to localizing a `DatetimeIndex` with `datetime.timezone.utc`~~

```
# On master
In [1]: import pandas as pd

In [2]: datetimeindex = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358"], tz='UTC')

In [3]: list(datetimeindex)[0] == datetimeindex[0]  # returned False with datetime.timezone.utc
Out[3]: True
```

~~I realize our timezone support relies heavily on pytz and dateutil, but I am curious how much support we have for `datetime.timezone` objects.~~

So the actual issue was that `datetime.timezone.utc` is (rightly) considered a fixed offset, and the code path was calculating the new value with Python `datetime` and `timedelta` objects, which don't yet support nanosecond resolution.
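The parametrized test in the diff reduces to the check below; running it with a fixed-offset timezone exercises the code path that previously rounded to microseconds (shown here with a pytz fixed offset so it also works on Python 2):

```python
import pandas as pd
import pytz

index = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358",
                          "2018-02-08 15:00:00.168456359"],
                         tz=pytz.FixedOffset(-480))

# with the fix, iteration preserves nanosecond precision, so every
# yielded Timestamp compares equal to positional indexing
for i, ts in enumerate(index):
    assert ts == index[i]
```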
https://api.github.com/repos/pandas-dev/pandas/pulls/19628
2018-02-10T08:34:19Z
2018-02-14T11:05:47Z
2018-02-14T11:05:47Z
2018-02-14T18:55:38Z
fix overflows in Timestamp.tz_localize near boundaries
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 083242cd69b74..0097847874616 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -663,6 +663,7 @@ Timezones - Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in the :class:`DataFrame` constructor, where tz-aware Datetimeindex and a given column name will result in an empty ``DataFrame`` (:issue:`19157`) +- Bug in :func:`Timestamp.tz_localize` where localizing a timestamp near the minimum or maximum valid values could overflow and return a timestamp with an incorrect nanosecond value (:issue:`12677`) Offsets ^^^^^^^ diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 6e7df10e7c424..59c1cde11f925 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -21,8 +21,6 @@ cdef convert_to_tsobject(object ts, object tz, object unit, cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, int32_t nanos=*) -cdef void _localize_tso(_TSObject obj, object tz) - cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2) cdef int64_t get_datetime64_nanos(object val) except? -1 diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 4f1a053da6f1d..f37d71af0bd4d 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -309,12 +309,13 @@ cdef convert_to_tsobject(object ts, object tz, object unit, raise TypeError('Cannot convert input [{}] of type {} to ' 'Timestamp'.format(ts, type(ts))) - if obj.value != NPY_NAT: - check_dts_bounds(&obj.dts) - if tz is not None: - _localize_tso(obj, tz) + localize_tso(obj, tz) + if obj.value != NPY_NAT: + # check_overflows needs to run after localize_tso + check_dts_bounds(&obj.dts) + check_overflows(obj) return obj @@ -391,6 +392,7 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, obj.dts.ps = nanos * 1000 check_dts_bounds(&obj.dts) + check_overflows(obj) return obj @@ -454,6 +456,7 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC') if tz is None: check_dts_bounds(&obj.dts) + check_overflows(obj) return obj else: # Keep the converter same as PyDateTime's @@ -469,7 +472,7 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, else: ts = obj.value if tz is not None: - # shift for _localize_tso + # shift for localize_tso ts = tz_localize_to_utc(np.array([ts], dtype='i8'), tz, ambiguous='raise', errors='raise')[0] @@ -490,12 +493,51 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, return convert_to_tsobject(ts, tz, unit, dayfirst, yearfirst) +cdef inline check_overflows(_TSObject obj): + """ + Check that we haven't silently overflowed in timezone conversion + + Parameters + ---------- + obj : _TSObject + + Returns + ------- + None + + Raises + ------ + OutOfBoundsDatetime + """ + # GH#12677 + if obj.dts.year == 1677: + if not (obj.value < 0): + raise OutOfBoundsDatetime + elif obj.dts.year == 2262: + if not (obj.value > 0): + raise OutOfBoundsDatetime + + # ---------------------------------------------------------------------- # Localization -cdef inline void _localize_tso(_TSObject obj, object 
tz): +cdef inline void localize_tso(_TSObject obj, tzinfo tz): """ - Take a TSObject in UTC and localizes to timezone tz. + Given the UTC nanosecond timestamp in obj.value, find the wall-clock + representation of that timestamp in the given timezone. + + Parameters + ---------- + obj : _TSObject + tz : tzinfo + + Returns + ------- + None + + Notes + ----- + Sets obj.tzinfo inplace, alters obj.dts inplace. """ cdef: ndarray[int64_t] trans, deltas diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index 7a5c6feb8b651..f43651dc6f0db 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -15,12 +15,29 @@ import pandas.util._test_decorators as td from pandas import Timestamp, NaT +from pandas.errors import OutOfBoundsDatetime class TestTimestampTZOperations(object): # -------------------------------------------------------------- # Timestamp.tz_localize + def test_tz_localize_pushes_out_of_bounds(self): + # GH#12677 + # tz_localize that pushes away from the boundary is OK + pac = Timestamp.min.tz_localize('US/Pacific') + assert pac.value > Timestamp.min.value + pac.tz_convert('Asia/Tokyo') # tz_convert doesn't change value + with pytest.raises(OutOfBoundsDatetime): + Timestamp.min.tz_localize('Asia/Tokyo') + + # tz_localize that pushes away from the boundary is OK + tokyo = Timestamp.max.tz_localize('Asia/Tokyo') + assert tokyo.value < Timestamp.max.value + tokyo.tz_convert('US/Pacific') # tz_convert doesn't change value + with pytest.raises(OutOfBoundsDatetime): + Timestamp.max.tz_localize('US/Pacific') + def test_tz_localize_ambiguous_bool(self): # make sure that we are correctly accepting bool values as ambiguous # GH#14402
Flesh out the docstring for `localize_tso`, add slightly stronger typing. Closes #12677
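The boundary behavior pinned down by the new test, in a short session:

```python
import pandas as pd
from pandas.errors import OutOfBoundsDatetime

# localizing away from the boundary shifts the underlying value inward, so it is fine
pac = pd.Timestamp.min.tz_localize('US/Pacific')
assert pac.value > pd.Timestamp.min.value

# localizing toward the boundary used to overflow silently and return a
# Timestamp with an incorrect nanosecond value; now it raises
try:
    pd.Timestamp.min.tz_localize('Asia/Tokyo')
except OutOfBoundsDatetime:
    print('overflow detected, as expected')
```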
https://api.github.com/repos/pandas-dev/pandas/pulls/19626
2018-02-10T02:58:25Z
2018-02-11T00:04:50Z
2018-02-11T00:04:50Z
2018-03-26T01:16:50Z
function for frequently repeated tz-conversion code
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 6e7df10e7c424..0d5e9e3fc5152 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # cython: profile=False -from cpython.datetime cimport datetime +from cpython.datetime cimport datetime, tzinfo from numpy cimport int64_t, int32_t @@ -30,3 +30,5 @@ cdef int64_t get_datetime64_nanos(object val) except? -1 cpdef int64_t pydt_to_i8(object pydt) except? -1 cdef maybe_datetimelike_to_i8(object val) + +cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 4f1a053da6f1d..cfbcb922cb47d 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -499,7 +499,7 @@ cdef inline void _localize_tso(_TSObject obj, object tz): """ cdef: ndarray[int64_t] trans, deltas - int64_t delta + int64_t delta, local_val Py_ssize_t posn datetime dt @@ -510,11 +510,8 @@ cdef inline void _localize_tso(_TSObject obj, object tz): elif obj.value == NPY_NAT: pass elif is_tzlocal(tz): - dt64_to_dtstruct(obj.value, &obj.dts) - dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour, - obj.dts.min, obj.dts.sec, obj.dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - dt64_to_dtstruct(obj.value + delta, &obj.dts) + local_val = tz_convert_utc_to_tzlocal(obj.value, tz) + dt64_to_dtstruct(local_val, &obj.dts) else: # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) @@ -556,6 +553,66 @@ cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz): # ---------------------------------------------------------------------- # Timezone Conversion +cdef inline int64_t tz_convert_tzlocal_to_utc(int64_t val, tzinfo tz): + """ + Parameters + ---------- + val : int64_t + tz : tzinfo + + Returns + ------- + utc_date : int64_t + + See Also + -------- + tz_convert_utc_to_tzlocal + """ + cdef: + pandas_datetimestruct dts + int64_t utc_date, delta + datetime dt + + dt64_to_dtstruct(val, &dts) + dt = datetime(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, tz) + delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 + utc_date = val - delta + return utc_date + + +cdef inline int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz): + """ + Parameters + ---------- + utc_val : int64_t + tz : tzinfo + + Returns + ------- + local_val : int64_t + + See Also + -------- + tz_convert_tzlocal_to_utc + + Notes + ----- + The key difference between this and tz_convert_tzlocal_to_utc is a + an addition flipped to a subtraction in the last line. 
+ """ + cdef: + pandas_datetimestruct dts + int64_t local_val, delta + datetime dt + + dt64_to_dtstruct(utc_val, &dts) + dt = datetime(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, tz) + delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 + local_val = utc_val + delta + return local_val + cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): """ @@ -590,11 +647,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): # Convert to UTC if is_tzlocal(tz1): - dt64_to_dtstruct(val, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz1) - delta = int(get_utcoffset(tz1, dt).total_seconds()) * 1000000000 - utc_date = val - delta + utc_date = tz_convert_tzlocal_to_utc(val, tz1) elif get_timezone(tz1) != 'UTC': trans, deltas, typ = get_dst_info(tz1) pos = trans.searchsorted(val, side='right') - 1 @@ -608,11 +661,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): if get_timezone(tz2) == 'UTC': return utc_date elif is_tzlocal(tz2): - dt64_to_dtstruct(utc_date, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz2) - delta = int(get_utcoffset(tz2, dt).total_seconds()) * 1000000000 - return utc_date + delta + return tz_convert_utc_to_tzlocal(utc_date, tz2) # Convert UTC to other timezone trans, deltas, typ = get_dst_info(tz2) @@ -662,12 +711,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): if v == NPY_NAT: utc_dates[i] = NPY_NAT else: - dt64_to_dtstruct(v, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz1) - delta = (int(get_utcoffset(tz1, dt).total_seconds()) * - 1000000000) - utc_dates[i] = v - delta + utc_dates[i] = tz_convert_tzlocal_to_utc(v, tz1) else: trans, deltas, typ = get_dst_info(tz1) @@ -702,12 +746,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): if v == NPY_NAT: result[i] = NPY_NAT else: - dt64_to_dtstruct(v, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz2) - delta = (int(get_utcoffset(tz2, dt).total_seconds()) * - 1000000000) - result[i] = v + delta + result[i] = tz_convert_utc_to_tzlocal(v, tz2) return result # Convert UTC to other timezone @@ -777,11 +816,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, if is_tzlocal(tz): for i in range(n): v = vals[i] - dt64_to_dtstruct(v, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - result[i] = v - delta + result[i] = tz_convert_tzlocal_to_utc(v, tz) return result if is_string_object(ambiguous): @@ -1024,11 +1059,8 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - dt64_to_dtstruct(stamps[i] + delta, &dts) + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) result[i] = _normalized_stamp(&dts) else: # Adjust datetime64 timestamp, recompute datetimestruct @@ -1097,7 +1129,7 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans, deltas pandas_datetimestruct dts - datetime dt + int64_t local_val if tz is None or is_utc(tz): for i in 
range(n): @@ -1106,11 +1138,9 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): return False elif is_tzlocal(tz): for i in range(n): - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, - dts.sec, dts.us, tz) - dt = dt + tz.utcoffset(dt) - if (dt.hour + dt.minute + dt.second + dt.microsecond) > 0: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: return False else: trans, deltas, typ = get_dst_info(tz) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 3c396a9ff4f3c..dc5d058f41d11 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -39,6 +39,7 @@ cimport ccalendar from ccalendar cimport dayofweek, get_day_of_year from ccalendar import MONTH_NUMBERS from ccalendar cimport is_leapyear +from conversion cimport tz_convert_utc_to_tzlocal from frequencies cimport (get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str, get_rule_month) @@ -591,6 +592,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, ndarray[int64_t] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans, deltas, pos pandas_datetimestruct dts + int64_t local_val if is_utc(tz): for i in range(n): @@ -607,11 +609,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - dt64_to_dtstruct(stamps[i] + delta, &dts) + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) result[i] = get_period_ordinal(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq) diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index b166babe5992c..d0a9501afe566 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -23,6 +23,7 @@ from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info, get_utcoffset) from fields import build_field_sarray from conversion import tz_convert +from conversion cimport tz_convert_utc_to_tzlocal from ccalendar import MONTH_ALIASES, int_to_weekday from pandas._libs.properties import cache_readonly @@ -78,6 +79,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): int reso = RESO_DAY, curr_reso ndarray[int64_t] trans, deltas, pos pandas_datetimestruct dts + int64_t local_val if is_utc(tz): for i in range(n): @@ -91,11 +93,8 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): for i in range(n): if stamps[i] == NPY_NAT: continue - dt64_to_dtstruct(stamps[i], &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) - delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 - dt64_to_dtstruct(stamps[i] + delta, &dts) + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) curr_reso = _reso_stamp(&dts) if curr_reso < reso: reso = curr_reso
If Cython did generators efficiently, we could get rid of a ton of boilerplate where these are used.
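A rough pure-Python rendition of the paired helpers (the real versions are Cython and operate on `pandas_datetimestruct`); the point is that the two directions differ only in the sign of the offset on the last line:

```python
from datetime import datetime, timedelta

import pytz

EPOCH = datetime(1970, 1, 1)

def tz_convert_tzlocal_to_utc(val_ns, tz):
    # interpret the i8 value as wall-clock time in tz, return UTC nanoseconds
    dt = EPOCH + timedelta(microseconds=val_ns // 1000)
    delta_ns = int(tz.utcoffset(dt).total_seconds()) * 10**9
    return val_ns - delta_ns

def tz_convert_utc_to_tzlocal(utc_ns, tz):
    # inverse direction: addition instead of subtraction
    dt = EPOCH + timedelta(microseconds=utc_ns // 1000)
    delta_ns = int(tz.utcoffset(dt).total_seconds()) * 10**9
    return utc_ns + delta_ns

# the round trip holds exactly for a fixed offset
tz = pytz.FixedOffset(-480)
utc_ns = 10**18
assert tz_convert_tzlocal_to_utc(tz_convert_utc_to_tzlocal(utc_ns, tz), tz) == utc_ns
```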
https://api.github.com/repos/pandas-dev/pandas/pulls/19625
2018-02-10T01:32:34Z
2018-02-10T16:59:41Z
2018-02-10T16:59:41Z
2018-02-10T17:28:52Z
Fix left join turning into outer join
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 083242cd69b74..3b626ddced5bf 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -772,7 +772,7 @@ Reshaping - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) - Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) -- +- Bug in :func:`DataFrame.join` which does an *outer* instead of a *left* join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) Other ^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6d8dcb8a1ca89..a6417f821a4e6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5328,18 +5328,17 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') - # join indexes only using concat - if how == 'left': - how = 'outer' - join_axes = [self.index] - else: - join_axes = None - frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) + # join indexes only using concat if can_concat: + if how == 'left': + how = 'outer' + join_axes = [self.index] + else: + join_axes = None return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index afecba2026dd7..ccdba6df2521a 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -165,3 +165,20 @@ def test_join_period_index(frame_with_period_index): index=frame_with_period_index.index) tm.assert_frame_equal(joined, expected) + + +def test_join_left_sequence_non_unique_index(): + # https://github.com/pandas-dev/pandas/issues/19607 + df1 = DataFrame({'a': [0, 10, 20]}, index=[1, 2, 3]) + df2 = DataFrame({'b': [100, 200, 300]}, index=[4, 3, 2]) + df3 = DataFrame({'c': [400, 500, 600]}, index=[2, 2, 4]) + + joined = df1.join([df2, df3], how='left') + + expected = DataFrame({ + 'a': [0, 10, 10, 20], + 'b': [np.nan, 300, 300, 200], + 'c': [np.nan, 400, 500, np.nan] + }, index=[1, 2, 2, 3]) + + tm.assert_frame_equal(joined, expected)
- [x] closes #19607
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
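The reproduction added as a test, condensed (GH 19607); before the fix, label 4 from the right-hand frames leaked into the result:

```python
import pandas as pd

df1 = pd.DataFrame({'a': [0, 10, 20]}, index=[1, 2, 3])
df2 = pd.DataFrame({'b': [100, 200, 300]}, index=[4, 3, 2])
df3 = pd.DataFrame({'c': [400, 500, 600]}, index=[2, 2, 4])

joined = df1.join([df2, df3], how='left')

# a left join must keep only df1's labels; label 4 must not appear
assert 4 not in joined.index
print(joined)
```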
https://api.github.com/repos/pandas-dev/pandas/pulls/19624
2018-02-09T20:53:14Z
2018-02-10T16:53:38Z
2018-02-10T16:53:38Z
2018-02-10T16:54:11Z
order of exceptions in array_to_datetime
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 877d7deff6ff4..a035bab2a7049 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -7,7 +7,7 @@ import numpy as np cnp.import_array() -from cpython cimport PyFloat_Check +from cpython cimport PyFloat_Check, PyUnicode_Check from util cimport (is_integer_object, is_float_object, is_string_object, is_datetime64_object) @@ -56,6 +56,8 @@ from tslibs.timestamps cimport (create_timestamp_from_ts, _NS_UPPER_BOUND, _NS_LOWER_BOUND) from tslibs.timestamps import Timestamp +cdef bint PY2 = str == bytes + cdef inline object create_datetime_from_ts( int64_t value, pandas_datetimestruct dts, @@ -549,10 +551,10 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', raise elif PyDate_Check(val): + seen_datetime = 1 iresult[i] = pydate_to_dt64(val, &dts) try: check_dts_bounds(&dts) - seen_datetime = 1 except ValueError: if is_coerce: iresult[i] = NPY_NAT @@ -560,12 +562,12 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', raise elif is_datetime64_object(val): + seen_datetime = 1 if get_datetime64_value(val) == NPY_NAT: iresult[i] = NPY_NAT else: try: iresult[i] = get_datetime64_nanos(val) - seen_datetime = 1 except ValueError: if is_coerce: iresult[i] = NPY_NAT @@ -574,19 +576,18 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', elif is_integer_object(val) or is_float_object(val): # these must be ns unit by-definition + seen_integer = 1 if val != val or val == NPY_NAT: iresult[i] = NPY_NAT elif is_raise or is_ignore: iresult[i] = val - seen_integer = 1 else: # coerce # we now need to parse this as if unit='ns' # we can ONLY accept integers at this point # if we have previously (or in future accept # datetimes/strings, then we must coerce) - seen_integer = 1 try: iresult[i] = cast_from_unit(val, 'ns') except: @@ -594,46 +595,25 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', elif is_string_object(val): # string + seen_string = 1 if len(val) == 0 or val in nat_strings: iresult[i] = NPY_NAT continue - - seen_string = 1 + if PyUnicode_Check(val) and PY2: + val = val.encode('utf-8') try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) - value = dtstruct_to_dt64(&dts) - if out_local == 1: - tz = pytz.FixedOffset(out_tzoffset) - value = tz_convert_single(value, tz, 'UTC') - iresult[i] = value - check_dts_bounds(&dts) - except OutOfBoundsDatetime: - # GH#19382 for just-barely-OutOfBounds falling back to - # dateutil parser will return incorrect result because - # it will ignore nanoseconds - if require_iso8601: - if _parse_today_now(val, &iresult[i]): - continue - elif is_coerce: - iresult[i] = NPY_NAT - continue - elif is_raise: - raise ValueError("time data {val} doesn't match " - "format specified" - .format(val=val)) - return values - elif is_coerce: - iresult[i] = NPY_NAT - continue - raise except ValueError: - # if requiring iso8601 strings, skip trying other formats - if require_iso8601: - if _parse_today_now(val, &iresult[i]): - continue - elif is_coerce: + # A ValueError at this point is a _parsing_ error + # specifically _not_ OutOfBoundsDatetime + if _parse_today_now(val, &iresult[i]): + continue + elif require_iso8601: + # if requiring iso8601 strings, skip trying + # other formats + if is_coerce: iresult[i] = NPY_NAT continue elif is_raise: @@ -646,8 +626,6 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', py_dt = parse_datetime_string(val, dayfirst=dayfirst, yearfirst=yearfirst) except Exception: - if _parse_today_now(val, 
&iresult[i]): - continue if is_coerce: iresult[i] = NPY_NAT continue @@ -656,16 +634,42 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', try: _ts = convert_datetime_to_tsobject(py_dt, None) iresult[i] = _ts.value - except ValueError: + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT continue raise except: + # TODO: What exception are we concerned with here? if is_coerce: iresult[i] = NPY_NAT continue raise + else: + # No error raised by string_to_dts, pick back up + # where we left off + value = dtstruct_to_dt64(&dts) + if out_local == 1: + tz = pytz.FixedOffset(out_tzoffset) + value = tz_convert_single(value, tz, 'UTC') + iresult[i] = value + try: + check_dts_bounds(&dts) + except OutOfBoundsDatetime: + # GH#19382 for just-barely-OutOfBounds falling back to + # dateutil parser will return incorrect result because + # it will ignore nanoseconds + if is_coerce: + iresult[i] = NPY_NAT + continue + elif require_iso8601: + if is_raise: + raise ValueError("time data {val} doesn't " + "match format specified" + .format(val=val)) + return values + raise + else: if is_coerce: iresult[i] = NPY_NAT diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index f8b1f68ba33ce..b95ae07052ecb 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -18,7 +18,7 @@ from pandas.core.tools import datetimes as tools from pandas.errors import OutOfBoundsDatetime -from pandas.compat import lmap +from pandas.compat import lmap, PY3 from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.util import testing as tm @@ -238,6 +238,13 @@ def test_to_datetime_today(self): assert pdtoday.tzinfo is None assert pdtoday2.tzinfo is None + def test_to_datetime_today_now_unicode_bytes(self): + to_datetime([u'now']) + to_datetime([u'today']) + if not PY3: + to_datetime(['now']) + to_datetime(['today']) + @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_dt64s(self, cache): in_bound_dts = [
First, this cleans things up by moving the seen_datetime and seen_integer variables to the tops of their respective blocks. Mainly, it splits the `try:` block for `string_to_dts` into two pieces based on the two lines in that block that _could_ raise, and handles errors more specifically.
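The user-visible check added with this change, from the new test: `'now'` and `'today'` must parse whether the input is a byte string or unicode (a distinction that only exists on Python 2):

```python
import pandas as pd

# both spellings must parse; on Python 2 the unicode path previously
# needed an explicit utf-8 encode before reaching the C parser
print(pd.to_datetime([u'now']))
print(pd.to_datetime([u'today']))
```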
https://api.github.com/repos/pandas-dev/pandas/pulls/19621
2018-02-09T17:01:00Z
2018-02-10T17:08:40Z
2018-02-10T17:08:40Z
2018-02-10T17:28:41Z
TST: set multi_statement flag for pymysql tests
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index e30461d06b8ea..822144a80bc9a 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -13,7 +13,7 @@ lxml html5lib jinja2 sqlalchemy -pymysql<0.8.0 +pymysql feather-format pyarrow psycopg2 diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 0cc4101cd6304..f3ab74d37a2bc 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1731,13 +1731,16 @@ class _TestMySQLAlchemy(object): @classmethod def connect(cls): url = 'mysql+{driver}://root@localhost/pandas_nosetest' - return sqlalchemy.create_engine(url.format(driver=cls.driver)) + return sqlalchemy.create_engine(url.format(driver=cls.driver), + connect_args=cls.connect_args) @classmethod def setup_driver(cls): try: import pymysql # noqa cls.driver = 'pymysql' + from pymysql.constants import CLIENT + cls.connect_args = {'client_flag': CLIENT.MULTI_STATEMENTS} except ImportError: pytest.skip('pymysql not installed')
Trying something out for #19462. Closes #19462.
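The connection change from the diff in isolation; this assumes a local MySQL server with a `pandas_nosetest` database, as in the CI setup:

```python
import sqlalchemy
from pymysql.constants import CLIENT

# pymysql >= 0.8 no longer enables multi-statement SQL by default,
# so the test suite must request it explicitly
engine = sqlalchemy.create_engine(
    'mysql+pymysql://root@localhost/pandas_nosetest',
    connect_args={'client_flag': CLIENT.MULTI_STATEMENTS})
```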
https://api.github.com/repos/pandas-dev/pandas/pulls/19619
2018-02-09T15:09:54Z
2018-02-11T22:24:35Z
2018-02-11T22:24:35Z
2018-02-11T22:24:38Z
De-duplicate masking/fallback logic in ops
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 28923f0fbf240..f0adc77e46e5d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3920,17 +3920,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): new_index, new_columns = this.index, this.columns def _arith_op(left, right): - if fill_value is not None: - left_mask = isna(left) - right_mask = isna(right) - left = left.copy() - right = right.copy() - - # one but not both - mask = left_mask ^ right_mask - left[left_mask & mask] = fill_value - right[right_mask & mask] = fill_value - + left, right = ops.fill_binop(left, right, fill_value) return func(left, right) if this._is_mixed_type or other._is_mixed_type: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index effa35695fcd1..4c234ccb4dd47 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -398,6 +398,79 @@ def _make_flex_doc(op_name, typ): return doc +# ----------------------------------------------------------------------------- +# Masking NA values and fallbacks for operations numpy does not support + +def fill_binop(left, right, fill_value): + """ + If a non-None fill_value is given, replace null entries in left and right + with this value, but only in positions where _one_ of left/right is null, + not both. + + Parameters + ---------- + left : array-like + right : array-like + fill_value : object + + Returns + ------- + left : array-like + right : array-like + + Notes + ----- + Makes copies if fill_value is not None + """ + # TODO: can we make a no-copy implementation? + if fill_value is not None: + left_mask = isna(left) + right_mask = isna(right) + left = left.copy() + right = right.copy() + + # one but not both + mask = left_mask ^ right_mask + left[left_mask & mask] = fill_value + right[right_mask & mask] = fill_value + return left, right + + +def mask_cmp_op(x, y, op, allowed_types): + """ + Apply the function `op` to only non-null points in x and y. + + Parameters + ---------- + x : array-like + y : array-like + op : binary operation + allowed_types : class or tuple of classes + + Returns + ------- + result : ndarray[bool] + """ + # TODO: Can we make the allowed_types arg unnecessary? 
+ xrav = x.ravel() + result = np.empty(x.size, dtype=bool) + if isinstance(y, allowed_types): + yrav = y.ravel() + mask = notna(xrav) & notna(yrav) + result[mask] = op(np.array(list(xrav[mask])), + np.array(list(yrav[mask]))) + else: + mask = notna(xrav) + result[mask] = op(np.array(list(xrav[mask])), y) + + if op == operator.ne: # pragma: no cover + np.putmask(result, ~mask, True) + else: + np.putmask(result, ~mask, False) + result = result.reshape(x.shape) + return result + + # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory # methods @@ -1127,23 +1200,7 @@ def na_op(x, y): with np.errstate(invalid='ignore'): result = op(x, y) except TypeError: - xrav = x.ravel() - result = np.empty(x.size, dtype=bool) - if isinstance(y, (np.ndarray, ABCSeries)): - yrav = y.ravel() - mask = notna(xrav) & notna(yrav) - result[mask] = op(np.array(list(xrav[mask])), - np.array(list(yrav[mask]))) - else: - mask = notna(xrav) - result[mask] = op(np.array(list(xrav[mask])), y) - - if op == operator.ne: # pragma: no cover - np.putmask(result, ~mask, True) - else: - np.putmask(result, ~mask, False) - result = result.reshape(x.shape) - + result = mask_cmp_op(x, y, op, (np.ndarray, ABCSeries)) return result @Appender('Wrapper for flexible comparison methods {name}' @@ -1221,23 +1278,7 @@ def na_op(x, y): try: result = expressions.evaluate(op, str_rep, x, y) except TypeError: - xrav = x.ravel() - result = np.empty(x.size, dtype=bool) - if isinstance(y, np.ndarray): - yrav = y.ravel() - mask = notna(xrav) & notna(yrav) - result[mask] = op(np.array(list(xrav[mask])), - np.array(list(yrav[mask]))) - else: - mask = notna(xrav) - result[mask] = op(np.array(list(xrav[mask])), y) - - if op == operator.ne: # pragma: no cover - np.putmask(result, ~mask, True) - else: - np.putmask(result, ~mask, False) - result = result.reshape(x.shape) - + result = mask_cmp_op(x, y, op, np.ndarray) return result @Appender('Wrapper for comparison method {name}'.format(name=name)) diff --git a/pandas/core/series.py b/pandas/core/series.py index e4b8979d6393a..655eaa5373f5a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1725,19 +1725,8 @@ def _binop(self, other, func, level=None, fill_value=None): copy=False) new_index = this.index - this_vals = this.values - other_vals = other.values - - if fill_value is not None: - this_mask = isna(this_vals) - other_mask = isna(other_vals) - this_vals = this_vals.copy() - other_vals = other_vals.copy() - - # one but not both - mask = this_mask ^ other_mask - this_vals[this_mask & mask] = fill_value - other_vals[other_mask & mask] = fill_value + this_vals, other_vals = ops.fill_binop(this.values, other.values, + fill_value) with np.errstate(all='ignore'): result = func(this_vals, other_vals)
Some nice deletions here: the NA-masking logic that was duplicated across `DataFrame._combine_frame`, `Series._binop`, and the frame comparison wrappers is factored out into shared `ops.fill_binop` and `ops.mask_cmp_op` helpers.
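For context, a minimal sketch of the fill semantics that `fill_binop` now centralizes (this is the documented behavior of the flex ops, not new code from this patch): the fill value is applied only where exactly one side is null.

```python
import numpy as np
import pandas as pd

# fill_value replaces NaN only where exactly one of the two inputs is null;
# positions that are null on both sides stay null in the result.
s1 = pd.Series([1.0, np.nan, 3.0, np.nan])
s2 = pd.Series([10.0, 20.0, np.nan, np.nan])

print(s1.add(s2, fill_value=0))
# 0    11.0   <- both present
# 1    20.0   <- s1's NaN filled with 0
# 2     3.0   <- s2's NaN filled with 0
# 3     NaN   <- null on both sides, never filled
```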
https://api.github.com/repos/pandas-dev/pandas/pulls/19613
2018-02-09T04:23:04Z
2018-02-13T00:19:23Z
2018-02-13T00:19:23Z
2018-06-22T03:32:38Z
Fix uncaught OutOfBounds in array_to_datetime
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6f48d9a6c63c9..41d6520b3d91a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -703,7 +703,7 @@ Datetimelike - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) -- Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) +- Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (:issue:`19043`) - Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`) - Bug where dividing a scalar timedelta-like object with :class:`TimedeltaIndex` performed the reciprocal operation (:issue:`19125`) - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) @@ -713,6 +713,7 @@ Datetimelike - Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`) - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) +- Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) - Timezones diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index a035bab2a7049..85e667521e5f2 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -524,11 +524,10 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', seen_datetime = 1 if val.tzinfo is not None: if utc_convert: - _ts = convert_datetime_to_tsobject(val, None) - iresult[i] = _ts.value try: - check_dts_bounds(&_ts.dts) - except ValueError: + _ts = convert_datetime_to_tsobject(val, None) + iresult[i] = _ts.value + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT continue @@ -544,7 +543,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', iresult[i] += val.nanosecond try: check_dts_bounds(&dts) - except ValueError: + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT continue @@ -555,7 +554,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', iresult[i] = pydate_to_dt64(val, &dts) try: check_dts_bounds(&dts) - except ValueError: + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT continue @@ -568,7 +567,7 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', else: try: iresult[i] = get_datetime64_nanos(val) - except ValueError: + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT continue diff --git 
a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index b95ae07052ecb..1bdf5c07e1cd0 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -8,7 +8,7 @@ import dateutil import numpy as np from dateutil.parser import parse -from datetime import datetime, date, time +from datetime import datetime, date, time, timedelta from distutils.version import LooseVersion import pandas as pd @@ -1491,6 +1491,15 @@ def test_parsers_iso8601(self): class TestArrayToDatetime(object): + def test_coerce_out_of_bounds_utc(self): + # GH#19612 + ts = Timestamp('1900-01-01', tz='US/Pacific') + dt = ts.to_pydatetime() - timedelta(days=365 * 300) # ~1600AD + arr = np.array([dt]) + result = tslib.array_to_datetime(arr, utc=True, errors='coerce') + expected = np.array(['NaT'], dtype='datetime64[ns]') + tm.assert_numpy_array_equal(result, expected) + def test_parsing_valid_dates(self): arr = np.array(['01-01-2013', '01-02-2013'], dtype=object) tm.assert_numpy_array_equal(
convert_datetime_to_tsobject calls check_dts_bounds internally, so it can raise OutOfBoundsDatetime itself. One of the usages in array_to_datetime gets that right; the other called it outside the try block (and then caught ValueError from a redundant bounds check), so with errors='coerce' and utc=True the OutOfBoundsDatetime escaped instead of coercing to NaT.
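A public-API repro of the fixed path, adapted from the regression test above (the test exercises `tslib.array_to_datetime` directly; `pd.to_datetime` with `utc=True` should route through the same code):

```python
from datetime import timedelta
import pandas as pd

# GH#19612: a tz-aware datetime far outside the datetime64[ns] range
# (~1600AD) should coerce to NaT instead of raising OutOfBoundsDatetime.
ts = pd.Timestamp('1900-01-01', tz='US/Pacific')
dt = ts.to_pydatetime() - timedelta(days=365 * 300)

result = pd.to_datetime([dt], errors='coerce', utc=True)
print(result)  # DatetimeIndex(['NaT'], ...) after the fix
```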
https://api.github.com/repos/pandas-dev/pandas/pulls/19612
2018-02-09T04:00:16Z
2018-02-11T14:48:36Z
2018-02-11T14:48:36Z
2018-02-11T21:36:48Z
dispatch frame methods to series versions instead of re-implementing masking etc
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a001037b573d4..b96af6af3707f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3944,34 +3944,27 @@ def _combine_frame(self, other, func, fill_value=None, level=None): new_index, new_columns = this.index, this.columns def _arith_op(left, right): + # for the mixed_type case where we iterate over columns, + # _arith_op(left, right) is equivalent to + # left._binop(right, func, fill_value=fill_value) left, right = ops.fill_binop(left, right, fill_value) return func(left, right) if this._is_mixed_type or other._is_mixed_type: - - # unique + # iterate over columns if this.columns.is_unique: - - def f(col): - r = _arith_op(this[col].values, other[col].values) - return self._constructor_sliced(r, index=new_index, - dtype=r.dtype) - - result = {col: f(col) for col in this} - - # non-unique + # unique columns + result = {col: _arith_op(this[col], other[col]) + for col in this} + result = self._constructor(result, index=new_index, + columns=new_columns, copy=False) else: - - def f(i): - r = _arith_op(this.iloc[:, i].values, - other.iloc[:, i].values) - return self._constructor_sliced(r, index=new_index, - dtype=r.dtype) - - result = {i: f(i) for i, col in enumerate(this.columns)} + # non-unique columns + result = {i: _arith_op(this.iloc[:, i], other.iloc[:, i]) + for i, col in enumerate(this.columns)} result = self._constructor(result, index=new_index, copy=False) result.columns = new_columns - return result + return result else: result = _arith_op(this.values, other.values) @@ -3979,36 +3972,11 @@ def f(i): return self._constructor(result, index=new_index, columns=new_columns, copy=False) - def _combine_series(self, other, func, fill_value=None, axis=None, - level=None, try_cast=True): - if fill_value is not None: - raise NotImplementedError("fill_value {fill} not supported." 
- .format(fill=fill_value)) - - if axis is not None: - axis = self._get_axis_name(axis) - if axis == 'index': - return self._combine_match_index(other, func, level=level) - else: - return self._combine_match_columns(other, func, level=level, - try_cast=try_cast) - else: - if not len(other): - return self * np.nan - - if not len(self): - # Ambiguous case, use _series so works with DataFrame - return self._constructor(data=self._series, index=self.index, - columns=self.columns) - - # default axis is columns - return self._combine_match_columns(other, func, level=level, - try_cast=try_cast) - def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) - return self._constructor(func(left.values.T, right.values).T, + new_data = func(left.values.T, right.values).T + return self._constructor(new_data, index=left.index, columns=self.columns, copy=False) @@ -4027,7 +3995,8 @@ def _combine_const(self, other, func, errors='raise', try_cast=True): try_cast=try_cast) return self._constructor(new_data) - def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True): + def _compare_frame(self, other, func, str_rep, try_cast=True): + # compare_frame assumes self._indexed_same(other) import pandas.core.computation.expressions as expressions # unique @@ -4052,19 +4021,6 @@ def _compare(a, b): result.columns = self.columns return result - def _compare_frame(self, other, func, str_rep, try_cast=True): - if not self._indexed_same(other): - raise ValueError('Can only compare identically-labeled ' - 'DataFrame objects') - return self._compare_frame_evaluate(other, func, str_rep, - try_cast=try_cast) - - def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True): - if not self._indexed_same(other): - self, other = self.align(other, 'outer', level=level, copy=False) - return self._compare_frame_evaluate(other, func, str_rep, - try_cast=try_cast) - def combine(self, other, func, fill_value=None, overwrite=True): """ Add two DataFrame objects and do not propagate NaN values, so if for a diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 81b6b28d3927e..a84c00a6b84ce 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -55,7 +55,7 @@ import pandas.core.algorithms as algos import pandas.core.sorting as sorting from pandas.io.formats.printing import pprint_thing -from pandas.core.ops import _comp_method_OBJECT_ARRAY +from pandas.core.ops import _comp_method_OBJECT_ARRAY, make_invalid_op from pandas.core.config import get_option from pandas.core.strings import StringMethods @@ -82,26 +82,6 @@ def _try_get_item(x): return x -def _make_invalid_op(name): - """ - Return a binary method that always raises a TypeError. 
- - Parameters - ---------- - name : str - - Returns - ------- - invalid_op : function - """ - def invalid_op(self, other=None): - raise TypeError("cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self))) - - invalid_op.__name__ = name - return invalid_op - - class InvalidIndexError(Exception): pass @@ -3994,22 +3974,23 @@ def _evaluate_compare(self, other): @classmethod def _add_numeric_methods_add_sub_disabled(cls): """ add in the numeric add/sub methods to disable """ - cls.__add__ = cls.__radd__ = __iadd__ = _make_invalid_op('__add__') # noqa - cls.__sub__ = __isub__ = _make_invalid_op('__sub__') # noqa + cls.__add__ = cls.__radd__ = __iadd__ = make_invalid_op('__add__') # noqa + cls.__sub__ = __isub__ = make_invalid_op('__sub__') # noqa @classmethod def _add_numeric_methods_disabled(cls): """ add in numeric methods to disable other than add/sub """ - cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__') - cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__') - cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__') - cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__') + cls.__pow__ = make_invalid_op('__pow__') + cls.__rpow__ = make_invalid_op('__rpow__') + cls.__mul__ = cls.__rmul__ = make_invalid_op('__mul__') + cls.__floordiv__ = cls.__rfloordiv__ = make_invalid_op('__floordiv__') + cls.__truediv__ = cls.__rtruediv__ = make_invalid_op('__truediv__') if not compat.PY3: - cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__') - cls.__neg__ = _make_invalid_op('__neg__') - cls.__pos__ = _make_invalid_op('__pos__') - cls.__abs__ = _make_invalid_op('__abs__') - cls.__inv__ = _make_invalid_op('__inv__') + cls.__div__ = cls.__rdiv__ = make_invalid_op('__div__') + cls.__neg__ = make_invalid_op('__neg__') + cls.__pos__ = make_invalid_op('__pos__') + cls.__abs__ = make_invalid_op('__abs__') + cls.__inv__ = make_invalid_op('__inv__') def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ @@ -4207,8 +4188,8 @@ def logical_func(self, *args, **kwargs): @classmethod def _add_logical_methods_disabled(cls): """ add in logical methods to disable """ - cls.all = _make_invalid_op('all') - cls.any = _make_invalid_op('any') + cls.all = make_invalid_op('all') + cls.any = make_invalid_op('any') Index._add_numeric_methods_disabled() diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 4c234ccb4dd47..fd4fc5540fcec 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -96,6 +96,26 @@ def rxor(left, right): # ----------------------------------------------------------------------------- +def make_invalid_op(name): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + def invalid_op(self, other=None): + raise TypeError("cannot perform {name} with this index type: " + "{typ}".format(name=name, typ=type(self).__name__)) + + invalid_op.__name__ = name + return invalid_op + + def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. 
@@ -1047,8 +1067,8 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): elif isinstance(other, (np.ndarray, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') - return self._binop(self._constructor(other, self.index), op, - level=level, fill_value=fill_value) + other = self._constructor(other, self.index) + return self._binop(other, op, level=level, fill_value=fill_value) else: if fill_value is not None: self = self.fillna(fill_value) @@ -1071,6 +1091,51 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # ----------------------------------------------------------------------------- # DataFrame +def _combine_series_frame(self, other, func, fill_value=None, axis=None, + level=None, try_cast=True): + """ + Apply binary operator `func` to self, other using alignment and fill + conventions determined by the fill_value, axis, level, and try_cast kwargs. + + Parameters + ---------- + self : DataFrame + other : Series + func : binary operator + fill_value : object, default None + axis : {0, 1, 'columns', 'index', None}, default None + level : int or None, default None + try_cast : bool, default True + + Returns + ------- + result : DataFrame + """ + if fill_value is not None: + raise NotImplementedError("fill_value {fill} not supported." + .format(fill=fill_value)) + + if axis is not None: + axis = self._get_axis_number(axis) + if axis == 0: + return self._combine_match_index(other, func, level=level) + else: + return self._combine_match_columns(other, func, level=level, + try_cast=try_cast) + else: + if not len(other): + return self * np.nan + + if not len(self): + # Ambiguous case, use _series so works with DataFrame + return self._constructor(data=self._series, index=self.index, + columns=self.columns) + + # default axis is columns + return self._combine_match_columns(other, func, level=level, + try_cast=try_cast) + + def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ @@ -1179,8 +1244,9 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): if isinstance(other, ABCDataFrame): # Another DataFrame return self._combine_frame(other, na_op, fill_value, level) elif isinstance(other, ABCSeries): - return self._combine_series(other, na_op, fill_value, axis, level, - try_cast=True) + return _combine_series_frame(self, other, na_op, + fill_value=fill_value, axis=axis, + level=level, try_cast=True) else: if fill_value is not None: self = self.fillna(fill_value) @@ -1209,13 +1275,17 @@ def f(self, other, axis=default_axis, level=None): other = _align_method_FRAME(self, other, axis) - if isinstance(other, ABCDataFrame): # Another DataFrame - return self._flex_compare_frame(other, na_op, str_rep, level, - try_cast=False) + if isinstance(other, ABCDataFrame): + # Another DataFrame + if not self._indexed_same(other): + self, other = self.align(other, 'outer', + level=level, copy=False) + return self._compare_frame(other, na_op, str_rep, try_cast=False) elif isinstance(other, ABCSeries): - return self._combine_series(other, na_op, None, axis, level, - try_cast=False) + return _combine_series_frame(self, other, na_op, + fill_value=None, axis=axis, + level=level, try_cast=False) else: return self._combine_const(other, na_op, try_cast=False) @@ -1227,11 +1297,17 @@ def f(self, other, axis=default_axis, level=None): def _comp_method_FRAME(func, name, str_rep): @Appender('Wrapper for comparison method {name}'.format(name=name)) def f(self, other): - if 
isinstance(other, ABCDataFrame): # Another DataFrame - return self._compare_frame(other, func, str_rep) + if isinstance(other, ABCDataFrame): + # Another DataFrame + if not self._indexed_same(other): + raise ValueError('Can only compare identically-labeled ' + 'DataFrame objects') + return self._compare_frame(other, func, str_rep, try_cast=True) + elif isinstance(other, ABCSeries): - return self._combine_series(other, func, - axis=None, try_cast=False) + return _combine_series_frame(self, other, func, + fill_value=None, axis=None, + level=None, try_cast=False) else: # straight boolean comparisons we want to allow all columns diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index a3a799aed1c55..65afe85628f8e 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -72,6 +72,23 @@ def test_tz_aware_scalar_comparison(self, timestamps): # ------------------------------------------------------------------- # Arithmetic +class TestFrameFlexArithmetic(object): + def test_df_add_flex_filled_mixed_dtypes(self): + # GH#19611 + dti = pd.date_range('2016-01-01', periods=3) + ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]') + df = pd.DataFrame({'A': dti, 'B': ser}) + other = pd.DataFrame({'A': ser, 'B': ser}) + fill = pd.Timedelta(days=1).to_timedelta64() + result = df.add(other, fill_value=fill) + + expected = pd.DataFrame( + {'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'], + dtype='datetime64[ns]'), + 'B': ser * 2}) + tm.assert_frame_equal(result, expected) + + class TestFrameMulDiv(object): """Tests for DataFrame multiplication and division""" # ------------------------------------------------------------------
This moves some of DataFrame's dispatching logic into `ops`, mostly in ways that don't change any behavior. The place the logic _is_ changed is `DataFrame._combine_frame`: instead of applying the masked `_arith_op` to raw ndarrays and re-boxing each result, as in `{col: _arith_op(this[col].values, other[col].values) for col in this.columns}`, we now dispatch directly to the Series versions with `{col: _arith_op(this[col], other[col]) for col in this.columns}`, letting Series handle the masking and boxing.
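The user-visible payoff shows up in the new test: because each column now goes through the Series path, `fill_value` works on mixed-dtype frames (GH#19611). Condensed from the test added above:

```python
import pandas as pd

# With column-wise dispatch to Series, fill_value works even when the
# columns have different dtypes (datetime64 and timedelta64 here).
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})

fill = pd.Timedelta(days=1).to_timedelta64()
print(df.add(other, fill_value=fill))
```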
https://api.github.com/repos/pandas-dev/pandas/pulls/19611
2018-02-09T02:07:33Z
2018-02-18T16:36:25Z
2018-02-18T16:36:25Z
2018-02-18T18:23:05Z
Consolidate nth / last object Groupby Implementations
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 55de700c9af52..f6c3aa151c6b5 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -25,105 +25,6 @@ cdef double NaN = <double> np.NaN cdef double nan = NaN -# TODO: aggregate multiple columns in single pass -# ---------------------------------------------------------------------- -# first, nth, last - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_nth_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels, - int64_t rank, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - object val - float64_t count - ndarray[int64_t, ndim=2] nobs - ndarray[object, ndim=2] resx - - assert min_count == -1, "'min_count' only used in add and prod" - - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty((<object> out).shape, dtype=object) - - N, K = (<object> values).shape - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val - - for i in range(len(counts)): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = <object> nan - else: - out[i, j] = resx[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_last_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - object val - float64_t count - ndarray[object, ndim=2] resx - ndarray[int64_t, ndim=2] nobs - - assert min_count == -1, "'min_count' only used in add and prod" - - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty((<object> out).shape, dtype=object) - - N, K = (<object> values).shape - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val - - for i in range(len(counts)): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = nan - else: - out[i, j] = resx[i, j] - - cdef inline float64_t median_linear(float64_t* a, int n) nogil: cdef int i, j, na_count = 0 cdef float64_t result diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index a751fadaf48cf..025f1b2bc011e 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -325,7 +325,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, # name, c_type, dest_type2, nan_val dtypes = [('float64', 'float64_t', 'float64_t', 'NAN'), ('float32', 'float32_t', 'float32_t', 'NAN'), - ('int64', 'int64_t', 'int64_t', 'iNaT')] + ('int64', 'int64_t', 'int64_t', 'iNaT'), + ('object', 'object', 'object', 'NAN')] def get_dispatch(dtypes): @@ -350,7 +351,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count + {{dest_type2}} val ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs @@ -360,11 +361,19 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object> out).shape, dtype=np.int64) + {{if name=='object'}} + resx = np.empty((<object> out).shape, dtype=object) + {{else}} 
resx = np.empty_like(out) + {{endif}} N, K = (<object> values).shape + {{if name == "object"}} + if True: # make templating happy + {{else}} with nogil: + {{endif}} for i in range(N): lab = labels[i] if lab < 0: @@ -375,11 +384,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} if val == val and val != {{nan_val}}: - {{endif}} nobs[lab, j] += 1 resx[lab, j] = val @@ -390,7 +395,6 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, else: out[i, j] = resx[i, j] - @cython.wraparound(False) @cython.boundscheck(False) def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, @@ -403,7 +407,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{dest_type2}} val, count + {{dest_type2}} val ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs @@ -413,11 +417,19 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object> out).shape, dtype=np.int64) + {{if name=='object'}} + resx = np.empty((<object> out).shape, dtype=object) + {{else}} resx = np.empty_like(out) + {{endif}} N, K = (<object> values).shape + {{if name == "object"}} + if True: # make templating happy + {{else}} with nogil: + {{endif}} for i in range(N): lab = labels[i] if lab < 0: @@ -428,11 +440,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} if val == val and val != {{nan_val}}: - {{endif}} nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 5172efe25d697..4a3e29d295e0e 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2086,7 +2086,19 @@ def test_median_empty_bins(self): expected = df.groupby(bins).agg(lambda x: x.median()) assert_frame_equal(result, expected) - def test_groupby_non_arithmetic_agg_types(self): + @pytest.mark.parametrize("dtype", [ + 'int8', 'int16', 'int32', 'int64', 'float32', 'float64']) + @pytest.mark.parametrize("method,data", [ + ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), + ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), + ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), + ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), + ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], + 'args': [1]}), + ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], + 'out_type': 'int64'}) + ]) + def test_groupby_non_arithmetic_agg_types(self, dtype, method, data): # GH9311, GH6620 df = pd.DataFrame( [{'a': 1, 'b': 1}, @@ -2094,39 +2106,25 @@ def test_groupby_non_arithmetic_agg_types(self): {'a': 2, 'b': 3}, {'a': 2, 'b': 4}]) - dtypes = ['int8', 'int16', 'int32', 'int64', 'float32', 'float64'] + df['b'] = df.b.astype(dtype) - grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, - 'last': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}, - 'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, - 'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}, - 'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], - 'args': [1]}, - 'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], - 'out_type': 'int64'}} + if 'args' not in data: + data['args'] = [] - for dtype in dtypes: - df_in = df.copy() - df_in['b'] = df_in.b.astype(dtype) - - for method, data in 
compat.iteritems(grp_exp): - if 'args' not in data: - data['args'] = [] - - if 'out_type' in data: - out_type = data['out_type'] - else: - out_type = dtype + if 'out_type' in data: + out_type = data['out_type'] + else: + out_type = dtype - exp = data['df'] - df_out = pd.DataFrame(exp) + exp = data['df'] + df_out = pd.DataFrame(exp) - df_out['b'] = df_out.b.astype(out_type) - df_out.set_index('a', inplace=True) + df_out['b'] = df_out.b.astype(out_type) + df_out.set_index('a', inplace=True) - grpd = df_in.groupby('a') - t = getattr(grpd, method)(*data['args']) - assert_frame_equal(t, df_out) + grpd = df.groupby('a') + t = getattr(grpd, method)(*data['args']) + assert_frame_equal(t, df_out) def test_groupby_non_arithmetic_agg_intlike_precision(self): # GH9311, GH6620
- [X] closes #19569 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Depending on what goes first between this and #19481, some merge conflicts will need to be cleaned up
https://api.github.com/repos/pandas-dev/pandas/pulls/19610
2018-02-09T01:42:18Z
2018-02-10T16:08:59Z
2018-02-10T16:08:59Z
2018-02-10T16:11:20Z
Continue porting period_helper to cython
diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index f0e24fec685d0..7c4de8e42e73b 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -45,7 +45,7 @@ static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } /* Find the absdate (days elapsed since datetime(1, 1, 1) * for the given year/month/day. * Assumes GREGORIAN_CALENDAR */ -static npy_int64 dInfoCalc_SetFromDateAndTime(int year, int month, int day) { +npy_int64 absdate_from_ymd(int year, int month, int day) { /* Calculate the absolute date */ pandas_datetimestruct dts; npy_int64 unix_date; @@ -68,8 +68,6 @@ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, dinfo->year = dts.year; dinfo->month = dts.month; dinfo->day = dts.day; - - dinfo->absdate = absdate; return 0; } @@ -100,8 +98,7 @@ PANDAS_INLINE int get_freq_group(int freq) { return (freq / 1000) * 1000; } PANDAS_INLINE int get_freq_group_index(int freq) { return freq / 1000; } -PANDAS_INLINE npy_int64 get_daytime_conversion_factor(int from_index, - int to_index) { +npy_int64 get_daytime_conversion_factor(int from_index, int to_index) { int row = min_value(from_index, to_index); int col = max_value(from_index, to_index); // row or col < 6 means frequency strictly lower than Daily, which @@ -144,9 +141,9 @@ static npy_int64 DtoB_weekday(npy_int64 absdate) { return (((absdate) / 7) * 5) + (absdate) % 7 - BDAY_OFFSET; } -static npy_int64 DtoB(struct date_info *dinfo, int roll_back) { +static npy_int64 DtoB(struct date_info *dinfo, + int roll_back, npy_int64 absdate) { int day_of_week = dayofweek(dinfo->year, dinfo->month, dinfo->day); - npy_int64 absdate = dinfo->absdate; if (roll_back == 1) { if (day_of_week > 4) { @@ -162,9 +159,6 @@ static npy_int64 DtoB(struct date_info *dinfo, int roll_back) { return DtoB_weekday(absdate); } -static npy_int64 absdate_from_ymd(int y, int m, int d) { - return dInfoCalc_SetFromDateAndTime(y, m, d); -} //************ FROM DAILY *************** @@ -224,15 +218,16 @@ static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; + npy_int64 absdate; int roll_back; ordinal = downsample_daytime(ordinal, af_info); - - dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); + absdate = ordinal + ORD_OFFSET; + dInfoCalc_SetFromAbsDate(&dinfo, absdate); // This usage defines roll_back the opposite way from the others roll_back = 1 - af_info->is_end; - return DtoB(&dinfo, roll_back); + return DtoB(&dinfo, roll_back, absdate); } // all intra day calculations are now done within one function @@ -298,11 +293,11 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; + npy_int64 absdate = asfreq_WtoDT(ordinal, af_info) + ORD_OFFSET; int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_WtoDT(ordinal, af_info) + ORD_OFFSET); + dInfoCalc_SetFromAbsDate(&dinfo, absdate); - return DtoB(&dinfo, roll_back); + return DtoB(&dinfo, roll_back, absdate); } //************ FROM MONTHLY *************** @@ -338,12 +333,12 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; + npy_int64 absdate = asfreq_MtoDT(ordinal, af_info) + ORD_OFFSET; int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate( - &dinfo, 
asfreq_MtoDT(ordinal, af_info) + ORD_OFFSET); + dInfoCalc_SetFromAbsDate(&dinfo, absdate); - return DtoB(&dinfo, roll_back); + return DtoB(&dinfo, roll_back, absdate); } //************ FROM QUARTERLY *************** @@ -393,12 +388,12 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; + npy_int64 absdate = asfreq_QtoDT(ordinal, af_info) + ORD_OFFSET; int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_QtoDT(ordinal, af_info) + ORD_OFFSET); + dInfoCalc_SetFromAbsDate(&dinfo, absdate); - return DtoB(&dinfo, roll_back); + return DtoB(&dinfo, roll_back, absdate); } //************ FROM ANNUAL *************** @@ -439,11 +434,11 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; + npy_int64 absdate = asfreq_AtoDT(ordinal, af_info) + ORD_OFFSET; int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_AtoDT(ordinal, af_info) + ORD_OFFSET); + dInfoCalc_SetFromAbsDate(&dinfo, absdate); - return DtoB(&dinfo, roll_back); + return DtoB(&dinfo, roll_back, absdate); } static npy_int64 nofunc(npy_int64 ordinal, asfreq_info *af_info) { @@ -675,65 +670,6 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq) { } } -double get_abs_time(int freq, npy_int64 date_ordinal, npy_int64 ordinal) { - int freq_index, day_index, base_index; - npy_int64 per_day, start_ord; - double unit, result; - - if (freq <= FR_DAY) { - return 0; - } - - freq_index = get_freq_group_index(freq); - day_index = get_freq_group_index(FR_DAY); - base_index = get_freq_group_index(FR_SEC); - - per_day = get_daytime_conversion_factor(day_index, freq_index); - unit = get_daytime_conversion_factor(freq_index, base_index); - - if (base_index < freq_index) { - unit = 1 / unit; - } - - start_ord = date_ordinal * per_day; - result = (double)(unit * (ordinal - start_ord)); - return result; -} - -/* Sets the time part of the DateTime object. */ -static int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, double abstime) { - int inttime; - int hour, minute; - double second; - - inttime = (int)abstime; - hour = inttime / 3600; - minute = (inttime % 3600) / 60; - second = abstime - (double)(hour * 3600 + minute * 60); - - dinfo->hour = hour; - dinfo->minute = minute; - dinfo->second = second; - return 0; -} - -/* Set the instance's value using the given date and time. - Assumes GREGORIAN_CALENDAR. 
*/ -static int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo, - npy_int64 absdate, double abstime) { - /* Bounds check */ - // The calling function is responsible for ensuring that - // abstime >= 0.0 && abstime <= 86400 - - /* Calculate the date */ - dInfoCalc_SetFromAbsDate(dinfo, absdate); - - /* Calculate the time */ - dInfoCalc_SetFromAbsTime(dinfo, abstime); - - return 0; -} - /* ------------------------------------------------------------------ * New pandas API-helper code, to expose to cython * ------------------------------------------------------------------*/ @@ -750,185 +686,3 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, val = (*func)(period_ordinal, &finfo); return val; } - -/* generate an ordinal in period space */ -npy_int64 get_period_ordinal(int year, int month, int day, int hour, int minute, - int second, int microseconds, int picoseconds, - int freq) { - npy_int64 absdays, delta, seconds; - npy_int64 weeks, days; - npy_int64 ordinal, day_adj; - int freq_group, fmonth, mdiff; - freq_group = get_freq_group(freq); - - if (freq == FR_SEC || freq == FR_MS || freq == FR_US || freq == FR_NS) { - absdays = absdate_from_ymd(year, month, day); - delta = (absdays - ORD_OFFSET); - seconds = - (npy_int64)(delta * 86400 + hour * 3600 + minute * 60 + second); - - switch (freq) { - case FR_MS: - return seconds * 1000 + microseconds / 1000; - - case FR_US: - return seconds * 1000000 + microseconds; - - case FR_NS: - return seconds * 1000000000 + microseconds * 1000 + - picoseconds / 1000; - } - - return seconds; - } - - if (freq == FR_MIN) { - absdays = absdate_from_ymd(year, month, day); - delta = (absdays - ORD_OFFSET); - return (npy_int64)(delta * 1440 + hour * 60 + minute); - } - - if (freq == FR_HR) { - absdays = absdate_from_ymd(year, month, day); - delta = (absdays - ORD_OFFSET); - return (npy_int64)(delta * 24 + hour); - } - - if (freq == FR_DAY) { - return (npy_int64)(absdate_from_ymd(year, month, day) - ORD_OFFSET); - } - - if (freq == FR_UND) { - return (npy_int64)(absdate_from_ymd(year, month, day) - ORD_OFFSET); - } - - if (freq == FR_BUS) { - days = absdate_from_ymd(year, month, day); - // calculate the current week assuming sunday as last day of a week - weeks = (days - BASE_WEEK_TO_DAY_OFFSET) / DAYS_PER_WEEK; - // calculate the current weekday (in range 1 .. 7) - delta = (days - BASE_WEEK_TO_DAY_OFFSET) % DAYS_PER_WEEK + 1; - // return the number of business days in full weeks plus the business - // days in the last - possible partial - week - return (npy_int64)(weeks * BUSINESS_DAYS_PER_WEEK) + - (delta <= BUSINESS_DAYS_PER_WEEK ? 
delta - : BUSINESS_DAYS_PER_WEEK + 1) - - BDAY_OFFSET; - } - - if (freq_group == FR_WK) { - ordinal = (npy_int64)absdate_from_ymd(year, month, day); - day_adj = freq - FR_WK; - return (ordinal - (1 + day_adj)) / 7 + 1 - WEEK_OFFSET; - } - - if (freq == FR_MTH) { - return (year - BASE_YEAR) * 12 + month - 1; - } - - if (freq_group == FR_QTR) { - fmonth = freq - FR_QTR; - if (fmonth == 0) fmonth = 12; - - mdiff = month - fmonth; - if (mdiff < 0) mdiff += 12; - if (month >= fmonth) mdiff += 12; - - return (year - BASE_YEAR) * 4 + (mdiff - 1) / 3; - } - - if (freq_group == FR_ANN) { - fmonth = freq - FR_ANN; - if (fmonth == 0) fmonth = 12; - if (month <= fmonth) { - return year - BASE_YEAR; - } else { - return year - BASE_YEAR + 1; - } - } - - Py_Error(PyExc_RuntimeError, "Unable to generate frequency ordinal"); - -onError: - return INT_ERR_CODE; -} - -/* - Returns the proleptic Gregorian ordinal of the date, as an integer. - This corresponds to the number of days since Jan., 1st, 1AD. - When the instance has a frequency less than daily, the proleptic date - is calculated for the last day of the period. - */ - -npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq) { - asfreq_info af_info; - freq_conv_func toDaily = NULL; - - if (freq == FR_DAY) return period_ordinal + ORD_OFFSET; - - toDaily = get_asfreq_func(freq, FR_DAY); - get_asfreq_info(freq, FR_DAY, 'E', &af_info); - - return toDaily(period_ordinal, &af_info) + ORD_OFFSET; -} - - -int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year) { - asfreq_info af_info; - int qtr_freq; - npy_int64 daily_ord; - freq_conv_func toDaily = NULL; - - toDaily = get_asfreq_func(freq, FR_DAY); - get_asfreq_info(freq, FR_DAY, 'E', &af_info); - - daily_ord = toDaily(ordinal, &af_info); - - if (get_freq_group(freq) == FR_QTR) { - qtr_freq = freq; - } else { - qtr_freq = FR_QTR; - } - get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info); - - DtoQ_yq(daily_ord, &af_info, year, quarter); - return 0; -} - -int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter) { - asfreq_info af_info; - int qtr_freq; - - ordinal = get_python_ordinal(ordinal, freq) - ORD_OFFSET; - - if (get_freq_group(freq) == FR_QTR) - qtr_freq = freq; - else - qtr_freq = FR_QTR; - - get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info); - - DtoQ_yq(ordinal, &af_info, year, quarter); - - if ((qtr_freq % 1000) > 12) *year -= 1; - - return 0; -} - - -int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo) { - npy_int64 absdate = get_python_ordinal(ordinal, freq); - double abstime = get_abs_time(freq, absdate - ORD_OFFSET, ordinal); - - while (abstime < 0) { - abstime += 86400; - absdate -= 1; - } - while (abstime >= 86400) { - abstime -= 86400; - absdate += 1; - } - - dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime); - return 0; -} diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h index f14aec268a1fb..1573b1eeec74b 100644 --- a/pandas/_libs/src/period_helper.h +++ b/pandas/_libs/src/period_helper.h @@ -118,8 +118,6 @@ typedef struct asfreq_info { } asfreq_info; typedef struct date_info { - npy_int64 absdate; - double second; int minute; int hour; @@ -136,18 +134,10 @@ typedef npy_int64 (*freq_conv_func)(npy_int64, asfreq_info *af_info); npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, char relation); -npy_int64 get_period_ordinal(int year, int month, int day, int hour, int minute, - int second, int microseconds, int picoseconds, - int freq); - -npy_int64 get_python_ordinal(npy_int64 period_ordinal, int 
freq); - -int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo); freq_conv_func get_asfreq_func(int fromFreq, int toFreq); void get_asfreq_info(int fromFreq, int toFreq, char relation, asfreq_info *af_info); -int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year); -int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter); +npy_int64 get_daytime_conversion_factor(int from_index, int to_index); #endif // PANDAS__LIBS_SRC_PERIOD_HELPER_H_ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 3c396a9ff4f3c..03d28af5e7f27 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -13,7 +13,7 @@ import_array() from libc.stdlib cimport free, malloc from libc.time cimport strftime, tm -from libc.string cimport strlen +from libc.string cimport strlen, memset from pandas.compat import PY2 @@ -24,7 +24,15 @@ from cpython.datetime cimport PyDateTime_Check, PyDateTime_IMPORT PyDateTime_IMPORT from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, - dt64_to_dtstruct) + dt64_to_dtstruct, + PANDAS_FR_D, + pandas_datetime_to_datetimestruct, + PANDAS_DATETIMEUNIT) + +cdef extern from "../src/datetime/np_datetime.h": + int64_t pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + pandas_datetimestruct *d + ) nogil cimport util from util cimport is_period_object, is_string_object, INT32_MIN @@ -52,6 +60,24 @@ from pandas.tseries import frequencies cdef extern from "period_helper.h": + int FR_ANN + int FR_QTR + int FR_MTH + int FR_WK + int FR_DAY + int FR_HR + int FR_MIN + int FR_SEC + int FR_MS + int FR_US + int FR_NS + int FR_BUS + int FR_UND + + int ORD_OFFSET + int WEEK_OFFSET + int BDAY_OFFSET + ctypedef struct date_info: double second int minute @@ -72,24 +98,15 @@ cdef extern from "period_helper.h": int from_q_year_end int to_q_year_end - ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) + ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) nogil int64_t asfreq(int64_t dtordinal, int freq1, int freq2, char relation) except INT32_MIN - freq_conv_func get_asfreq_func(int fromFreq, int toFreq) + freq_conv_func get_asfreq_func(int fromFreq, int toFreq) nogil void get_asfreq_info(int fromFreq, int toFreq, char relation, - asfreq_info *af_info) - - int64_t get_period_ordinal(int year, int month, int day, - int hour, int minute, int second, - int microseconds, int picoseconds, - int freq) nogil except INT32_MIN - - int get_date_info(int64_t ordinal, int freq, - date_info *dinfo) nogil + asfreq_info *af_info) nogil - int get_yq(int64_t ordinal, int freq, int *quarter, int *year) - int _quarter_year(int64_t ordinal, int freq, int *year, int *quarter) + int64_t get_daytime_conversion_factor(int from_index, int to_index) nogil @cython.cdivision @@ -129,6 +146,285 @@ cdef char* c_strftime(date_info *dinfo, char *fmt): return result +# ---------------------------------------------------------------------- +# Conversion between date_info and pandas_datetimestruct + +cdef inline int get_freq_group(int freq) nogil: + return (freq // 1000) * 1000 + + +@cython.cdivision +cdef int64_t get_period_ordinal(int year, int month, int day, + int hour, int minute, int second, + int microseconds, int picoseconds, + int freq) nogil: + """generate an ordinal in period space""" + cdef: + int64_t absdays, unix_date, seconds, delta + int64_t weeks + int64_t day_adj + int freq_group, fmonth, mdiff + + freq_group = get_freq_group(freq) + + if freq_group == FR_ANN: + fmonth = freq - FR_ANN + if fmonth == 0: + 
fmonth = 12 + if month <= fmonth: + return year - 1970 + else: + return year - 1970 + 1 + + elif freq_group == FR_QTR: + fmonth = freq - FR_QTR + if fmonth == 0: + fmonth = 12 + + mdiff = month - fmonth + # TODO: Aren't the next two conditions equivalent to + # unconditional incrementing? + if mdiff < 0: + mdiff += 12 + if month >= fmonth: + mdiff += 12 + + return (year - 1970) * 4 + (mdiff - 1) / 3 + + elif freq == FR_MTH: + return (year - 1970) * 12 + month - 1 + + absdays = absdate_from_ymd(year, month, day) + unix_date = absdays - ORD_OFFSET + + if freq >= FR_SEC: + seconds = unix_date * 86400 + hour * 3600 + minute * 60 + second + + if freq == FR_MS: + return seconds * 1000 + microseconds / 1000 + + elif freq == FR_US: + return seconds * 1000000 + microseconds + + elif freq == FR_NS: + return (seconds * 1000000000 + + microseconds * 1000 + picoseconds / 1000) + + else: + return seconds + + elif freq == FR_MIN: + return unix_date * 1440 + hour * 60 + minute + + elif freq == FR_HR: + return unix_date * 24 + hour + + elif freq == FR_DAY: + return unix_date + + elif freq == FR_UND: + return unix_date + + elif freq == FR_BUS: + # calculate the current week assuming sunday as last day of a week + # Jan 1 0001 is a Monday, so subtract 1 to get to end-of-week + weeks = (unix_date + ORD_OFFSET - 1) / 7 + # calculate the current weekday (in range 1 .. 7) + delta = (unix_date + ORD_OFFSET - 1) % 7 + 1 + # return the number of business days in full weeks plus the business + # days in the last - possible partial - week + if delta <= 5: + return (weeks * 5) + delta - BDAY_OFFSET + else: + return (weeks * 5) + (5 + 1) - BDAY_OFFSET + + elif freq_group == FR_WK: + day_adj = freq - FR_WK + return (unix_date + ORD_OFFSET - (1 + day_adj)) / 7 + 1 - WEEK_OFFSET + + # raise ValueError + + +cdef int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil: + cdef: + int64_t absdate + double abstime + + absdate = get_python_ordinal(ordinal, freq); + abstime = get_abs_time(freq, absdate - ORD_OFFSET, ordinal) + + while abstime < 0: + abstime += 86400 + absdate -= 1 + + while abstime >= 86400: + abstime -= 86400 + absdate += 1 + + dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime) + return 0 + + +cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: + """ + Returns the proleptic Gregorian ordinal of the date, as an integer. + This corresponds to the number of days since Jan., 1st, 1AD. + When the instance has a frequency less than daily, the proleptic date + is calculated for the last day of the period. + """ + cdef: + asfreq_info af_info + freq_conv_func toDaily = NULL + + if freq == FR_DAY: + return period_ordinal + ORD_OFFSET + + toDaily = get_asfreq_func(freq, FR_DAY) + get_asfreq_info(freq, FR_DAY, 'E', &af_info) + return toDaily(period_ordinal, &af_info) + ORD_OFFSET + + +cdef int dInfoCalc_SetFromAbsDateTime(date_info *dinfo, + int64_t absdate, double abstime) nogil: + """ + Set the instance's value using the given date and time. + Assumes GREGORIAN_CALENDAR. 
+ """ + # Bounds check + # The calling function is responsible for ensuring that + # abstime >= 0.0 and abstime <= 86400 + + # Calculate the date + dInfoCalc_SetFromAbsDate(dinfo, absdate) + + # Calculate the time + dInfoCalc_SetFromAbsTime(dinfo, abstime) + return 0 + + +cdef int dInfoCalc_SetFromAbsDate(date_info *dinfo, int64_t absdate) nogil: + """ + Sets the date part of the date_info struct + Assumes GREGORIAN_CALENDAR + """ + cdef: + pandas_datetimestruct dts + + pandas_datetime_to_datetimestruct(absdate - ORD_OFFSET, PANDAS_FR_D, &dts) + dinfo.year = dts.year + dinfo.month = dts.month + dinfo.day = dts.day + return 0 + + +@cython.cdivision +cdef int dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: + """ + Sets the time part of the DateTime object. + """ + cdef: + int inttime + int hour, minute + double second + + inttime = <int>abstime + hour = inttime / 3600 + minute = (inttime % 3600) / 60 + second = abstime - <double>(hour * 3600 + minute * 60) + + dinfo.hour = hour + dinfo.minute = minute + dinfo.second = second + return 0 + + +@cython.cdivision +cdef double get_abs_time(int freq, int64_t date_ordinal, + int64_t ordinal) nogil: + cdef: + int freq_index, day_index, base_index + int64_t per_day, start_ord + double unit, result + + if freq <= FR_DAY: + return 0 + + freq_index = freq // 1000 + day_index = FR_DAY // 1000 + base_index = FR_SEC // 1000 + + per_day = get_daytime_conversion_factor(day_index, freq_index) + unit = get_daytime_conversion_factor(freq_index, base_index) + + if base_index < freq_index: + unit = 1 / unit + + start_ord = date_ordinal * per_day + result = <double>(unit * (ordinal - start_ord)) + return result + + +cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: + """ + Find the absdate (days elapsed since datetime(1, 1, 1) + for the given year/month/day. + Assumes GREGORIAN_CALENDAR + """ + # /* Calculate the absolute date + cdef: + pandas_datetimestruct dts + int64_t unix_date + + memset(&dts, 0, sizeof(pandas_datetimestruct)) + dts.year = year + dts.month = month + dts.day = day + unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts) + return ORD_OFFSET + unix_date + + +cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): + cdef: + asfreq_info af_info + int qtr_freq + int64_t daily_ord + + daily_ord = get_python_ordinal(ordinal, freq) - ORD_OFFSET + + if get_freq_group(freq) == FR_QTR: + qtr_freq = freq + else: + qtr_freq = FR_QTR + + get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info) + + DtoQ_yq(daily_ord, &af_info, year, quarter) + return qtr_freq + + +cdef int64_t DtoQ_yq(int64_t ordinal, asfreq_info *af_info, + int *year, int *quarter): + cdef: + date_info dinfo + + dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET) + + if af_info.to_q_year_end != 12: + dinfo.month -= af_info.to_q_year_end + if dinfo.month <= 0: + dinfo.month += 12 + else: + dinfo.year += 1 + + year[0] = dinfo.year + quarter[0] = monthToQuarter(dinfo.month) + return 0 + + +cdef inline int monthToQuarter(int month): + return (month - 1) // 3 + 1 + + # ---------------------------------------------------------------------- # Period logic @@ -193,8 +489,7 @@ cdef char START = 'S' cdef char END = 'E' -cpdef int64_t period_asfreq(int64_t period_ordinal, int freq1, int freq2, - bint end): +cpdef int64_t period_asfreq(int64_t ordinal, int freq1, int freq2, bint end): """ Convert period ordinal from one frequency to another, and if upsampling, choose to use start ('S') or end ('E') of period. 
@@ -202,13 +497,13 @@ cpdef int64_t period_asfreq(int64_t period_ordinal, int freq1, int freq2, cdef: int64_t retval - if period_ordinal == iNaT: + if ordinal == iNaT: return iNaT if end: - retval = asfreq(period_ordinal, freq1, freq2, END) + retval = asfreq(ordinal, freq1, freq2, END) else: - retval = asfreq(period_ordinal, freq1, freq2, START) + retval = asfreq(ordinal, freq1, freq2, START) if retval == INT32_MIN: raise ValueError('Frequency conversion failed') @@ -225,7 +520,7 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): ndarray[int64_t] result Py_ssize_t i, n freq_conv_func func - asfreq_info finfo + asfreq_info af_info int64_t val char relation @@ -238,20 +533,20 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): relation = START func = get_asfreq_func(freq1, freq2) - get_asfreq_info(freq1, freq2, relation, &finfo) + get_asfreq_info(freq1, freq2, relation, &af_info) mask = arr == iNaT if mask.any(): # NaT process for i in range(n): val = arr[i] if val != iNaT: - val = func(val, &finfo) + val = func(val, &af_info) if val == INT32_MIN: raise ValueError("Unable to convert to desired frequency.") result[i] = val else: for i in range(n): - val = func(arr[i], &finfo) + val = func(arr[i], &af_info) if val == INT32_MIN: raise ValueError("Unable to convert to desired frequency.") result[i] = val @@ -403,17 +698,22 @@ cdef int pyear(int64_t ordinal, int freq): return dinfo.year +@cython.cdivision cdef int pqyear(int64_t ordinal, int freq): cdef: - int year, quarter - _quarter_year(ordinal, freq, &year, &quarter) + int year, quarter, qtr_freq + qtr_freq = get_yq(ordinal, freq, &quarter, &year) + if (qtr_freq % 1000) > 12: + year -= 1 return year cdef int pquarter(int64_t ordinal, int freq): cdef: - int year, quarter - _quarter_year(ordinal, freq, &year, &quarter) + int year, quarter, qtr_freq + qtr_freq = get_yq(ordinal, freq, &quarter, &year) + if (qtr_freq % 1000) > 12: + year -= 1 return quarter
This moves functions to cython but retains their C names to make comparison easier. The next pass will get rid of the camelCase. A couple of redundant functions are left behind in period_helper; they will be cleaned up in a follow-up. The next hurdle is to get rid of all the adding and subtracting of ORD_OFFSET, WEEK_OFFSET, BDAY_OFFSET. (The period_helper code puts 0 at Jan 1 0001 instead of Jan 1 1970.) This would be straightforward, but I want to be careful about C division/mod conventions.
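The caution about division/mod is warranted: C truncates integer division toward zero while Python (and Cython, unless `@cython.cdivision` is applied) floors it, so the two disagree exactly on the negative values that shifting the epoch from 0001 to 1970 introduces:

```python
# Python floors integer division; C (and Cython under @cython.cdivision)
# truncates toward zero, so results differ whenever operands have mixed signs.
print(-7 // 2, -7 % 2)   # Python: -4 1
# Equivalent C: -7 / 2 == -3, -7 % 2 == -1
```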
https://api.github.com/repos/pandas-dev/pandas/pulls/19608
2018-02-08T23:57:25Z
2018-02-10T21:12:38Z
2018-02-10T21:12:38Z
2018-02-11T21:36:28Z
API: Default ExtensionArray.astype
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 1556b653819a6..553e1e0ac2066 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1,4 +1,6 @@ """An interface for extending pandas with custom arrays.""" +import numpy as np + from pandas.errors import AbstractMethodError _not_implemented_message = "{} does not implement {}." @@ -138,6 +140,25 @@ def nbytes(self): # ------------------------------------------------------------------------ # Additional Methods # ------------------------------------------------------------------------ + def astype(self, dtype, copy=True): + """Cast to a NumPy array with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ndarray + NumPy ndarray with 'dtype' for its dtype. + """ + return np.array(self, dtype=dtype, copy=copy) + def isna(self): # type: () -> np.ndarray """Boolean NumPy array indicating if each value is missing. diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index eca4dd4cf2106..d800a7b92b559 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -10,14 +10,12 @@ Series, Categorical, CategoricalIndex, IntervalIndex, date_range) from pandas.compat import string_types -from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, ExtensionDtype) + IntervalDtype, CategoricalDtype) from pandas.core.dtypes.common import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, - is_extension_array_dtype, is_period_dtype, is_period, is_dtype_equal, is_datetime64_ns_dtype, is_datetime64_dtype, is_interval_dtype, @@ -744,31 +742,3 @@ def test_categorical_categories(self): tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) c1 = CategoricalDtype(CategoricalIndex(['a', 'b'])) tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) - - -class DummyArray(ExtensionArray): - pass - - -class DummyDtype(ExtensionDtype): - pass - - -class TestExtensionArrayDtype(object): - - @pytest.mark.parametrize('values', [ - pd.Categorical([]), - pd.Categorical([]).dtype, - pd.Series(pd.Categorical([])), - DummyDtype(), - DummyArray(), - ]) - def test_is_extension_array_dtype(self, values): - assert is_extension_array_dtype(values) - - @pytest.mark.parametrize('values', [ - np.array([]), - pd.Series(np.array([])), - ]) - def test_is_not_extension_array_dtype(self, values): - assert not is_extension_array_dtype(values) diff --git a/pandas/tests/extension/__init__.py b/pandas/tests/extension/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py new file mode 100644 index 0000000000000..1f4582f687415 --- /dev/null +++ b/pandas/tests/extension/test_common.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas.core.arrays import ExtensionArray +from pandas.core.dtypes.common import is_extension_array_dtype +from pandas.core.dtypes.dtypes import ExtensionDtype + + +class DummyDtype(ExtensionDtype): + pass + + +class DummyArray(ExtensionArray): + + def __init__(self, data): + self.data = data + + def __array__(self, dtype): 
+ return self.data + + @property + def dtype(self): + return self.data.dtype + + +class TestExtensionArrayDtype(object): + + @pytest.mark.parametrize('values', [ + pd.Categorical([]), + pd.Categorical([]).dtype, + pd.Series(pd.Categorical([])), + DummyDtype(), + DummyArray(np.array([1, 2])), + ]) + def test_is_extension_array_dtype(self, values): + assert is_extension_array_dtype(values) + + @pytest.mark.parametrize('values', [ + np.array([]), + pd.Series(np.array([])), + ]) + def test_is_not_extension_array_dtype(self, values): + assert not is_extension_array_dtype(values) + + +def test_astype(): + + arr = DummyArray(np.array([1, 2, 3])) + expected = np.array([1, 2, 3], dtype=object) + + result = arr.astype(object) + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype('object') + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_no_copy(): + arr = DummyArray(np.array([1, 2, 3], dtype=np.int64)) + result = arr.astype(arr.dtype, copy=False) + + assert arr.data is result + + result = arr.astype(arr.dtype) + assert arr.data is not result
I need this to progress cleanly on https://github.com/pandas-dev/pandas/pull/19558 (which is blocking https://github.com/pandas-dev/pandas/pull/19520).
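For reference, a minimal sketch of what the inherited default `astype` gives subclasses for free, mirroring the `DummyArray` used in the new tests (not a real pandas array type):

```python
import numpy as np

from pandas.core.arrays import ExtensionArray


class DummyArray(ExtensionArray):
    # Minimal array: just wraps an ndarray, as in test_common.py
    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        # np.array(self, ...) inside the inherited astype funnels through here
        return self.data


arr = DummyArray(np.array([1, 2, 3]))
arr.astype(object)                 # np.array([1, 2, 3], dtype=object)
arr.astype(np.int64, copy=False)   # the wrapped ndarray itself; dtype already matches
```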
https://api.github.com/repos/pandas-dev/pandas/pulls/19604
2018-02-08T21:09:36Z
2018-02-09T20:53:41Z
2018-02-09T20:53:41Z
2018-02-09T20:53:45Z
DOC: doc/source/indexing.rst says pd.df.ix is deprecated, show warnin…
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9463512ac11de..352ce921d1d44 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1300,6 +1300,9 @@ class _IXIndexer(_NDFrameIndexer): """A primarily label-location based indexer, with integer position fallback. + Warning: Starting in 0.20.0, the .ix indexer is deprecated, in + favor of the more strict .iloc and .loc indexers. + ``.ix[]`` supports mixed integer and label based access. It is primarily label based, but will fall back to integer positional access unless the corresponding axis is of integer type.
…g in generated doc. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
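For context, a small sketch of the migration the new docstring warning points at (frame and labels invented):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]}, index=[10, 20, 30])

# Deprecated: .ix guesses between label- and position-based access
# df.ix[10, 'a']

# The stricter replacements the warning recommends:
df.loc[10, 'a']   # purely label-based
df.iloc[0, 0]     # purely position-based
```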
https://api.github.com/repos/pandas-dev/pandas/pulls/19596
2018-02-08T10:49:28Z
2018-02-08T11:17:13Z
2018-02-08T11:17:13Z
2018-02-08T11:17:16Z
ENH: added an optional css id to `<table>` tags created by `frame.to_…
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 7782e5f1ffa56..88e46b9035886 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -690,6 +690,7 @@ I/O ^^^ - :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`) +- :meth:`DataFrame.to_html` now has an option to add an id to the leading `<table>` tag (:issue:`8496`) - Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) - Bug in :func:`read_csv` where missing values were not being handled properly when ``keep_default_na=False`` with dictionary ``na_values`` (:issue:`19227`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 28923f0fbf240..6d8dcb8a1ca89 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1727,7 +1727,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, sparsify=None, index_names=True, justify=None, bold_rows=True, classes=None, escape=True, max_rows=None, max_cols=None, show_dimensions=False, notebook=False, decimal='.', - border=None): + border=None, table_id=None): """ Render a DataFrame as an HTML table. @@ -1755,6 +1755,12 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, `<table>` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 + + table_id : str, optional + A css id is included in the opening `<table>` tag if specified. + + .. versionadded:: 0.23.0 + """ if (justify is not None and @@ -1772,7 +1778,7 @@ def to_html(self, buf=None, columns=None, col_space=None, header=True, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, - decimal=decimal) + decimal=decimal, table_id=table_id) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 269c81b380b5e..621641747f376 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -77,7 +77,11 @@ index_names : bool, optional Prints the names of the indexes, default True line_width : int, optional - Width to wrap a line in characters, default no wrap""" + Width to wrap a line in characters, default no wrap + table_id : str, optional + id for the <table> element create by to_html + + .. 
versionadded:: 0.23.0""" _VALID_JUSTIFY_PARAMETERS = ("left", "right", "center", "justify", "justify-all", "start", "end", "inherit", @@ -387,7 +391,8 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, justify=None, float_format=None, sparsify=None, index_names=True, line_width=None, max_rows=None, - max_cols=None, show_dimensions=False, decimal='.', **kwds): + max_cols=None, show_dimensions=False, decimal='.', + table_id=None, **kwds): self.frame = frame if buf is not None: self.buf = _expand_user(_stringify_path(buf)) @@ -413,6 +418,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame)) self.show_dimensions = show_dimensions + self.table_id = table_id if justify is None: self.justify = get_option("display.colheader_justify") @@ -740,7 +746,8 @@ def to_html(self, classes=None, notebook=False, border=None): max_rows=self.max_rows, max_cols=self.max_cols, notebook=notebook, - border=border) + border=border, + table_id=self.table_id) if hasattr(self.buf, 'write'): html_renderer.write_result(self.buf) elif isinstance(self.buf, compat.string_types): @@ -1082,7 +1089,7 @@ class HTMLFormatter(TableFormatter): indent_delta = 2 def __init__(self, formatter, classes=None, max_rows=None, max_cols=None, - notebook=False, border=None): + notebook=False, border=None, table_id=None): self.fmt = formatter self.classes = classes @@ -1101,6 +1108,7 @@ def __init__(self, formatter, classes=None, max_rows=None, max_cols=None, if border is None: border = get_option('display.html.border') self.border = border + self.table_id = table_id def write(self, s, indent=0): rs = pprint_thing(s) @@ -1197,6 +1205,7 @@ def write_style(self): def write_result(self, buf): indent = 0 + id_section = "" frame = self.frame _classes = ['dataframe'] # Default class. @@ -1220,8 +1229,12 @@ def write_result(self, buf): self.write('<div{style}>'.format(style=div_style)) self.write_style() - self.write('<table border="{border}" class="{cls}">' - .format(border=self.border, cls=' '.join(_classes)), indent) + + if self.table_id is not None: + id_section = ' id="{table_id}"'.format(table_id=self.table_id) + self.write('<table border="{border}" class="{cls}"{id_section}>' + .format(border=self.border, cls=' '.join(_classes), + id_section=id_section), indent) indent += self.indent_delta indent = self._write_header(indent) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index e0ce27de5c31f..dddba5b425c3b 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1492,7 +1492,7 @@ def test_repr_html_float(self): 'B': np.arange(41, 41 + h)}).set_index('idx') reg_repr = df._repr_html_() assert '..' not in reg_repr - assert str(40 + h) in reg_repr + assert '<td>{val}</td>'.format(val=str(40 + h)) in reg_repr h = max_rows + 1 df = DataFrame({'idx': np.linspace(-10, 10, h), @@ -1500,7 +1500,7 @@ def test_repr_html_float(self): 'B': np.arange(41, 41 + h)}).set_index('idx') long_repr = df._repr_html_() assert '..' 
in long_repr - assert '31' not in long_repr + assert '<td>{val}</td>'.format(val='31') not in long_repr assert u('{h} rows ').format(h=h) in long_repr assert u('2 columns') in long_repr diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 9e063c2d176e1..f69cac62513d4 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -1864,3 +1864,10 @@ def test_to_html_with_index_names_false(self): name='myindexname')) result = df.to_html(index_names=False) assert 'myindexname' not in result + + def test_to_html_with_id(self): + # gh-8496 + df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(['a', 'b'], + name='myindexname')) + result = df.to_html(index_names=False, table_id="TEST_ID") + assert ' id="TEST_ID"' in result
This is useful for the reasons mentioned in #8496 - Automatically create a random id, via uuid, for tables created using `_repr_html_`. - Also updated `test_repr_html_float` to be more descriptive and to avoid conflicts with random numbers in the id for `_repr_html_` - [x] closes #8496 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
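A quick sketch of the new keyword in use, mirroring the added test (assuming a build with this patch):

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2]})
html = df.to_html(table_id='TEST_ID')

# The id lands on the opening tag:
# <table border="1" class="dataframe" id="TEST_ID">
assert ' id="TEST_ID"' in html
```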
https://api.github.com/repos/pandas-dev/pandas/pulls/19594
2018-02-08T06:57:45Z
2018-02-09T12:29:03Z
2018-02-09T12:29:03Z
2018-02-09T13:12:31Z
ENH: Allow literal (non-regex) replacement using .str.replace #16808
diff --git a/doc/source/text.rst b/doc/source/text.rst index 1e620acb1f88a..da8e40892716e 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -118,8 +118,8 @@ i.e., from the end of the string to the beginning of the string: s2.str.rsplit('_', expand=True, n=1) -Methods like ``replace`` and ``findall`` take `regular expressions -<https://docs.python.org/3/library/re.html>`__, too: +``replace`` by default replaces `regular expressions +<https://docs.python.org/3/library/re.html>`__: .. ipython:: python @@ -146,12 +146,25 @@ following code will cause trouble because of the regular expression meaning of # We need to escape the special character (for >1 len patterns) dollars.str.replace(r'-\$', '-') +.. versionadded:: 0.23.0 + +If you do want literal replacement of a string (equivalent to +:meth:`str.replace`), you can set the optional ``regex`` parameter to +``False``, rather than escaping each character. In this case both ``pat`` +and ``repl`` must be strings: + +.. ipython:: python + + # These lines are equivalent + dollars.str.replace(r'-\$', '-') + dollars.str.replace('-$', '-', regex=False) + +.. versionadded:: 0.20.0 + The ``replace`` method can also take a callable as replacement. It is called on every ``pat`` using :func:`re.sub`. The callable should expect one positional argument (a regex object) and return a string. -.. versionadded:: 0.20.0 - .. ipython:: python # Reverse every lowercase alphabetic word @@ -164,12 +177,12 @@ positional argument (a regex object) and return a string. repl = lambda m: m.group('two').swapcase() pd.Series(['Foo Bar Baz', np.nan]).str.replace(pat, repl) +.. versionadded:: 0.20.0 + The ``replace`` method also accepts a compiled regular expression object from :func:`re.compile` as a pattern. All flags should be included in the compiled regular expression object. -.. versionadded:: 0.20.0 - .. ipython:: python import re @@ -186,6 +199,7 @@ regular expression object will raise a ``ValueError``. --------------------------------------------------------------------------- ValueError: case and flags cannot be set when pat is a compiled regex + Indexing with ``.str`` ---------------------- @@ -432,7 +446,7 @@ Method Summary :meth:`~Series.str.join`;Join strings in each element of the Series with passed separator :meth:`~Series.str.get_dummies`;Split strings on the delimiter returning DataFrame of dummy variables :meth:`~Series.str.contains`;Return boolean array if each string contains pattern/regex - :meth:`~Series.str.replace`;Replace occurrences of pattern/regex with some other string or the return value of a callable given the occurrence + :meth:`~Series.str.replace`;Replace occurrences of pattern/regex/string with some other string or the return value of a callable given the occurrence :meth:`~Series.str.repeat`;Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) :meth:`~Series.str.pad`;"Add whitespace to left, right, or both sides of strings" :meth:`~Series.str.center`;Equivalent to ``str.center`` diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6865428c352c1..542e62aa374be 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -620,6 +620,7 @@ Other API Changes - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) - :class:`DateOffset` objects render more simply, e.g. 
``<DateOffset: days=1>`` instead of ``<DateOffset: kwds={'days': 1}>`` (:issue:`19403`) - ``Categorical.fillna`` now validates its ``value`` and ``method`` keyword arguments. It now raises when both or none are specified, matching the behavior of :meth:`Series.fillna` (:issue:`19682`) +- :func:`Series.str.replace` now takes an optional `regex` keyword which, when set to ``False``, uses literal string replacement rather than regex replacement (:issue:`16808`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ce688f8b16fe5..6b427ed1da834 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -306,7 +306,7 @@ def str_endswith(arr, pat, na=np.nan): return _na_map(f, arr, na, dtype=bool) -def str_replace(arr, pat, repl, n=-1, case=None, flags=0): +def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or @@ -337,25 +337,50 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0): flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex + regex : boolean, default True + - If True, assumes the passed-in pattern is a regular expression. + - If False, treats the pattern as a literal string + - Cannot be set to False if `pat` is a compiled regex or `repl` is + a callable. + + .. versionadded:: 0.23.0 Returns ------- replaced : Series/Index of objects + Raises + ------ + ValueError + * if `regex` is False and `repl` is a callable or `pat` is a compiled + regex + * if `pat` is a compiled regex and `case` or `flags` is set + Notes ----- When `pat` is a compiled regex, all flags should be included in the - compiled regex. Use of `case` or `flags` with a compiled regex will - raise an error. + compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled + regex will raise an error. Examples -------- - When `repl` is a string, every `pat` is replaced as with - :meth:`str.replace`. NaN value(s) in the Series are left as is. + When `pat` is a string and `regex` is True (the default), the given `pat` + is compiled as a regex. When `repl` is a string, it replaces matching + regex patterns as with :meth:`re.sub`. 
NaN value(s) in the Series are + left as is: + + >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) + 0 bao + 1 baz + 2 NaN + dtype: object - >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', 'b') - 0 boo - 1 buz + When `pat` is a string and `regex` is False, every `pat` is replaced with + `repl` as with :meth:`str.replace`: + + >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) + 0 bao + 1 fuz 2 NaN dtype: object @@ -397,6 +422,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0): 1 bar 2 NaN dtype: object + """ # Check whether repl is valid (GH 13438, GH 15055) @@ -404,27 +430,33 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0): raise TypeError("repl must be a string or callable") is_compiled_re = is_re(pat) - if is_compiled_re: - if (case is not None) or (flags != 0): - raise ValueError("case and flags cannot be set" - " when pat is a compiled regex") - else: - # not a compiled regex - # set default case - if case is None: - case = True - - # add case flag, if provided - if case is False: - flags |= re.IGNORECASE - - use_re = is_compiled_re or len(pat) > 1 or flags or callable(repl) - - if use_re: - n = n if n >= 0 else 0 - regex = re.compile(pat, flags=flags) - f = lambda x: regex.sub(repl=repl, string=x, count=n) + if regex: + if is_compiled_re: + if (case is not None) or (flags != 0): + raise ValueError("case and flags cannot be set" + " when pat is a compiled regex") + else: + # not a compiled regex + # set default case + if case is None: + case = True + + # add case flag, if provided + if case is False: + flags |= re.IGNORECASE + if is_compiled_re or len(pat) > 1 or flags or callable(repl): + n = n if n >= 0 else 0 + compiled = re.compile(pat, flags=flags) + f = lambda x: compiled.sub(repl=repl, string=x, count=n) + else: + f = lambda x: x.replace(pat, repl, n) else: + if is_compiled_re: + raise ValueError("Cannot use a compiled regex as replacement " + "pattern with regex=False") + if callable(repl): + raise ValueError("Cannot use a callable replacement when " + "regex=False") f = lambda x: x.replace(pat, repl, n) return _na_map(f, arr) @@ -1596,9 +1628,9 @@ def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None): return self._wrap_result(result) @copy(str_replace) - def replace(self, pat, repl, n=-1, case=None, flags=0): + def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): result = str_replace(self._data, pat, repl, n=n, case=case, - flags=flags) + flags=flags, regex=regex) return self._wrap_result(result) @copy(str_repeat) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 178c5ff655b04..a878d6ed7b052 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -530,6 +530,27 @@ def test_replace_compiled_regex(self): exp = Series(['foObaD__baRbaD', NA]) tm.assert_series_equal(result, exp) + def test_replace_literal(self): + # GH16808 literal replace (regex=False vs regex=True) + values = Series(['f.o', 'foo', NA]) + exp = Series(['bao', 'bao', NA]) + result = values.str.replace('f.', 'ba') + tm.assert_series_equal(result, exp) + + exp = Series(['bao', 'foo', NA]) + result = values.str.replace('f.', 'ba', regex=False) + tm.assert_series_equal(result, exp) + + # Cannot do a literal replace if given a callable repl or compiled + # pattern + callable_repl = lambda m: m.group(0).swapcase() + compiled_pat = re.compile('[a-z][A-Z]{2}') + + pytest.raises(ValueError, values.str.replace, 'abc', callable_repl, + regex=False) + 
pytest.raises(ValueError, values.str.replace, compiled_pat, '', + regex=False) + def test_repeat(self): values = Series(['a', 'b', NA, 'c', NA, 'd'])
Adds a `regex` parameter (default `True`) to `.str.replace`. If `regex=False` is set, uses `str.replace` to replace occurrences of `pat` with `repl` literally. Raises `ValueError` if given a callable `repl` or a compiled regex for `pat` in combination with `regex=False`. - [ ] closes #16808 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
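A short sketch contrasting the two modes, following the examples added to the docstring:

```python
import re

import numpy as np
import pandas as pd

s = pd.Series(['f.o', 'fuz', np.nan])

s.str.replace('f.', 'ba')               # regex (default): ['bao', 'baz', NaN]
s.str.replace('f.', 'ba', regex=False)  # literal:         ['bao', 'fuz', NaN]

# Both of these raise ValueError, per the new validation:
# s.str.replace(re.compile('f.'), 'ba', regex=False)
# s.str.replace('f.', lambda m: m.group(0), regex=False)
```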
https://api.github.com/repos/pandas-dev/pandas/pulls/19584
2018-02-07T20:04:15Z
2018-02-28T11:14:12Z
2018-02-28T11:14:11Z
2018-02-28T11:14:17Z
Dispatch categorical Series ops to Categorical
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ea56ebad7d782..4dfa4bfe637f1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -686,3 +686,5 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) +- Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) +- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 62c6a6b16cbe9..15953e6efbe7f 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -53,6 +53,9 @@ def f(self, other): # results depending whether categories are the same or not is kind of # insane, so be a bit stricter here and use the python3 idea of # comparing only things of equal type. + if isinstance(other, ABCSeries): + return NotImplemented + if not self.ordered: if op in ['__lt__', '__gt__', '__le__', '__ge__']: raise TypeError("Unordered Categoricals can only compare " diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 2c7be2b21f959..97cadb2808ad1 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,3 +1,5 @@ +import operator + import numpy as np from pandas._libs import index as libindex @@ -726,7 +728,9 @@ def _codes_for_groupby(self, sort): def _add_comparison_methods(cls): """ add in comparison methods """ - def _make_compare(opname): + def _make_compare(op): + opname = '__{op}__'.format(op=op.__name__) + def _evaluate_compare(self, other): # if we have a Categorical type, then must have the same @@ -749,16 +753,21 @@ def _evaluate_compare(self, other): "have the same categories and ordered " "attributes") - return getattr(self.values, opname)(other) + result = op(self.values, other) + if isinstance(result, ABCSeries): + # Dispatch to pd.Categorical returned NotImplemented + # and we got a Series back; down-cast to ndarray + result = result.values + return result return compat.set_function_name(_evaluate_compare, opname, cls) - cls.__eq__ = _make_compare('__eq__') - cls.__ne__ = _make_compare('__ne__') - cls.__lt__ = _make_compare('__lt__') - cls.__gt__ = _make_compare('__gt__') - cls.__le__ = _make_compare('__le__') - cls.__ge__ = _make_compare('__ge__') + cls.__eq__ = _make_compare(operator.eq) + cls.__ne__ = _make_compare(operator.ne) + cls.__lt__ = _make_compare(operator.lt) + cls.__gt__ = _make_compare(operator.gt) + cls.__le__ = _make_compare(operator.le) + cls.__ge__ = _make_compare(operator.ge) def _delegate_method(self, name, *args, **kwargs): """ method delegation to the ._values """ diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 6db84aedce7e7..974a57a28f196 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -726,7 +726,7 @@ def dispatch_to_index_op(op, left, right, index_class): # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, # left_idx may inherit a freq from a cached DatetimeIndex. # See discussion in GH#19147. 
- if left_idx.freq is not None: + if getattr(left_idx, 'freq', None) is not None: left_idx = left_idx._shallow_copy(freq=None) try: result = op(left_idx, right) @@ -774,9 +774,8 @@ def na_op(x, y): # dispatch to the categorical if we have a categorical # in either operand - if is_categorical_dtype(x): - return op(x, y) - elif is_categorical_dtype(y) and not is_scalar(y): + if is_categorical_dtype(y) and not is_scalar(y): + # The `not is_scalar(y)` check excludes the string "category" return op(y, x) elif is_object_dtype(x.dtype): @@ -824,17 +823,36 @@ def wrapper(self, other, axis=None): if axis is not None: self._get_axis_number(axis) + res_name = _get_series_op_result_name(self, other) + if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early return NotImplemented + elif isinstance(other, ABCSeries) and not self._indexed_same(other): + raise ValueError("Can only compare identically-labeled " + "Series objects") + + elif is_categorical_dtype(self): + # Dispatch to Categorical implementation; pd.CategoricalIndex + # behavior is non-canonical GH#19513 + res_values = dispatch_to_index_op(op, self, other, pd.Categorical) + return self._constructor(res_values, index=self.index, + name=res_name) + + elif is_timedelta64_dtype(self): + res_values = dispatch_to_index_op(op, self, other, + pd.TimedeltaIndex) + return self._constructor(res_values, index=self.index, + name=res_name) + elif isinstance(other, ABCSeries): - name = com._maybe_match_name(self, other) - if not self._indexed_same(other): - msg = 'Can only compare identically-labeled Series objects' - raise ValueError(msg) + # By this point we have checked that self._indexed_same(other) res_values = na_op(self.values, other.values) - return self._constructor(res_values, index=self.index, name=name) + # rename is needed in case res_name is None and res_values.name + # is not. + return self._constructor(res_values, index=self.index, + name=res_name).rename(res_name) elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array @@ -844,15 +862,17 @@ def wrapper(self, other, axis=None): raise ValueError('Lengths must match to compare') res_values = na_op(self.values, np.asarray(other)) - return self._constructor(res_values, - index=self.index).__finalize__(self) - - elif (isinstance(other, pd.Categorical) and - not is_categorical_dtype(self)): - raise TypeError("Cannot compare a Categorical for op {op} with " - "Series of dtype {typ}.\nIf you want to compare " - "values, use 'series <op> np.asarray(other)'." - .format(op=op, typ=self.dtype)) + result = self._constructor(res_values, index=self.index) + # rename is needed in case res_name is None and self.name + # is not. + return result.__finalize__(self).rename(res_name) + + elif isinstance(other, pd.Categorical): + # ordering of checks matters; by this point we know + # that not is_categorical_dtype(self) + res_values = op(self.values, other) + return self._constructor(res_values, index=self.index, + name=res_name) elif is_scalar(other) and isna(other): # numpy does not like comparisons vs None @@ -863,16 +883,9 @@ def wrapper(self, other, axis=None): return self._constructor(res_values, index=self.index, name=self.name, dtype='bool') - if is_categorical_dtype(self): - # cats are a special case as get_values() would return an ndarray, - # which would then not take categories ordering into account - # we can go directly to op, as the na_op would just test again and - # dispatch to it. 
- with np.errstate(all='ignore'): - res = op(self.values, other) else: values = self.get_values() - if isinstance(other, (list, np.ndarray)): + if isinstance(other, list): other = np.asarray(other) with np.errstate(all='ignore'): @@ -882,10 +895,9 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return a full value series here - res = com._values_from_object(res) - - res = pd.Series(res, index=self.index, name=self.name, dtype='bool') - return res + res_values = com._values_from_object(res) + return pd.Series(res_values, index=self.index, + name=res_name, dtype='bool') return wrapper diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 8948c5f79900d..2d644652f1b3d 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -788,6 +788,7 @@ def test_equals_op(self): series_d = Series(array_d) with tm.assert_raises_regex(ValueError, "Lengths must match"): index_a == series_b + tm.assert_numpy_array_equal(index_a == series_a, expected1) tm.assert_numpy_array_equal(index_a == series_c, expected2) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 1d9fa9dc15531..cf969a7024563 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -43,6 +43,40 @@ def test_ser_flex_cmp_return_dtypes_empty(self, opname): result = getattr(empty, opname)(const).get_dtype_counts() tm.assert_series_equal(result, Series([1], ['bool'])) + @pytest.mark.parametrize('op', [operator.eq, operator.ne, + operator.le, operator.lt, + operator.ge, operator.gt]) + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('baz', 'baz', 'baz')]) + def test_ser_cmp_result_names(self, names, op): + # datetime64 dtype + dti = pd.date_range('1949-06-07 03:00:00', + freq='H', periods=5, name=names[0]) + ser = Series(dti).rename(names[1]) + result = op(ser, dti) + assert result.name == names[2] + + # datetime64tz dtype + dti = dti.tz_localize('US/Central') + ser = Series(dti).rename(names[1]) + result = op(ser, dti) + assert result.name == names[2] + + # timedelta64 dtype + tdi = dti - dti.shift(1) + ser = Series(tdi).rename(names[1]) + result = op(ser, tdi) + assert result.name == names[2] + + # categorical + if op in [operator.eq, operator.ne]: + # categorical dtype comparisons raise for inequalities + cidx = tdi.astype('category') + ser = Series(cidx).rename(names[1]) + result = op(ser, cidx) + assert result.name == names[2] + class TestTimestampSeriesComparison(object): def test_dt64ser_cmp_period_scalar(self):
Moving towards the One Implementation To Rule Them All: this has `Series[timedelta64].__cmp__` ops wrap `TimedeltaIndex.__cmp__`, and `Series[category].__cmp__` wrap `Categorical.__cmp__`. Also fixes (and tests) incorrectly named results when comparing Series/Index objects. Ref #19513
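A small sketch of the name-propagation fix being tested (names invented, dates taken from the new test):

```python
import pandas as pd

dti = pd.date_range('1949-06-07 03:00:00', freq='H', periods=5, name='foo')

# Differing names -> the result's name is now None ...
ser = pd.Series(dti).rename('bar')
assert (ser == dti).name is None

# ... while matching names are preserved
ser = pd.Series(dti).rename('foo')
assert (ser == dti).name == 'foo'
```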
https://api.github.com/repos/pandas-dev/pandas/pulls/19582
2018-02-07T19:10:33Z
2018-02-18T16:47:29Z
2018-02-18T16:47:29Z
2018-02-18T18:43:54Z
Ignore warnings when reading pickle files
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index fa953f7d876cc..756096dd0c9ce 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,4 +1,5 @@ """ pickle compat """ +import warnings import numpy as np from numpy.lib.format import read_array, write_array @@ -96,7 +97,9 @@ def try_read(path, encoding=None): # cpickle # GH 6899 try: - return read_wrapper(lambda f: pkl.load(f)) + with warnings.catch_warnings(record=True): + # We want to silence any warnings about, e.g., moved modules. + return read_wrapper(lambda f: pkl.load(f)) except Exception: # reg/patched pickle try:
Silences a warning from our tests about ``` pandas/tests/io/test_common.py::TestCommonIOCapabilities::()::test_read_fspath_all[reader10-os-/home/travis/build/pandas-dev/pandas/pandas/tests/io/data/categorical_0_14_1.pickle] /home/travis/build/pandas-dev/pandas/pandas/io/pickle.py:99: FutureWarning: 'pandas.core' is private. Use 'pandas.Categorical' return read_wrapper(lambda f: pkl.load(f)) ```
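The mechanism is just the standard-library context manager; a standalone sketch of the pattern (function name invented):

```python
import pickle
import warnings


def quiet_load(fh):
    # Capture, and thereby silence, any warnings raised during unpickling,
    # e.g. FutureWarnings about modules that have since moved.
    with warnings.catch_warnings(record=True):
        return pickle.load(fh)
```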
https://api.github.com/repos/pandas-dev/pandas/pulls/19580
2018-02-07T17:42:40Z
2018-02-09T12:25:33Z
2018-02-09T12:25:33Z
2018-05-02T13:09:45Z
PERF: Correct signature for group_nth_object / group_last_object
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 4dfd215e6dc3a..2663750de9db9 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -159,6 +159,22 @@ def time_series_nth(self, df): df[1].groupby(df[0]).nth(0) +class NthObject(object): + + goal_time = 0.2 + + def setup_cache(self): + df = DataFrame(np.random.randint(1, 100, (10000,)), columns=['g']) + df['obj'] = ['a'] * 5000 + ['b'] * 5000 + return df + + def time_nth(self, df): + df.groupby('g').nth(5) + + def time_nth_last(self, df): + df.groupby('g').last() + + class DateAttributes(object): goal_time = 0.2 diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a7300f7d1ceb0..452c37b338335 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -719,6 +719,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.transform` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) - Bug in :func:`DataFrame.groupby` passing the `on=` kwarg, and subsequently using ``.apply()`` (:issue:`17813`) - Bug in :func:`DataFrame.resample().aggregate` not raising a ``KeyError`` when aggregating a non-existent column (:issue:`16766`, :issue:`19566`) +- Fixed a performance regression for ``GroupBy.nth`` and ``GroupBy.last`` with some object columns (:issue:`19283`) Sparse ^^^^^^ diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 9cc15fb6692d9..55de700c9af52 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -36,7 +36,8 @@ def group_nth_object(ndarray[object, ndim=2] out, ndarray[int64_t] counts, ndarray[object, ndim=2] values, ndarray[int64_t] labels, - int64_t rank): + int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -47,6 +48,8 @@ def group_nth_object(ndarray[object, ndim=2] out, ndarray[int64_t, ndim=2] nobs ndarray[object, ndim=2] resx + assert min_count == -1, "'min_count' only used in add and prod" + nobs = np.zeros((<object> out).shape, dtype=np.int64) resx = np.empty((<object> out).shape, dtype=object) @@ -80,7 +83,8 @@ def group_nth_object(ndarray[object, ndim=2] out, def group_last_object(ndarray[object, ndim=2] out, ndarray[int64_t] counts, ndarray[object, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -91,6 +95,8 @@ def group_last_object(ndarray[object, ndim=2] out, ndarray[object, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + nobs = np.zeros((<object> out).shape, dtype=np.int64) resx = np.empty((<object> out).shape, dtype=object)
Closes #19283
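The regression was specific to frames with object columns; the added benchmark reduces to roughly this (sizes as in the benchmark):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randint(1, 100, (10000,)), columns=['g'])
df['obj'] = ['a'] * 5000 + ['b'] * 5000

# Both exercise the group_nth_object / group_last_object paths patched here
df.groupby('g').nth(5)
df.groupby('g').last()
```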
https://api.github.com/repos/pandas-dev/pandas/pulls/19579
2018-02-07T17:04:35Z
2018-02-08T01:32:03Z
2018-02-08T01:32:03Z
2018-02-08T02:22:48Z
CI: Fixed NumPy pinning in conda-build
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 4ec5b0a9d8820..6e270519e60c3 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -110,7 +110,7 @@ if [ -e ${REQ} ]; then fi time conda install -n pandas pytest>=3.1.0 -time pip install pytest-xdist moto +time pip install -q pytest-xdist moto if [ "$LINT" ]; then conda install flake8=3.4.1 @@ -181,10 +181,10 @@ elif [ "$CONDA_BUILD_TEST" ]; then # build & install testing echo "[building conda recipe]" - time conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test + time conda build ./conda.recipe --python 3.5 -q --no-test || exit 1 echo "[installing]" - conda install pandas --use-local + conda install pandas --use-local || exit 1 else diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.build b/ci/requirements-3.5_CONDA_BUILD_TEST.build index 6648e3778777c..f7befe3b31865 100644 --- a/ci/requirements-3.5_CONDA_BUILD_TEST.build +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.build @@ -2,5 +2,5 @@ python=3.5* python-dateutil pytz nomkl -numpy=1.13* +numpy cython diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.run b/ci/requirements-3.5_CONDA_BUILD_TEST.run index 19d9a91e86585..669cf437f2164 100644 --- a/ci/requirements-3.5_CONDA_BUILD_TEST.run +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.run @@ -1,5 +1,5 @@ pytz -numpy=1.13* +numpy openpyxl xlsxwriter xlrd diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.sh b/ci/requirements-3.5_CONDA_BUILD_TEST.sh index 09d6775cfc894..093fdbcf21d78 100644 --- a/ci/requirements-3.5_CONDA_BUILD_TEST.sh +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.sh @@ -8,4 +8,4 @@ echo "install 35 CONDA_BUILD_TEST" conda remove -n pandas python-dateutil --force pip install python-dateutil -conda install -n pandas -c conda-forge feather-format pyarrow=0.5.0 +conda install -n pandas -c conda-forge feather-format pyarrow=0.7.1 diff --git a/ci/requirements-3.6.build b/ci/requirements-3.6.build index 94e1152450d87..1c4b46aea3865 100644 --- a/ci/requirements-3.6.build +++ b/ci/requirements-3.6.build @@ -2,5 +2,5 @@ python=3.6* python-dateutil pytz nomkl -numpy=1.13.* +numpy cython diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 87a79f7e5a987..86bed996c8aab 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -14,14 +14,14 @@ requirements: build: - python - cython - - {{ pin_compatible('numpy', upper_bound='1.14') }} + - numpy 1.11.* - setuptools >=3.3 - python-dateutil >=2.5.0 - pytz run: - python - - {{ pin_compatible('numpy', upper_bound='1.14') }} + - numpy >=1.11.* - python-dateutil >=2.5.0 - pytz
Closes #19572, closes #19153 (testing locally as well)
https://api.github.com/repos/pandas-dev/pandas/pulls/19575
2018-02-07T14:30:12Z
2018-02-09T16:11:19Z
2018-02-09T16:11:18Z
2018-02-09T16:11:22Z
DOC: some clean-up of the apply docs (follow-up #18577)
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index fb9e5a6cc75cb..749d4be11ad45 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -774,9 +774,9 @@ We encourage you to view the source code of :meth:`~DataFrame.pipe`. Row or Column-wise Function Application ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Arbitrary functions can be applied along the axes of a DataFrame or Panel +Arbitrary functions can be applied along the axes of a DataFrame using the :meth:`~DataFrame.apply` method, which, like the descriptive -statistics methods, take an optional ``axis`` argument: +statistics methods, takes an optional ``axis`` argument: .. ipython:: python @@ -794,13 +794,15 @@ The :meth:`~DataFrame.apply` method will also dispatch on a string method name. df.apply('mean', axis=1) The return type of the function passed to :meth:`~DataFrame.apply` affects the -type of the ultimate output from DataFrame.apply +type of the final output from ``DataFrame.apply`` for the default behaviour: -* If the applied function returns a ``Series``, the ultimate output is a ``DataFrame``. +* If the applied function returns a ``Series``, the final output is a ``DataFrame``. The columns match the index of the ``Series`` returned by the applied function. -* If the applied function returns any other type, the ultimate output is a ``Series``. -* A ``result_type`` kwarg is accepted with the options: ``reduce``, ``broadcast``, and ``expand``. - These will determine how list-likes return results expand (or not) to a ``DataFrame``. +* If the applied function returns any other type, the final output is a ``Series``. + +This default behaviour can be overridden using the ``result_type``, which +accepts three options: ``reduce``, ``broadcast``, and ``expand``. +These will determine how list-likes return values expand (or not) to a ``DataFrame``. :meth:`~DataFrame.apply` combined with some cleverness can be used to answer many questions about a data set. For example, suppose we wanted to extract the date where the diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1c6b698605521..c939482b10497 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -334,20 +334,20 @@ Convert to an xarray DataArray .. _whatsnew_0230.api_breaking.apply: -Apply Changes -~~~~~~~~~~~~~ +Changes to make output of ``DataFrame.apply`` consistent +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :func:`DataFrame.apply` was inconsistent when applying an arbitrary user-defined-function that returned a list-like with ``axis=1``. Several bugs and inconsistencies are resolved. If the applied function returns a Series, then pandas will return a DataFrame; otherwise a Series will be returned, this includes the case -where a list-like (e.g. ``tuple`` or ``list`` is returned), (:issue:`16353`, :issue:`17437`, :issue:`17970`, :issue:`17348`, :issue:`17892`, :issue:`18573`, -:issue:`17602`, :issue:`18775`, :issue:`18901`, :issue:`18919`) +where a list-like (e.g. ``tuple`` or ``list`` is returned) (:issue:`16353`, :issue:`17437`, :issue:`17970`, :issue:`17348`, :issue:`17892`, :issue:`18573`, +:issue:`17602`, :issue:`18775`, :issue:`18901`, :issue:`18919`). .. ipython:: python df = pd.DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1, columns=['A', 'B', 'C']) df -Previous Behavior. If the returned shape happened to match the original columns, this would return a ``DataFrame``. 
+Previous Behavior: if the returned shape happened to match the length of original columns, this would return a ``DataFrame``. If the return shape did not match, a ``Series`` with lists was returned. .. code-block:: python @@ -373,7 +373,7 @@ If the return shape did not match, a ``Series`` with lists was returned. dtype: object -New Behavior. The behavior is consistent. These will *always* return a ``Series``. +New Behavior: When the applied function returns a list-like, this will now *always* return a ``Series``. .. ipython:: python @@ -386,8 +386,9 @@ To have expanded columns, you can use ``result_type='expand'`` df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand') -To have broadcast the result across, you can use ``result_type='broadcast'``. The shape -must match the original columns. +To broadcast the result across the original columns (the old behaviour for +list-likes of the correct length), you can use ``result_type='broadcast'``. +The shape must match the original columns. .. ipython:: python @@ -397,7 +398,7 @@ Returning a ``Series`` allows one to control the exact return structure and colu .. ipython:: python - df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1) + df.apply(lambda x: Series([1, 2, 3], index=['D', 'E', 'F']), axis=1) .. _whatsnew_0230.api_breaking.build_changes: @@ -523,8 +524,8 @@ Deprecations - The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`). - ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`) - :func:``DataFrame.from_items`` is deprecated. Use :func:``DataFrame.from_dict()`` instead, or :func:``DataFrame.from_dict(OrderedDict())`` if you wish to preserve the key order (:issue:`17320`) -- The ``broadcast`` parameter of ``.apply()`` is removed in favor of ``result_type='broadcast'`` (:issue:`18577`) -- The ``reduce`` parameter of ``.apply()`` is removed in favor of ``result_type='reduce'`` (:issue:`18577`) +- The ``broadcast`` parameter of ``.apply()`` is deprecated in favor of ``result_type='broadcast'`` (:issue:`18577`) +- The ``reduce`` parameter of ``.apply()`` is deprecated in favor of ``result_type='reduce'`` (:issue:`18577`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9487f51919108..28923f0fbf240 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4822,12 +4822,12 @@ def aggregate(self, func, axis=0, *args, **kwargs): def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): - """Applies function along input axis of DataFrame. + """Applies function along an axis of the DataFrame. Objects passed to functions are Series objects having index either the DataFrame's index (axis=0) or the columns (axis=1). - Return type depends on whether passed function aggregates, or the - reduce argument if the DataFrame is empty. + Final return type depends on the return type of the applied function, + or on the `result_type` argument. Parameters ---------- @@ -4863,15 +4863,18 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, by result_type='reduce'. result_type : {'expand', 'reduce', 'broadcast, None} - These only act when axis=1 {columns} + These only act when axis=1 {columns}: + * 'expand' : list-like results will be turned into columns. * 'reduce' : return a Series if possible rather than expanding list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape of the frame, the original index & columns will be retained. - * None : list-like results will be returned as a list - in a single column. However if the apply function - returns a Series these are expanded to columns. + + The default behaviour (None) depends on the return value of the + applied function: list-like results will be returned as a Series + of those. However if the apply function returns a Series these + are expanded to columns. .. versionadded:: 0.23.0 @@ -4893,8 +4896,8 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, We use this DataFrame to illustrate - >>> df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1, - ... columns=['A', 'B', 'C']) + >>> df = pd.DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1, + ... columns=['A', 'B', 'C']) >>> df A B C 0 1 2 3 @@ -4904,7 +4907,8 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, 4 1 2 3 5 1 2 3 - Using a ufunc + Using a numpy universal function (in this case the same as + ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B C @@ -4954,8 +4958,8 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, 4 1 2 5 1 2 - Return a Series inside the function is similar to passing - Passing result_type='expand'. The resulting column names + Returning a Series inside the function is similar to passing + ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: Series([1, 2], index=['foo', 'bar']), axis=1) @@ -4967,10 +4971,10 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, 4 1 2 5 1 2 - - Passing result_type='broadcast' will take a same shape - result, whether list-like or scalar and broadcast it - along the axis. The resulting column names will be the originals. + Passing ``result_type='broadcast'`` will ensure the same shape + result, whether list-like or scalar is returned by the function, + and broadcast it along the axis. The resulting column names will + be the originals. >>> df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast') A B C diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 371377ce2899c..19b126216db81 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -861,14 +861,18 @@ def apply(self, func, axis=0, broadcast=None, reduce=None, by result_type='reduce'. result_type : {'expand', 'reduce', 'broadcast, None} - These only act when axis=1 {columns} - * 'expand' : list-like results will be turned into columns + These only act when axis=1 {columns}: + + * 'expand' : list-like results will be turned into columns. * 'reduce' : return a Series if possible rather than expanding - list-like results. This is the opposite to 'expand' - * 'broadcast' : scalar results will be broadcast to all columns - * None : list-like results will be returned as a list - in a single column. However if the apply function - returns a Series these are expanded to columns. + list-like results. This is the opposite to 'expand'. + * 'broadcast' : results will be broadcast to the original shape + of the frame, the original index & columns will be retained. + + The default behaviour (None) depends on the return value of the + applied function: list-like results will be returned as a Series + of those. However if the apply function returns a Series these + are expanded to columns. .. versionadded:: 0.23.0
Follow-up on #18577 cc @jreback (instead of yet another round of review, this will go a bit quicker)
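As a companion to the reworked docs, the `result_type` options side by side, using the same 6x3 frame from the examples (a sketch, not part of the diff):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
                  columns=['A', 'B', 'C'])

df.apply(lambda x: [1, 2, 3], axis=1)                           # default: Series of lists
df.apply(lambda x: [1, 2, 3], axis=1, result_type='reduce')     # force the Series of lists
df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')     # DataFrame, columns 0, 1, 2
df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')  # DataFrame, columns A, B, C
```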
https://api.github.com/repos/pandas-dev/pandas/pulls/19573
2018-02-07T14:20:24Z
2018-02-08T00:57:20Z
2018-02-08T00:57:19Z
2018-02-08T08:39:41Z
ERR: raise KeyError on invalid column name in aggregate
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c48f6d19e3b10..eaa8841b79a78 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -647,7 +647,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.groupby` where aggregation by ``first``/``last``/``min``/``max`` was causing timestamps to lose precision (:issue:`19526`) - Bug in :func:`DataFrame.transform` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) - Bug in :func:`DataFrame.groupby` passing the `on=` kwarg, and subsequently using ``.apply()`` (:issue:`17813`) -- Bug in :func:`DataFrame.resample().aggregate` not raising a `ValueError` when aggregating a non-existent column (:issue:`16766`) +- Bug in :func:`DataFrame.resample().aggregate` not raising a ``KeyError`` when aggregating a non-existent column (:issue:`16766`, :issue:`19566`) Sparse ^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index 0969717d85e4f..3d8f5f265e3db 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -394,7 +394,7 @@ def nested_renaming_depr(level=4): nested_renaming_depr() elif isinstance(obj, ABCDataFrame) and \ k not in obj.columns: - raise ValueError( + raise KeyError( "Column '{col}' does not exist!".format(col=k)) arg = new_arg diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 9feba3fd042dd..23cc18de34778 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -613,7 +613,7 @@ def f(): t[['A']].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']}) - pytest.raises(ValueError, f) + pytest.raises(KeyError, f) def test_agg_nested_dicts(self): @@ -668,7 +668,7 @@ def test_try_aggregate_non_existing_column(self): df = DataFrame(data).set_index('dt') # Error as we don't have 'z' column - with pytest.raises(ValueError): + with pytest.raises(KeyError): df.resample('30T').agg({'x': ['mean'], 'y': ['median'], 'z': ['sum']})
xref #19552 xref https://github.com/pandas-dev/pandas/pull/19552/files#r166587045
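A sketch of the behaviour change (data invented):

```python
import pandas as pd

df = pd.DataFrame({'x': range(4)},
                  index=pd.date_range('2018-01-01', periods=4, freq='15T'))

# There is no column 'z': this used to raise ValueError, now raises KeyError
try:
    df.resample('30T').agg({'x': ['mean'], 'z': ['sum']})
except KeyError as err:
    print(err)  # Column 'z' does not exist!
```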
https://api.github.com/repos/pandas-dev/pandas/pulls/19566
2018-02-07T11:20:05Z
2018-02-07T12:30:17Z
2018-02-07T12:30:17Z
2018-02-07T12:30:17Z
Cleaned up return of _get_cython_function
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index f352b80ba3069..01241db7c0c42 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2240,7 +2240,7 @@ def wrapper(*args, **kwargs): raise NotImplementedError("function is not implemented for this" "dtype: [how->%s,dtype->%s]" % (how, dtype_str)) - return func, dtype_str + return func def _cython_operation(self, kind, values, how, axis, min_count=-1): assert kind in ['transform', 'aggregate'] @@ -2304,12 +2304,12 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1): values = values.astype(object) try: - func, dtype_str = self._get_cython_function( + func = self._get_cython_function( kind, how, values, is_numeric) except NotImplementedError: if is_numeric: values = _ensure_float64(values) - func, dtype_str = self._get_cython_function( + func = self._get_cython_function( kind, how, values, is_numeric) else: raise
- [X] closes #19557 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19561
2018-02-07T03:21:22Z
2018-02-07T11:01:54Z
2018-02-07T11:01:54Z
2018-02-07T16:12:08Z
DEPR/CLN: fix from_items deprecation warnings
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 575eae1916f4c..4c407ad8a0d93 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -7,6 +7,7 @@ from __future__ import print_function import pytest +from collections import OrderedDict import datetime as dt from functools import partial @@ -81,7 +82,7 @@ def test_agg_period_index(): s1 = Series(np.random.rand(len(index)), index=index) s2 = Series(np.random.rand(len(index)), index=index) series = [('s1', s1), ('s2', s2)] - df = DataFrame.from_items(series) + df = DataFrame.from_dict(OrderedDict(series)) grouped = df.groupby(df.index.month) list(grouped) diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index c9d079421532f..a57c3c41b3637 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -3,6 +3,7 @@ from warnings import catch_warnings import pytest +from collections import OrderedDict from pandas import DataFrame, Series import pandas as pd @@ -457,7 +458,8 @@ def test_dataframe_dummies_preserve_categorical_dtype(self, dtype): @pytest.mark.parametrize('sparse', [True, False]) def test_get_dummies_dont_sparsify_all_columns(self, sparse): # GH18914 - df = DataFrame.from_items([('GDP', [1, 2]), ('Nation', ['AB', 'CD'])]) + df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]), + ('Nation', ['AB', 'CD'])])) df = get_dummies(df, columns=['Nation'], sparse=sparse) df2 = df.reindex(columns=['GDP'])
xref #18529 Replacing some cases of the deprecated ``from_items`` with ``from_dict(OrderedDict())``
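The substitution pattern in isolation, as a sketch of what the updated tests now do:

```python
from collections import OrderedDict

import pandas as pd

series = [('s1', pd.Series([1.0, 2.0])), ('s2', pd.Series([3.0, 4.0]))]

# Deprecated spelling:
# df = pd.DataFrame.from_items(series)

# Order-preserving replacement used throughout this PR:
df = pd.DataFrame.from_dict(OrderedDict(series))
```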
https://api.github.com/repos/pandas-dev/pandas/pulls/19559
2018-02-06T21:51:17Z
2018-02-07T11:04:28Z
2018-02-07T11:04:28Z
2018-02-21T22:09:24Z
REF: Internal / External values
diff --git a/doc/source/internals.rst b/doc/source/internals.rst index ee4df879d9478..957f82fd9eba7 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -89,6 +89,25 @@ not check (or care) whether the levels themselves are sorted. Fortunately, the constructors ``from_tuples`` and ``from_arrays`` ensure that this is true, but if you compute the levels and labels yourself, please be careful. +Values +~~~~~~ + +Pandas extends NumPy's type system with custom types, like ``Categorical`` or +datetimes with a timezone, so we have multiple notions of "values". For 1-D +containers (``Index`` classes and ``Series``) we have the following convention: + +* ``cls._ndarray_values`` is *always* a NumPy ``ndarray``. Ideally, + ``_ndarray_values`` is cheap to compute. For example, for a ``Categorical``, + this returns the codes, not the array of objects. +* ``cls._values`` is the "best possible" array. This could be an + ``ndarray``, ``ExtensionArray``, or an ``Index`` subclass (note: we're in the + process of removing the index subclasses here so that it's always an + ``ndarray`` or ``ExtensionArray``). + +So, for example, ``Series[category]._values`` is a ``Categorical``, while +``Series[category]._ndarray_values`` is the underlying codes. + + .. _ref-subclassing-pandas: Subclassing pandas Data Structures diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 553e1e0ac2066..e618dc6b69b2d 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -266,3 +266,15 @@ def _can_hold_na(self): Setting this to false will optimize some operations like fillna. """ return True + + @property + def _ndarray_values(self): + # type: () -> np.ndarray + """Internal pandas method for lossy conversion to a NumPy ndarray. + + This method is not part of the pandas interface. + + The expectation is that this is cheap to compute, and is primarily + used for interacting with our indexers. + """ + return np.array(self) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 93250bdbb5054..bcf9cb7646704 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -410,6 +410,10 @@ def dtype(self): """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" return self._dtype + @property + def _ndarray_values(self): + return self.codes + @property def _constructor(self): return Categorical diff --git a/pandas/core/base.py b/pandas/core/base.py index 3d8f5f265e3db..0ca029ffd4c25 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -13,7 +13,8 @@ is_list_like, is_scalar, is_datetimelike, - is_extension_type) + is_extension_type, + is_extension_array_dtype) from pandas.util._validators import validate_bool_kwarg @@ -738,7 +739,7 @@ def data(self): @property def itemsize(self): """ return the size of the dtype of the item of the underlying data """ - return self._values.itemsize + return self._ndarray_values.itemsize @property def nbytes(self): @@ -748,7 +749,7 @@ def nbytes(self): @property def strides(self): """ return the strides of the underlying data """ - return self._values.strides + return self._ndarray_values.strides @property def size(self): @@ -768,8 +769,17 @@ def base(self): return self.values.base @property - def _values(self): - """ the internal implementation """ + def _ndarray_values(self): + """The data as an ndarray, possibly losing information. + + The expectation is that this is cheap to compute, and is primarily + used for interacting with our indexers.
+ + - categorical -> codes + """ + # type: () -> np.ndarray + if is_extension_array_dtype(self): + return self.values._ndarray_values return self.values @property @@ -979,6 +989,7 @@ def unique(self): values = self._values if hasattr(values, 'unique'): + result = values.unique() else: from pandas.core.algorithms import unique1d diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b2816343fc8eb..55919fb2bea0d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -927,7 +927,7 @@ def try_timedelta(v): # will try first with a string & object conversion from pandas import to_timedelta try: - return to_timedelta(v)._values.reshape(shape) + return to_timedelta(v)._ndarray_values.reshape(shape) except Exception: return v.reshape(shape) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c66e7fcfc6978..c2b71bc316fe8 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1709,7 +1709,7 @@ def is_extension_array_dtype(arr_or_dtype): from pandas.core.arrays import ExtensionArray # we want to unpack series, anything else? - if isinstance(arr_or_dtype, ABCSeries): + if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): arr_or_dtype = arr_or_dtype._values return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index ddecbe85087d8..d306d0d78f1f4 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -488,12 +488,14 @@ def _concat_index_asobject(to_concat, name=None): concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation """ + from pandas import Index + from pandas.core.arrays import ExtensionArray - klasses = ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex + klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, + ExtensionArray) to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] - from pandas import Index self = to_concat[0] attribs = self._get_attributes_dict() attribs['name'] = name diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 15df77bf772dc..be7c1624936bf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -31,12 +31,14 @@ is_object_dtype, is_categorical_dtype, is_interval_dtype, + is_period_dtype, is_bool, is_bool_dtype, is_signed_integer_dtype, is_unsigned_integer_dtype, is_integer_dtype, is_float_dtype, is_datetime64_any_dtype, + is_datetime64tz_dtype, is_timedelta64_dtype, needs_i8_conversion, is_iterator, is_list_like, @@ -412,7 +414,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): values = np.array(values, copy=False) if is_object_dtype(values): values = cls(values, name=name, dtype=dtype, - **kwargs)._values + **kwargs)._ndarray_values result = object.__new__(cls) result._data = values @@ -594,6 +596,40 @@ def values(self): """ return the underlying data as an ndarray """ return self._data.view(np.ndarray) + @property + def _values(self): + # type: () -> Union[ExtensionArray, Index] + # TODO(EA): remove index types as they become extension arrays + """The best array representation. + + This is an ndarray, ExtensionArray, or Index subclass. This differs + from ``_ndarray_values``, which always returns an ndarray. + + Both ``_values`` and ``_ndarray_values`` are consistent between + ``Series`` and ``Index``. + + It may differ from the public '.values' method. 
+ + index | values | _values | _ndarray_values | + ----------------- | --------------- | ----------- | --------------- | + CategoricalIndex | Categorical | Categorical | codes | + DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] | + + For the following, the ``._values`` is currently ``ndarray[object]``, + but will soon be an ``ExtensionArray`` + + index | values | _values | _ndarray_values | + ----------------- | --------------- | ------------ | --------------- | + PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] | + IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] | + + See Also + -------- + values + _ndarray_values + """ + return self.values + def get_values(self): """ return the underlying data as an ndarray """ return self.values @@ -664,7 +700,7 @@ def ravel(self, order='C'): -------- numpy.ndarray.ravel """ - return self._values.ravel(order=order) + return self._ndarray_values.ravel(order=order) # construction helpers @classmethod @@ -1597,7 +1633,7 @@ def _constructor(self): @cache_readonly def _engine(self): # property, for now, slow to look up - return self._engine_type(lambda: self._values, len(self)) + return self._engine_type(lambda: self._ndarray_values, len(self)) def _validate_index_level(self, level): """ @@ -2228,27 +2264,37 @@ def union(self, other): other = other.astype('O') return this.union(other) + # TODO(EA): setops-refactor, clean all this up + if is_period_dtype(self) or is_datetime64tz_dtype(self): + lvals = self._ndarray_values + else: + lvals = self._values + if is_period_dtype(other) or is_datetime64tz_dtype(other): + rvals = other._ndarray_values + else: + rvals = other._values + if self.is_monotonic and other.is_monotonic: try: - result = self._outer_indexer(self._values, other._values)[0] + result = self._outer_indexer(lvals, rvals)[0] except TypeError: # incomparable objects - result = list(self._values) + result = list(lvals) # worth making this faster?
a very unusual case - value_set = set(self._values) - result.extend([x for x in other._values if x not in value_set]) + value_set = set(lvals) + result.extend([x for x in rvals if x not in value_set]) else: indexer = self.get_indexer(other) indexer, = (indexer == -1).nonzero() if len(indexer) > 0: - other_diff = algos.take_nd(other._values, indexer, + other_diff = algos.take_nd(rvals, indexer, allow_fill=False) - result = _concat._concat_compat((self._values, other_diff)) + result = _concat._concat_compat((lvals, other_diff)) try: - self._values[0] < other_diff[0] + lvals[0] < other_diff[0] except TypeError as e: warnings.warn("%s, sort order is undefined for " "incomparable objects" % e, RuntimeWarning, @@ -2260,7 +2306,7 @@ def union(self, other): result.sort() else: - result = self._values + result = lvals try: result = np.sort(result) @@ -2311,20 +2357,30 @@ def intersection(self, other): other = other.astype('O') return this.intersection(other) + # TODO(EA): setops-refactor, clean all this up + if is_period_dtype(self): + lvals = self._ndarray_values + else: + lvals = self._values + if is_period_dtype(other): + rvals = other._ndarray_values + else: + rvals = other._values + if self.is_monotonic and other.is_monotonic: try: - result = self._inner_indexer(self._values, other._values)[0] + result = self._inner_indexer(lvals, rvals)[0] return self._wrap_union_result(other, result) except TypeError: pass try: - indexer = Index(other._values).get_indexer(self._values) + indexer = Index(rvals).get_indexer(lvals) indexer = indexer.take((indexer != -1).nonzero()[0]) except Exception: # duplicates indexer = algos.unique1d( - Index(other._values).get_indexer_non_unique(self._values)[0]) + Index(rvals).get_indexer_non_unique(lvals)[0]) indexer = indexer[indexer != -1] taken = other.take(indexer) @@ -2700,7 +2756,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): raise ValueError('limit argument only valid if doing pad, ' 'backfill or nearest reindexing') - indexer = self._engine.get_indexer(target._values) + indexer = self._engine.get_indexer(target._ndarray_values) return _ensure_platform_int(indexer) @@ -2716,12 +2772,13 @@ def _get_fill_indexer(self, target, method, limit=None, tolerance=None): if self.is_monotonic_increasing and target.is_monotonic_increasing: method = (self._engine.get_pad_indexer if method == 'pad' else self._engine.get_backfill_indexer) - indexer = method(target._values, limit) + indexer = method(target._ndarray_values, limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None: - indexer = self._filter_indexer_tolerance(target._values, indexer, + indexer = self._filter_indexer_tolerance(target._ndarray_values, + indexer, tolerance) return indexer @@ -2812,7 +2869,7 @@ def get_indexer_non_unique(self, target): self = Index(self.asi8) tgt_values = target.asi8 else: - tgt_values = target._values + tgt_values = target._ndarray_values indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return _ensure_platform_int(indexer), missing @@ -3247,16 +3304,17 @@ def _join_multi(self, other, how, return_indexers=True): def _join_non_unique(self, other, how='left', return_indexers=False): from pandas.core.reshape.merge import _get_join_indexers - left_idx, right_idx = _get_join_indexers([self._values], - [other._values], how=how, + left_idx, right_idx = _get_join_indexers([self._ndarray_values], + [other._ndarray_values], + how=how, sort=True) left_idx = _ensure_platform_int(left_idx) right_idx = 
_ensure_platform_int(right_idx) - join_index = np.asarray(self._values.take(left_idx)) + join_index = np.asarray(self._ndarray_values.take(left_idx)) mask = left_idx == -1 - np.putmask(join_index, mask, other._values.take(right_idx)) + np.putmask(join_index, mask, other._ndarray_values.take(right_idx)) join_index = self._wrap_joined_index(join_index, other) @@ -3403,8 +3461,8 @@ def _join_monotonic(self, other, how='left', return_indexers=False): else: return ret_index - sv = self._values - ov = other._values + sv = self._ndarray_values + ov = other._ndarray_values if self.is_unique and other.is_unique: # We can perform much better than the general case @@ -3756,7 +3814,7 @@ def insert(self, loc, item): item = self._na_value _self = np.asarray(self) - item = self._coerce_scalar_to_index(item)._values + item = self._coerce_scalar_to_index(item)._ndarray_values idx = np.concatenate((_self[:loc], item, _self[loc:])) return self._shallow_copy_with_infer(idx) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 60f5552576ea1..a4d0f787cc6ec 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -293,6 +293,11 @@ def values(self): """ return the underlying data, which is a Categorical """ return self._data + @property + def itemsize(self): + # Size of the items in categories, not codes. + return self.values.itemsize + def get_values(self): """ return the underlying data as an ndarray """ return self._data.get_values() @@ -386,8 +391,8 @@ def is_monotonic_decreasing(self): def unique(self, level=None): if level is not None: self._validate_index_level(level) - result = base.IndexOpsMixin.unique(self) - # CategoricalIndex._shallow_copy uses keeps original categories + result = self.values.unique() + # CategoricalIndex._shallow_copy keeps original categories # and ordered if not otherwise specified return self._shallow_copy(result, categories=result.categories, ordered=result.ordered) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 4a526955d9bf4..c98f8ceea0ffa 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -376,7 +376,7 @@ def sort_values(self, return_indexer=False, ascending=True): sorted_index = self.take(_as) return sorted_index, _as else: - sorted_values = np.sort(self._values) + sorted_values = np.sort(self._ndarray_values) attribs = self._get_attributes_dict() freq = attribs['freq'] diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 61c941c3d2333..cc9ce1f3fd5eb 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -678,6 +678,15 @@ def _assert_tzawareness_compat(self, other): raise TypeError('Cannot compare tz-naive and tz-aware ' 'datetime-like objects') + @property + def _values(self): + # tz-naive -> ndarray + # tz-aware -> DatetimeIndex + if self.tz is not None: + return self + else: + return self.values + @property def tzinfo(self): """ @@ -685,6 +694,27 @@ def tzinfo(self): """ return self.tz + @property + def size(self): + # TODO: Remove this when we have a DatetimeTZArray + # Necessary to avoid recursion error since DTI._values is a DTI + # for TZ-aware + return self._ndarray_values.size + + @property + def shape(self): + # TODO: Remove this when we have a DatetimeTZArray + # Necessary to avoid recursion error since DTI._values is a DTI + # for TZ-aware + return self._ndarray_values.shape + + @property + def nbytes(self): + # TODO: Remove this when we have a 
DatetimeTZArray + # Necessary to avoid recursion error since DTI._values is a DTI + # for TZ-aware + return self._ndarray_values.nbytes + @cache_readonly def _timezone(self): """ Comparable timezone both for pytz / dateutil""" @@ -1086,6 +1116,19 @@ def snap(self, freq='S'): # we know it conforms; skip check return DatetimeIndex(snapped, freq=freq, verify_integrity=False) + def unique(self, level=None): + # Override here since IndexOpsMixin.unique uses self._values.unique + # For DatetimeIndex with TZ, that's a DatetimeIndex -> recursion error + # So we extract the tz-naive DatetimeIndex, unique that, and wrap the + # result with our TZ. + if self.tz is not None: + naive = type(self)(self._ndarray_values, copy=False) + else: + naive = self + result = super(DatetimeIndex, naive).unique(level=level) + return self._simple_new(result, name=self.name, tz=self.tz, + freq=self.freq) + def union(self, other): """ Specialized union for DatetimeIndex objects. If combine diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 3bf783b5a2faa..d431ea1e51e31 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -680,6 +680,16 @@ def length(self): 'e.g. Intervals with string endpoints') raise TypeError(msg) + @property + def size(self): + # Avoid materializing self.values + return self.left.size + + @property + def shape(self): + # Avoid materializing self.values + return self.left.shape + def __len__(self): return len(self.left) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 510f7245cebd8..94dbd8b884e47 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -799,9 +799,11 @@ def values(self): box = hasattr(lev, '_box_values') # Try to minimize boxing. if box and len(lev) > len(lab): - taken = lev._box_values(algos.take_1d(lev._values, lab)) + taken = lev._box_values(algos.take_1d(lev._ndarray_values, + lab)) elif box: - taken = algos.take_1d(lev._box_values(lev._values), lab, + taken = algos.take_1d(lev._box_values(lev._ndarray_values), + lab, fill_value=_get_na_value(lev.dtype.type)) else: taken = algos.take_1d(np.asarray(lev._values), lab) @@ -2410,7 +2412,7 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels): mapper = Series(indexer) indexer = labels.take(_ensure_platform_int(indexer)) result = Series(Index(indexer).isin(r).nonzero()[0]) - m = result.map(mapper)._values + m = result.map(mapper)._ndarray_values else: m = np.zeros(len(labels), dtype=bool) @@ -2505,6 +2507,7 @@ def get_locs(self, seq): MultiIndex.slice_locs : Get slice location given start label(s) and end label(s).
""" + from .numeric import Int64Index # must be lexsorted to at least as many levels true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] @@ -2530,7 +2533,6 @@ def _convert_to_indexer(r): "that is not the same length as the " "index") r = r.nonzero()[0] - from .numeric import Int64Index return Int64Index(r) def _update_indexer(idxr, indexer=indexer): @@ -2567,9 +2569,8 @@ def _update_indexer(idxr, indexer=indexer): if indexers is not None: indexer = _update_indexer(indexers, indexer=indexer) else: - from .numeric import Int64Index # no matches we are done - return Int64Index([])._values + return Int64Index([])._ndarray_values elif com.is_null_slice(k): # empty slice @@ -2589,8 +2590,8 @@ def _update_indexer(idxr, indexer=indexer): # empty indexer if indexer is None: - return Int64Index([])._values - return indexer._values + return Int64Index([])._ndarray_values + return indexer._ndarray_values def truncate(self, before=None, after=None): """ @@ -2639,7 +2640,7 @@ def equals(self, other): if not isinstance(other, MultiIndex): other_vals = com._values_from_object(_ensure_index(other)) - return array_equivalent(self._values, other_vals) + return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: return False @@ -2655,8 +2656,9 @@ def equals(self, other): olabels = other.labels[i] olabels = olabels[olabels != -1] - ovalues = algos.take_nd(np.asarray(other.levels[i]._values), - olabels, allow_fill=False) + ovalues = algos.take_nd( + np.asarray(other.levels[i]._values), + olabels, allow_fill=False) # since we use NaT both datetime64 and timedelta64 # we can have a situation where a level is typed say @@ -2704,7 +2706,8 @@ def union(self, other): if len(other) == 0 or self.equals(other): return self - uniq_tuples = lib.fast_unique_multiple([self._values, other._values]) + uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, + other._ndarray_values]) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) @@ -2726,8 +2729,8 @@ def intersection(self, other): if self.equals(other): return self - self_tuples = self._values - other_tuples = other._values + self_tuples = self._ndarray_values + other_tuples = other._ndarray_values uniq_tuples = sorted(set(self_tuples) & set(other_tuples)) if len(uniq_tuples) == 0: return MultiIndex(levels=[[]] * self.nlevels, @@ -2756,7 +2759,8 @@ def difference(self, other): labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) - difference = sorted(set(self._values) - set(other._values)) + difference = sorted(set(self._ndarray_values) - + set(other._ndarray_values)) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index b02aee0495d8c..a4558116bfa63 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -378,7 +378,7 @@ def equals(self, other): if (not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape): return False - left, right = self._values, other._values + left, right = self._ndarray_values, other._ndarray_values return ((left == right) | (self._isnan & other._isnan)).all() except (TypeError, ValueError): return False diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 1f8542ed5ee60..8f2d7d382a16e 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -54,7 +54,7 @@ def _field_accessor(name, alias, docstring=None): def f(self): base, mult = _gfc(self.freq) - 
result = get_period_field_arr(alias, self._values, base) + result = get_period_field_arr(alias, self._ndarray_values, base) return Index(result, name=self.name) f.__name__ = name f.__doc__ = docstring @@ -82,7 +82,7 @@ def _period_index_cmp(opname, cls, nat_result=False): def wrapper(self, other): if isinstance(other, Period): - func = getattr(self._values, opname) + func = getattr(self._ndarray_values, opname) other_base, _ = _gfc(other.freq) if other.freq != self.freq: msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) @@ -94,7 +94,8 @@ def wrapper(self, other): msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) raise IncompatibleFrequency(msg) - result = getattr(self._values, opname)(other._values) + op = getattr(self._ndarray_values, opname) + result = op(other._ndarray_values) mask = self._isnan | other._isnan if mask.any(): @@ -102,11 +103,11 @@ def wrapper(self, other): return result elif other is tslib.NaT: - result = np.empty(len(self._values), dtype=bool) + result = np.empty(len(self._ndarray_values), dtype=bool) result.fill(nat_result) else: other = Period(other, freq=self.freq) - func = getattr(self._values, opname) + func = getattr(self._ndarray_values, opname) result = func(other.ordinal) if self.hasnans: @@ -275,11 +276,11 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, if isinstance(data, PeriodIndex): if freq is None or freq == data.freq: # no freq change freq = data.freq - data = data._values + data = data._ndarray_values else: base1, _ = _gfc(data.freq) base2, _ = _gfc(freq) - data = period.period_asfreq_arr(data._values, + data = period.period_asfreq_arr(data._ndarray_values, base1, base2, 1) return cls._simple_new(data, name=name, freq=freq) @@ -374,7 +375,7 @@ def _shallow_copy(self, values=None, freq=None, **kwargs): if freq is None: freq = self.freq if values is None: - values = self._values + values = self._ndarray_values return super(PeriodIndex, self)._shallow_copy(values=values, freq=freq, **kwargs) @@ -407,7 +408,7 @@ def __contains__(self, key): @property def asi8(self): - return self._values.view('i8') + return self._ndarray_values.view('i8') @cache_readonly def _int64index(self): @@ -418,7 +419,8 @@ def values(self): return self.astype(object).values @property - def _values(self): + def _ndarray_values(self): + # Ordinals return self._data def __array__(self, dtype=None): @@ -475,6 +477,16 @@ def _to_embed(self, keep_tz=False, dtype=None): return self.astype(object).values + @property + def size(self): + # Avoid materializing self._values + return self._ndarray_values.size + + @property + def shape(self): + # Avoid materializing self._values + return self._ndarray_values.shape + @property def _formatter_func(self): return lambda x: "'%s'" % x @@ -489,13 +501,15 @@ def asof_locs(self, where, mask): if isinstance(where_idx, DatetimeIndex): where_idx = PeriodIndex(where_idx.values, freq=self.freq) - locs = self._values[mask].searchsorted(where_idx._values, side='right') + locs = self._ndarray_values[mask].searchsorted( + where_idx._ndarray_values, side='right') locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self))[mask].take(locs) first = mask.argmax() - result[(locs == 0) & (where_idx._values < self._values[first])] = -1 + result[(locs == 0) & (where_idx._ndarray_values < + self._ndarray_values[first])] = -1 return result @@ -523,7 +537,8 @@ def searchsorted(self, value, side='left', sorter=None): elif isinstance(value, compat.string_types): value = Period(value, freq=self.freq).ordinal - 
return self._values.searchsorted(value, side=side, sorter=sorter) + return self._ndarray_values.searchsorted(value, side=side, + sorter=sorter) @property def is_all_dates(self): @@ -664,7 +679,7 @@ def to_timestamp(self, freq=None, how='start'): base, mult = _gfc(freq) new_data = self.asfreq(freq, how) - new_data = period.periodarr_to_dt64arr(new_data._values, base) + new_data = period.periodarr_to_dt64arr(new_data._ndarray_values, base) return DatetimeIndex(new_data, freq='infer', name=self.name) def _maybe_convert_timedelta(self, other): @@ -744,7 +759,7 @@ def shift(self, n): ------- shifted : PeriodIndex """ - values = self._values + n * self.freq.n + values = self._ndarray_values + n * self.freq.n if self.hasnans: values[self._isnan] = tslib.iNaT return self._shallow_copy(values=values) @@ -775,7 +790,7 @@ def get_value(self, series, key): grp = resolution.Resolution.get_freq_group(reso) freqn = resolution.get_freq_group(self.freq) - vals = self._values + vals = self._ndarray_values # if our data is higher resolution than requested key, slice if grp < freqn: @@ -786,7 +801,7 @@ def get_value(self, series, key): if ord2 < vals[0] or ord1 > vals[-1]: raise KeyError(key) - pos = np.searchsorted(self._values, [ord1, ord2]) + pos = np.searchsorted(self._ndarray_values, [ord1, ord2]) key = slice(pos[0], pos[1] + 1) return series[key] elif grp == freqn: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0d833807602e1..2437b7d396e84 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4430,7 +4430,7 @@ def _convert_index(index, encoding=None, format_type=None): elif isinstance(index, (Int64Index, PeriodIndex)): atom = _tables().Int64Col() # avoid to store ndarray of Period objects - return IndexCol(index._values, 'integer', atom, + return IndexCol(index._ndarray_values, 'integer', atom, freq=getattr(index, 'freq', None), index_name=index_name) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 07163615c6ba4..9ca06475290e4 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -249,11 +249,11 @@ def _convert_1d(values, units, axis): is_float(values)): return get_datevalue(values, axis.freq) if isinstance(values, PeriodIndex): - return values.asfreq(axis.freq)._values + return values.asfreq(axis.freq)._ndarray_values if isinstance(values, Index): return values.map(lambda x: get_datevalue(x, axis.freq)) if is_period_arraylike(values): - return PeriodIndex(values, freq=axis.freq)._values + return PeriodIndex(values, freq=axis.freq)._ndarray_values if isinstance(values, (list, tuple, np.ndarray, Index)): return [get_datevalue(x, axis.freq) for x in values] return values @@ -642,7 +642,7 @@ def _daily_finder(vmin, vmax, freq): info = np.zeros(span, dtype=[('val', np.int64), ('maj', bool), ('min', bool), ('fmt', '|S20')]) - info['val'][:] = dates_._values + info['val'][:] = dates_._ndarray_values info['fmt'][:] = '' info['maj'][[0, -1]] = True # .. 
and set some shortcuts diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 8948c5f79900d..2d8d70aa2ac84 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -314,7 +314,8 @@ def test_ensure_copied_data(self): # .values an object array of Period, thus copied result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) - tm.assert_numpy_array_equal(index._values, result._values, + tm.assert_numpy_array_equal(index._ndarray_values, + result._ndarray_values, check_same='same') elif isinstance(index, IntervalIndex): # checked in test_interval.py @@ -323,7 +324,8 @@ def test_ensure_copied_data(self): result = index_type(index.values, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index.values, result.values, check_same='same') - tm.assert_numpy_array_equal(index._values, result._values, + tm.assert_numpy_array_equal(index._ndarray_values, + result._ndarray_values, check_same='same') def test_copy_and_deepcopy(self, indices): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index a75ace2933b71..05678b0c8dd45 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -469,3 +469,12 @@ def test_factorize_dst(self): arr, res = obj.factorize() tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) tm.assert_index_equal(res, idx) + + @pytest.mark.parametrize('arr, expected', [ + (pd.DatetimeIndex(['2017', '2017']), pd.DatetimeIndex(['2017'])), + (pd.DatetimeIndex(['2017', '2017'], tz='US/Eastern'), + pd.DatetimeIndex(['2017'], tz='US/Eastern')), + ]) + def test_unique(self, arr, expected): + result = arr.unique() + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index 639a9272c3808..eca80d17b1dc3 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -119,8 +119,8 @@ def test_constructor_fromarraylike(self): tm.assert_index_equal(PeriodIndex(idx.values), idx) tm.assert_index_equal(PeriodIndex(list(idx.values)), idx) - pytest.raises(ValueError, PeriodIndex, idx._values) - pytest.raises(ValueError, PeriodIndex, list(idx._values)) + pytest.raises(ValueError, PeriodIndex, idx._ndarray_values) + pytest.raises(ValueError, PeriodIndex, list(idx._ndarray_values)) pytest.raises(TypeError, PeriodIndex, data=Period('2007', freq='A')) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index f3469b829f8a3..b3f059018493c 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -205,7 +205,7 @@ def test_values(self): tm.assert_numpy_array_equal(idx.values, exp) tm.assert_numpy_array_equal(idx.get_values(), exp) exp = np.array([], dtype=np.int64) - tm.assert_numpy_array_equal(idx._values, exp) + tm.assert_numpy_array_equal(idx._ndarray_values, exp) idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M') @@ -213,7 +213,7 @@ def test_values(self): tm.assert_numpy_array_equal(idx.values, exp) tm.assert_numpy_array_equal(idx.get_values(), exp) exp = np.array([492, -9223372036854775808], dtype=np.int64) - tm.assert_numpy_array_equal(idx._values, exp) + tm.assert_numpy_array_equal(idx._ndarray_values, exp) idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D') @@ -222,7 +222,7 @@ def test_values(self): tm.assert_numpy_array_equal(idx.values, exp) 
tm.assert_numpy_array_equal(idx.get_values(), exp) exp = np.array([14975, -9223372036854775808], dtype=np.int64) - tm.assert_numpy_array_equal(idx._values, exp) + tm.assert_numpy_array_equal(idx._ndarray_values, exp) def test_period_index_length(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index f5e7c8269dc4f..97500f2f5ed95 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -20,7 +20,7 @@ class TestPeriodRepresentation(object): def _check_freq(self, freq, base_date): rng = PeriodIndex(start=base_date, periods=10, freq=freq) exp = np.arange(10, dtype=np.int64) - tm.assert_numpy_array_equal(rng._values, exp) + tm.assert_numpy_array_equal(rng.asi8, exp) def test_annual(self): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index c2e40c79f8914..e9fddfde90348 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -353,6 +353,14 @@ def test_append(self): expected = Index(list('caaabbca')) tm.assert_index_equal(result, expected, exact=True) + def test_append_to_another(self): + # hits _concat_index_asobject + fst = Index(['a', 'b']) + snd = CategoricalIndex(['d', 'e']) + result = fst.append(snd) + expected = Index(['a', 'b', 'd', 'e']) + tm.assert_index_equal(result, expected) + def test_insert(self): ci = self.create_index() diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index e59456b8a2d5e..cd6a5c761d0c2 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -962,6 +962,53 @@ def test_values_boxed(self): # Check that code branches for boxed values produce identical results tm.assert_numpy_array_equal(result.values[:4], result[:4].values) + def test_values_multiindex_datetimeindex(self): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(10**18, 10**18 + 5) + naive = pd.DatetimeIndex(ints) + aware = pd.DatetimeIndex(ints, tz='US/Central') + + idx = pd.MultiIndex.from_arrays([naive, aware]) + result = idx.values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware) + + # n_lev > n_lab + result = idx[:2].values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive[:2]) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware[:2]) + + def test_values_multiindex_periodindex(self): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(2007, 2012) + pidx = pd.PeriodIndex(ints, freq='D') + + idx = pd.MultiIndex.from_arrays([ints, pidx]) + result = idx.values + + outer = pd.Int64Index([x[0] for x in result]) + tm.assert_index_equal(outer, pd.Int64Index(ints)) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx) + + # n_lev > n_lab + result = idx[:2].values + + outer = pd.Int64Index([x[0] for x in result]) + tm.assert_index_equal(outer, pd.Int64Index(ints[:2])) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx[:2]) + def test_append(self): result = self.index[:3].append(self.index[3:]) assert result.equals(self.index) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index df2547fc7b0da..4b5ad336139b0 100644 --- 
a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -338,8 +338,9 @@ def test_ops(self): if not isinstance(o, PeriodIndex): expected = getattr(o.values, op)() else: - expected = pd.Period(ordinal=getattr(o._values, op)(), - freq=o.freq) + expected = pd.Period( + ordinal=getattr(o._ndarray_values, op)(), + freq=o.freq) try: assert result == expected except TypeError: @@ -450,7 +451,7 @@ def test_value_counts_unique_nunique_null(self): for orig in self.objs: o = orig.copy() klass = type(o) - values = o._values + values = o._ndarray_values if not self._allow_na_ops(o): continue @@ -1175,3 +1176,54 @@ def test_iter_box(self): assert isinstance(res, pd.Period) assert res.freq == 'M' assert res == exp + + +@pytest.mark.parametrize('array, expected_type, dtype', [ + (np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'), + (np.array(['a', 'b']), np.ndarray, 'object'), + (pd.Categorical(['a', 'b']), pd.Categorical, 'category'), + (pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'), + (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex, + 'datetime64[ns, US/Central]'), + (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'), + (pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'), + (pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'), +]) +def test_values_consistent(array, expected_type, dtype): + l_values = pd.Series(array)._values + r_values = pd.Index(array)._values + assert type(l_values) is expected_type + assert type(l_values) is type(r_values) + + if isinstance(l_values, np.ndarray): + tm.assert_numpy_array_equal(l_values, r_values) + elif isinstance(l_values, pd.Index): + tm.assert_index_equal(l_values, r_values) + elif pd.api.types.is_categorical(l_values): + tm.assert_categorical_equal(l_values, r_values) + else: + raise TypeError("Unexpected type {}".format(type(l_values))) + + assert l_values.dtype == dtype + assert r_values.dtype == dtype + + +@pytest.mark.parametrize('array, expected', [ + (np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)), + (np.array(['0', '1']), np.array(['0', '1'], dtype=object)), + (pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')), + (pd.DatetimeIndex(['2017-01-01T00:00:00']), + np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')), + (pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"), + np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')), + (pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')), + pytest.mark.xfail(reason='PeriodArray not implemented')(( + pd.PeriodIndex(['2017', '2018'], freq='D'), + np.array([17167, 17532]), + )), +]) +def test_ndarray_values(array, expected): + l_values = pd.Series(array)._ndarray_values + r_values = pd.Index(array)._ndarray_values + tm.assert_numpy_array_equal(l_values, r_values) + tm.assert_numpy_array_equal(l_values, expected)
Closes #19548

Just changed Index so far. Need to:

- Run an ASV
- add `Series._ndarray_values`
- Start cleaning up now-redundant if conditions.
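A minimal sketch of the convention this PR introduces, using the `Categorical` case from the new tests (`_ndarray_values` is internal API from this era of pandas, so this only runs against a contemporary checkout):

```python
import pandas as pd

ser = pd.Series(pd.Categorical(['a', 'b', 'a']))

# ._values is the "best possible" array -- here, the Categorical itself.
ser._values          # [a, b, a] Categories (2, object): [a, b]

# ._ndarray_values is always a NumPy ndarray and should be cheap to
# compute -- for a Categorical, the integer codes, not the objects.
ser._ndarray_values  # array([0, 1, 0], dtype=int8)
```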
https://api.github.com/repos/pandas-dev/pandas/pulls/19558
2018-02-06T20:26:15Z
2018-02-13T14:50:55Z
2018-02-13T14:50:55Z
2018-02-14T00:01:55Z
Implement get_day_of_year, tests
diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd index a1bbeea1cb69a..42473a97a7150 100644 --- a/pandas/_libs/tslibs/ccalendar.pxd +++ b/pandas/_libs/tslibs/ccalendar.pxd @@ -10,3 +10,4 @@ cdef int dayofweek(int y, int m, int m) nogil cdef bint is_leapyear(int64_t year) nogil cpdef int32_t get_days_in_month(int year, Py_ssize_t month) nogil cpdef int32_t get_week_of_year(int year, int month, int day) nogil +cpdef int32_t get_day_of_year(int year, int month, int day) nogil diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index ae52f7dd30165..613e111443636 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -142,17 +142,13 @@ cpdef int32_t get_week_of_year(int year, int month, int day) nogil: Assumes the inputs describe a valid date. """ cdef: - bint isleap, isleap_prev - int32_t mo_off + bint isleap int32_t doy, dow int woy isleap = is_leapyear(year) - isleap_prev = is_leapyear(year - 1) - - mo_off = _month_offset[isleap * 13 + month - 1] - doy = mo_off + day + doy = get_day_of_year(year, month, day) dow = dayofweek(year, month, day) # estimate @@ -162,7 +158,7 @@ cpdef int32_t get_week_of_year(int year, int month, int day) nogil: # verify if woy < 0: - if (woy > -2) or (woy == -2 and isleap_prev): + if (woy > -2) or (woy == -2 and is_leapyear(year - 1)): woy = 53 else: woy = 52 @@ -171,3 +167,36 @@ cpdef int32_t get_week_of_year(int year, int month, int day) nogil: woy = 1 return woy + + +@cython.wraparound(False) +@cython.boundscheck(False) +cpdef int32_t get_day_of_year(int year, int month, int day) nogil: + """Return the ordinal day-of-year for the given day. + + Parameters + ---------- + year : int + month : int + day : int + + Returns + ------- + day_of_year : int32_t + + Notes + ----- + Assumes the inputs describe a valid date. 
+ """ + cdef: + bint isleap + int32_t mo_off + int32_t doy, dow + int woy + + isleap = is_leapyear(year) + + mo_off = _month_offset[isleap * 13 + month - 1] + + day_of_year = mo_off + day + return day_of_year diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index a8a865eec38dd..7a4b9775bd56e 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -15,7 +15,7 @@ cnp.import_array() from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, - get_week_of_year) + get_week_of_year, get_day_of_year) from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, td64_to_tdstruct) from nattype cimport NPY_NAT @@ -374,15 +374,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field): cdef: Py_ssize_t i, count = 0 ndarray[int32_t] out - ndarray[int32_t, ndim=2] _month_offset - int isleap, isleap_prev pandas_datetimestruct dts - int mo_off, doy, dow - - _month_offset = np.array( - [[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], - [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]], - dtype=np.int32 ) count = len(dtindex) out = np.empty(count, dtype='i4') @@ -482,8 +474,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field): continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - out[i] = _month_offset[isleap, dts.month -1] + dts.day + out[i] = get_day_of_year(dts.year, dts.month, dts.day) return out elif field == 'dow': diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 5098e5c9100ff..e82c9c613c62a 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -22,7 +22,7 @@ from cpython.datetime cimport PyDateTime_Check, PyDateTime_IMPORT PyDateTime_IMPORT from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, - dt64_to_dtstruct, is_leapyear) + dt64_to_dtstruct) cimport util from util cimport is_period_object, is_string_object, INT32_MIN @@ -34,11 +34,12 @@ from timezones cimport is_utc, is_tzlocal, get_utcoffset, get_dst_info from timedeltas cimport delta_to_nanoseconds from ccalendar import MONTH_NUMBERS +from ccalendar cimport is_leapyear from frequencies cimport (get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str, get_rule_month) from parsing import parse_time_string, NAT_SENTINEL -from resolution import resolution, Resolution +from resolution import Resolution from nattype import nat_strings, NaT, iNaT from nattype cimport _nat_scalar_rules, NPY_NAT diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b9be9c16eb6c3..47179a4e1d761 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -29,8 +29,7 @@ from nattype import NaT from nattype cimport NPY_NAT from np_datetime import OutOfBoundsDatetime from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds, - pandas_datetimestruct, dt64_to_dtstruct, - is_leapyear) + pandas_datetimestruct, dt64_to_dtstruct) from timedeltas import Timedelta from timedeltas cimport delta_to_nanoseconds from timezones cimport ( @@ -291,14 +290,6 @@ cdef class _Timestamp(datetime): val = tz_convert_single(self.value, 'UTC', self.tz) return val - cpdef int _get_field(self, field): - cdef: - int64_t val - ndarray[int32_t] out - val = self._maybe_convert_value_to_local() - out = get_date_field(np.array([val], dtype=np.int64), field) - return int(out[0]) - cpdef bint _get_start_end_field(self, str field): cdef: int64_t val @@ -695,14 +686,11 @@ class 
Timestamp(_Timestamp): @property def dayofyear(self): - return self._get_field('doy') + return ccalendar.get_day_of_year(self.year, self.month, self.day) @property def week(self): - if self.freq is None: - # fastpath for non-business - return ccalendar.get_week_of_year(self.year, self.month, self.day) - return self._get_field('woy') + return ccalendar.get_week_of_year(self.year, self.month, self.day) weekofyear = week @@ -764,7 +752,7 @@ class Timestamp(_Timestamp): @property def is_leap_year(self): - return bool(is_leapyear(self.year)) + return bool(ccalendar.is_leapyear(self.year)) def tz_localize(self, tz, ambiguous='raise', errors='raise'): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e09fa87477122..61c941c3d2333 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -55,7 +55,7 @@ from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timestamp) from pandas._libs.tslibs import (timezones, conversion, fields, parsing, - period as libperiod) + resolution as libresolution) # -------- some conversion wrapper functions @@ -1795,7 +1795,7 @@ def is_normalized(self): @cache_readonly def _resolution(self): - return libperiod.resolution(self.asi8, self.tz) + return libresolution.resolution(self.asi8, self.tz) def insert(self, loc, item): """ diff --git a/pandas/tests/tslibs/__init__.py b/pandas/tests/tslibs/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py new file mode 100644 index 0000000000000..b5d562a7b5a9c --- /dev/null +++ b/pandas/tests/tslibs/test_ccalendar.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +from datetime import datetime + +import numpy as np + +from pandas._libs.tslibs import ccalendar + + +def test_get_day_of_year(): + assert ccalendar.get_day_of_year(2001, 3, 1) == 60 + assert ccalendar.get_day_of_year(2004, 3, 1) == 61 + assert ccalendar.get_day_of_year(1907, 12, 31) == 365 + assert ccalendar.get_day_of_year(2004, 12, 31) == 366 + + dt = datetime.fromordinal(1 + np.random.randint(365 * 4000)) + result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day) + expected = (dt - dt.replace(month=1, day=1)).days + 1 + assert result == expected diff --git a/setup.py b/setup.py index 5397a1b84dc4d..2332503e558ed 100755 --- a/setup.py +++ b/setup.py @@ -515,6 +515,7 @@ def pxd(name): 'pyxfile': '_libs/tslibs/period', 'pxdfiles': ['_libs/src/util', '_libs/missing', + '_libs/tslibs/ccalendar', '_libs/tslibs/timedeltas', '_libs/tslibs/timezones', '_libs/tslibs/nattype'],
Separates the `ccalendar.get_day_of_year` part out of #18540, implements tests, and uses it in `Timestamp`, rendering `Timestamp._get_field` unnecessary.
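For reference, a pure-Python sketch of what `ccalendar.get_day_of_year` computes; it mirrors the expectation used in the new test, while the real implementation is the Cython month-offset lookup above:

```python
from datetime import datetime

def day_of_year(year, month, day):
    # 1-based ordinal day within the year.
    dt = datetime(year, month, day)
    return (dt - dt.replace(month=1, day=1)).days + 1

assert day_of_year(2001, 3, 1) == 60    # non-leap year
assert day_of_year(2004, 3, 1) == 61    # leap year
assert day_of_year(2004, 12, 31) == 366
```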
https://api.github.com/repos/pandas-dev/pandas/pulls/19555
2018-02-06T18:28:52Z
2018-02-07T11:09:34Z
2018-02-07T11:09:34Z
2018-02-08T04:50:41Z
Clear up confusion from #19411
diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 09f5e59535adf..e54269d525957 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1304,7 +1304,12 @@ def assert_frame_equal(left, right, check_dtype=True, 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare check_names : bool, default True - Whether to check the Index names attribute. + Whether to check that the `names` attribute for both the `index` + and `columns` attributes of the DataFrame is identical, i.e. + + * left.index.names == right.index.names + * left.columns.names == right.columns.names + by_blocks : bool, default False Specify how to compare internal data. If False, compare by columns. If True, compare by blocks.
Modify docstring for `pandas.util.testing.assert_frame_equal`.

Closes #19411
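A small example of what `check_names` does and does not compare (hypothetical frames; behavior per the clarified docstring):

```python
import pandas as pd
import pandas.util.testing as tm

left = pd.DataFrame({'a': [1, 2]})
right = left.copy()
right.columns.name = 'cols'   # same data, but columns.names now differ

# Passes: values and labels match, and name checking is disabled.
tm.assert_frame_equal(left, right, check_names=False)

# With the default check_names=True this would raise, because
# left.columns.names != right.columns.names.
```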
https://api.github.com/repos/pandas-dev/pandas/pulls/19554
2018-02-06T17:51:59Z
2018-07-06T14:26:35Z
2018-07-06T14:26:35Z
2018-07-06T14:26:43Z
BUG: Fixed merge on dtype equal categories
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index b5bf7ccbda0b6..18a063ce3750b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -528,6 +528,32 @@ Documentation Changes Bug Fixes ~~~~~~~~~ +Categorical +^^^^^^^^^^^ + +.. warning:: + + A class of bugs was introduced in pandas 0.21 with ``CategoricalDtype`` that + affects the correctness of operations like ``merge``, ``concat``, and + indexing when comparing multiple unordered ``Categorical`` arrays that have + the same categories, but in a different order. We highly recommend upgrading + or manually aligning your categories before doing these operations. + +- Bug in ``Categorical.equals`` returning the wrong result when comparing two + unordered ``Categorical`` arrays with the same categories, but in a different + order (:issue:`16603`) +- Bug in :func:`pandas.api.types.union_categoricals` returning the wrong result + for unordered categoricals with the categories in a different order. + This affected :func:`pandas.concat` with Categorical data (:issue:`19096`). +- Bug in :func:`pandas.merge` returning the wrong result when joining on an + unordered ``Categorical`` that had the same categories but in a different + order (:issue:`19551`) +- Bug in :meth:`CategoricalIndex.get_indexer` returning the wrong result when + ``target`` was an unordered ``Categorical`` that had the same categories as + ``self`` but in a different order (:issue:`19551`) +- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) +- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) Datetimelike ^^^^^^^^^^^^ @@ -671,20 +697,6 @@ Reshaping - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - - -Categorical -^^^^^^^^^^^ - -- -- Bug in :func:`pandas.api.types.union_categoricals` returning the wrong result - when all the categoricals had the same categories, but in a different order. - This affected :func:`pandas.concat` with Categorical data (:issue:`19096`).
-- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`) -- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) -- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) -- - Other ^^^^^ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 2c7be2b21f959..b36bc1df23247 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -553,6 +553,8 @@ def _reindex_non_unique(self, target): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): + from pandas.core.arrays.categorical import _recode_for_categories + method = missing.clean_reindex_fill_method(method) target = ibase._ensure_index(target) @@ -568,8 +570,13 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): if (isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target)): - # we have the same codes - codes = target.codes + if self.values.equals(target.values): + # we have the same codes + codes = target.codes + else: + codes = _recode_for_categories(target.codes, + target.categories, + self.values.categories) else: if isinstance(target, CategoricalIndex): code_indexer = self.categories.get_indexer(target.categories) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 9dbb327e3d956..4b99b0407cfcc 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -12,6 +12,7 @@ from pandas import (Categorical, DataFrame, Index, MultiIndex, Timedelta) +from pandas.core.arrays.categorical import _recode_for_categories from pandas.core.frame import _merge_doc from pandas.core.dtypes.common import ( is_datetime64tz_dtype, @@ -1540,8 +1541,15 @@ def _factorize_keys(lk, rk, sort=True): is_categorical_dtype(rk) and lk.is_dtype_equal(rk)): klass = libhashtable.Int64Factorizer + + if lk.categories.equals(rk.categories): + rk = rk.codes + else: + # Same categories in different orders -> recode + rk = _recode_for_categories(rk.codes, rk.categories, lk.categories) + lk = _ensure_int64(lk.codes) - rk = _ensure_int64(rk.codes) + rk = _ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer lk = _ensure_int64(com._values_from_object(lk)) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index f2182687d047f..634ad0d8160ed 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -432,6 +432,23 @@ def test_get_indexer_array(self): expected = np.array([0, 1], dtype='intp') tm.assert_numpy_array_equal(result, expected) + def test_get_indexer_same_categories_same_order(self): + ci = CategoricalIndex(['a', 'b'], categories=['a', 'b']) + + result = ci.get_indexer(CategoricalIndex(['b', 'b'], + categories=['a', 'b'])) + expected = np.array([1, 1], dtype='intp') + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19551 + ci = CategoricalIndex(['a', 'b'], 
categories=['a', 'b']) + + result = ci.get_indexer(CategoricalIndex(['b', 'b'], + categories=['b', 'a'])) + expected = np.array([1, 1], dtype='intp') + tm.assert_numpy_array_equal(result, expected) + def test_getitem_with_listlike(self): # GH 16115 cats = Categorical([Timestamp('12-31-1999'), diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 32f83ab972be5..101d34ebdb89f 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1643,6 +1643,25 @@ def test_merge_categorical(self): result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c') tm.assert_frame_equal(result, expected) + def tests_merge_categorical_unordered_equal(self): + # GH-19551 + df1 = DataFrame({ + 'Foo': Categorical(['A', 'B', 'C'], categories=['A', 'B', 'C']), + 'Left': ['A0', 'B0', 'C0'], + }) + + df2 = DataFrame({ + 'Foo': Categorical(['C', 'B', 'A'], categories=['C', 'B', 'A']), + 'Right': ['C1', 'B1', 'A1'], + }) + result = pd.merge(df1, df2, on=['Foo']) + expected = DataFrame({ + 'Foo': pd.Categorical(['A', 'B', 'C']), + 'Left': ['A0', 'B0', 'C0'], + 'Right': ['A1', 'B1', 'C1'], + }) + assert_frame_equal(result, expected) + def test_other_columns(self, left, right): # non-merge columns should preserve if possible right = right.assign(Z=right.Z.astype('category'))
closes #19551
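A sketch of the case being fixed, taken from the new test: two unordered categoricals with the same categories in a different order must merge on their values, not their integer codes.

```python
import pandas as pd

df1 = pd.DataFrame({
    'Foo': pd.Categorical(['A', 'B', 'C'], categories=['A', 'B', 'C']),
    'Left': ['A0', 'B0', 'C0'],
})
df2 = pd.DataFrame({
    'Foo': pd.Categorical(['C', 'B', 'A'], categories=['C', 'B', 'A']),
    'Right': ['C1', 'B1', 'A1'],
})

# Previously _factorize_keys compared raw codes, silently pairing 'A'
# with 'C'. After recoding the right-hand keys, rows align on values:
pd.merge(df1, df2, on='Foo')
#   Foo Left Right
# 0   A   A0    A1
# 1   B   B0    B1
# 2   C   C0    C1
```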
https://api.github.com/repos/pandas-dev/pandas/pulls/19553
2018-02-06T17:37:27Z
2018-02-08T01:20:29Z
2018-02-08T01:20:29Z
2018-05-02T13:09:44Z
Using DataFrame.resample with the 'agg' method on non-existent columns results in unexpected behavior
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 7322bd9fe3327..c48f6d19e3b10 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -647,6 +647,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.groupby` where aggregation by ``first``/``last``/``min``/``max`` was causing timestamps to lose precision (:issue:`19526`) - Bug in :func:`DataFrame.transform` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) - Bug in :func:`DataFrame.groupby` passing the `on=` kwarg, and subsequently using ``.apply()`` (:issue:`17813`) +- Bug in :func:`DataFrame.resample().aggregate` not raising a `ValueError` when aggregating a non-existent column (:issue:`16766`) Sparse ^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index d5b204dba063e..0969717d85e4f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -392,6 +392,10 @@ def nested_renaming_depr(level=4): elif isinstance(obj, ABCSeries): nested_renaming_depr() + elif isinstance(obj, ABCDataFrame) and \ + k not in obj.columns: + raise ValueError( + "Column '{col}' does not exist!".format(col=k)) arg = new_arg diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 2de890ea459f0..9feba3fd042dd 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -20,7 +20,6 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.compat import range, lrange, zip, product, OrderedDict -from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall from pandas.core.groupby import DataError import pandas.core.common as com @@ -614,7 +613,7 @@ def f(): t[['A']].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']}) - pytest.raises(SpecificationError, f) + pytest.raises(ValueError, f) def test_agg_nested_dicts(self): @@ -659,6 +658,21 @@ def f(): 'B': {'rb': ['mean', 'std']}}) assert_frame_equal(result, expected, check_like=True) + def test_try_aggregate_non_existing_column(self): + # GH 16766 + data = [ + {'dt': datetime(2017, 6, 1, 0), 'x': 1.0, 'y': 2.0}, + {'dt': datetime(2017, 6, 1, 1), 'x': 2.0, 'y': 2.0}, + {'dt': datetime(2017, 6, 1, 2), 'x': 3.0, 'y': 1.5} + ] + df = DataFrame(data).set_index('dt') + + # Error as we don't have 'z' column + with pytest.raises(ValueError): + df.resample('30T').agg({'x': ['mean'], + 'y': ['median'], + 'z': ['sum']}) + def test_selection_api_validation(self): # GH 13500 index = date_range(datetime(2005, 1, 1),
- [x] closes #16766
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
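A short sketch of the newly enforced behavior (data follows the added test):

```python
from datetime import datetime
import pandas as pd

df = pd.DataFrame([
    {'dt': datetime(2017, 6, 1, 0), 'x': 1.0, 'y': 2.0},
    {'dt': datetime(2017, 6, 1, 1), 'x': 2.0, 'y': 2.0},
]).set_index('dt')

# 'z' is not a column, so the aggregation now fails loudly:
df.resample('30T').agg({'x': ['mean'], 'z': ['sum']})
# ValueError: Column 'z' does not exist!
```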
https://api.github.com/repos/pandas-dev/pandas/pulls/19552
2018-02-06T16:03:07Z
2018-02-07T11:12:59Z
2018-02-07T11:12:59Z
2018-02-07T11:14:32Z
Simplify argument passing in period_helper
diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 570f20b790750..f0e24fec685d0 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -82,11 +82,14 @@ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, // helpers for frequency conversion routines // -static int daytime_conversion_factors[][2] = { - {FR_DAY, 1}, {FR_HR, 24}, {FR_MIN, 60}, {FR_SEC, 60}, - {FR_MS, 1000}, {FR_US, 1000}, {FR_NS, 1000}, {0, 0}}; - -static npy_int64 **daytime_conversion_factor_matrix = NULL; +static npy_int64 daytime_conversion_factor_matrix[7][7] = { + {1, 24, 1440, 86400, 86400000, 86400000000, 86400000000000}, + {0, 1, 60, 3600, 3600000, 3600000000, 3600000000000}, + {0, 0, 1, 60, 60000, 60000000, 60000000000}, + {0, 0, 0, 1, 1000, 1000000, 1000000000}, + {0, 0, 0, 0, 1, 1000, 1000000}, + {0, 0, 0, 0, 0, 1, 1000}, + {0, 0, 0, 0, 0, 0, 1}}; PANDAS_INLINE int max_value(int a, int b) { return a > b ? a : b; } @@ -96,100 +99,24 @@ PANDAS_INLINE int get_freq_group(int freq) { return (freq / 1000) * 1000; } PANDAS_INLINE int get_freq_group_index(int freq) { return freq / 1000; } -static int calc_conversion_factors_matrix_size(void) { - int matrix_size = 0; - int index; - for (index = 0;; index++) { - int period_value = - get_freq_group_index(daytime_conversion_factors[index][0]); - if (period_value == 0) { - break; - } - matrix_size = max_value(matrix_size, period_value); - } - return matrix_size + 1; -} - -static void alloc_conversion_factors_matrix(int matrix_size) { - int row_index; - int column_index; - daytime_conversion_factor_matrix = - malloc(matrix_size * sizeof(**daytime_conversion_factor_matrix)); - for (row_index = 0; row_index < matrix_size; row_index++) { - daytime_conversion_factor_matrix[row_index] = - malloc(matrix_size * sizeof(**daytime_conversion_factor_matrix)); - for (column_index = 0; column_index < matrix_size; column_index++) { - daytime_conversion_factor_matrix[row_index][column_index] = 0; - } - } -} - -static npy_int64 calculate_conversion_factor(int start_value, int end_value) { - npy_int64 conversion_factor = 0; - int index; - for (index = 0;; index++) { - int freq_group = daytime_conversion_factors[index][0]; - - if (freq_group == 0) { - conversion_factor = 0; - break; - } - - if (freq_group == start_value) { - conversion_factor = 1; - } else { - conversion_factor *= daytime_conversion_factors[index][1]; - } - - if (freq_group == end_value) { - break; - } - } - return conversion_factor; -} - -static void populate_conversion_factors_matrix(void) { - int row_index_index; - int row_value, row_index; - int column_index_index; - int column_value, column_index; - - for (row_index_index = 0;; row_index_index++) { - row_value = daytime_conversion_factors[row_index_index][0]; - if (row_value == 0) { - break; - } - row_index = get_freq_group_index(row_value); - for (column_index_index = row_index_index;; column_index_index++) { - column_value = daytime_conversion_factors[column_index_index][0]; - if (column_value == 0) { - break; - } - column_index = get_freq_group_index(column_value); - - daytime_conversion_factor_matrix[row_index][column_index] = - calculate_conversion_factor(row_value, column_value); - } - } -} - -void initialize_daytime_conversion_factor_matrix() { - if (daytime_conversion_factor_matrix == NULL) { - int matrix_size = calc_conversion_factors_matrix_size(); - alloc_conversion_factors_matrix(matrix_size); - populate_conversion_factors_matrix(); - } -} PANDAS_INLINE npy_int64 
get_daytime_conversion_factor(int from_index, int to_index) { - return daytime_conversion_factor_matrix[min_value(from_index, to_index)] - [max_value(from_index, to_index)]; + int row = min_value(from_index, to_index); + int col = max_value(from_index, to_index); + // row or col < 6 means a frequency strictly lower than Daily, which + // does not use daytime_conversion_factors + if (row < 6) { + return 0; + } else if (col < 6) { + return 0; + } + return daytime_conversion_factor_matrix[row - 6][col - 6]; } PANDAS_INLINE npy_int64 upsample_daytime(npy_int64 ordinal, - asfreq_info *af_info, int atEnd) { - if (atEnd) { + asfreq_info *af_info) { + if (af_info->is_end) { return (ordinal + 1) * af_info->intraday_conversion_factor - 1; } else { return ordinal * af_info->intraday_conversion_factor; @@ -197,18 +124,18 @@ PANDAS_INLINE npy_int64 upsample_daytime(npy_int64 ordinal, } PANDAS_INLINE npy_int64 downsample_daytime(npy_int64 ordinal, - asfreq_info *af_info, int atEnd) { + asfreq_info *af_info) { return ordinal / (af_info->intraday_conversion_factor); } -PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal, char relation, +PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) { npy_int64 result; - result = (*first_func)(ordinal, relation, af_info); - result = (*second_func)(result, relation, af_info); + result = (*first_func)(ordinal, af_info); + result = (*second_func)(result, af_info); return result; } @@ -241,10 +168,9 @@ static npy_int64 absdate_from_ymd(int y, int m, int d) { //************ FROM DAILY *************** -static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - ordinal = downsample_daytime(ordinal, af_info, 0); + ordinal = downsample_daytime(ordinal, af_info); dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); if (dinfo.month > af_info->to_a_year_end) { return (npy_int64)(dinfo.year + 1 - BASE_YEAR); @@ -272,142 +198,110 @@ static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, return 0; } -static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) { int year, quarter; - ordinal = downsample_daytime(ordinal, af_info, 0); + ordinal = downsample_daytime(ordinal, af_info); DtoQ_yq(ordinal, af_info, &year, &quarter); return (npy_int64)((year - BASE_YEAR) * 4 + quarter - 1); } -static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - ordinal = downsample_daytime(ordinal, af_info, 0); + ordinal = downsample_daytime(ordinal, af_info); dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); return (npy_int64)((dinfo.year - BASE_YEAR) * 12 + dinfo.month - 1); } -static npy_int64 asfreq_DTtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - ordinal = downsample_daytime(ordinal, af_info, 0); +static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) { + ordinal = downsample_daytime(ordinal, af_info); return (ordinal + ORD_OFFSET - (1 + af_info->to_week_end)) / 7 + 1 - WEEK_OFFSET; } -static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; int roll_back; -
ordinal = downsample_daytime(ordinal, af_info, 0); + ordinal = downsample_daytime(ordinal, af_info); dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); // This usage defines roll_back the opposite way from the others - roll_back = (relation == 'S') ? 1 : 0; + roll_back = 1 - af_info->is_end; return DtoB(&dinfo, roll_back); } // all intra day calculations are now done within one function -static npy_int64 asfreq_DownsampleWithinDay(npy_int64 ordinal, char relation, +static npy_int64 asfreq_DownsampleWithinDay(npy_int64 ordinal, asfreq_info *af_info) { - return downsample_daytime(ordinal, af_info, relation == 'E'); + return downsample_daytime(ordinal, af_info); } -static npy_int64 asfreq_UpsampleWithinDay(npy_int64 ordinal, char relation, +static npy_int64 asfreq_UpsampleWithinDay(npy_int64 ordinal, asfreq_info *af_info) { - return upsample_daytime(ordinal, af_info, relation == 'E'); + return upsample_daytime(ordinal, af_info); } //************ FROM BUSINESS *************** -static npy_int64 asfreq_BtoDT(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_BtoDT(npy_int64 ordinal, asfreq_info *af_info) { ordinal += BDAY_OFFSET; ordinal = (((ordinal - 1) / 5) * 7 + mod_compat(ordinal - 1, 5) + 1 - ORD_OFFSET); - return upsample_daytime(ordinal, af_info, relation != 'S'); + return upsample_daytime(ordinal, af_info); } -static npy_int64 asfreq_BtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, - asfreq_DTtoA); +static npy_int64 asfreq_BtoA(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoA); } -static npy_int64 asfreq_BtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, - asfreq_DTtoQ); +static npy_int64 asfreq_BtoQ(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoQ); } -static npy_int64 asfreq_BtoM(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, - asfreq_DTtoM); +static npy_int64 asfreq_BtoM(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoM); } -static npy_int64 asfreq_BtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_BtoDT, - asfreq_DTtoW); +static npy_int64 asfreq_BtoW(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_BtoDT, asfreq_DTtoW); } //************ FROM WEEKLY *************** -static npy_int64 asfreq_WtoDT(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - ordinal += WEEK_OFFSET; - if (relation != 'S') { - ordinal += 1; - } - - ordinal = ordinal * 7 - 6 + af_info->from_week_end - ORD_OFFSET; - - if (relation != 'S') { - ordinal -= 1; - } - - return upsample_daytime(ordinal, af_info, relation != 'S'); +static npy_int64 asfreq_WtoDT(npy_int64 ordinal, asfreq_info *af_info) { + ordinal = (ordinal + WEEK_OFFSET) * 7 + + af_info->from_week_end - ORD_OFFSET + + (7 - 1) * (af_info->is_end - 1); + return upsample_daytime(ordinal, af_info); } -static npy_int64 asfreq_WtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, - asfreq_DTtoA); +static npy_int64 asfreq_WtoA(npy_int64 ordinal, asfreq_info *af_info) { + return 
transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoA); } -static npy_int64 asfreq_WtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, - asfreq_DTtoQ); +static npy_int64 asfreq_WtoQ(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoQ); } -static npy_int64 asfreq_WtoM(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, - asfreq_DTtoM); +static npy_int64 asfreq_WtoM(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoM); } -static npy_int64 asfreq_WtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_WtoDT, - asfreq_DTtoW); +static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_WtoDT, asfreq_DTtoW); } -static npy_int64 asfreq_WtoB(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - int roll_back; + int roll_back = af_info->is_end; dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET); + &dinfo, asfreq_WtoDT(ordinal, af_info) + ORD_OFFSET); - roll_back = (relation == 'S') ? 0 : 1; return DtoB(&dinfo, roll_back); } @@ -417,52 +311,38 @@ static void MtoD_ym(npy_int64 ordinal, int *y, int *m) { *m = mod_compat(ordinal, 12) + 1; } -static npy_int64 asfreq_MtoDT(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_MtoDT(npy_int64 ordinal, asfreq_info *af_info) { npy_int64 absdate; int y, m; - if (relation == 'E') { - ordinal += 1; - } + ordinal += af_info->is_end; MtoD_ym(ordinal, &y, &m); absdate = absdate_from_ymd(y, m, 1); ordinal = absdate - ORD_OFFSET; - if (relation == 'E') { - ordinal -= 1; - } - - return upsample_daytime(ordinal, af_info, relation != 'S'); + ordinal -= af_info->is_end; + return upsample_daytime(ordinal, af_info); } -static npy_int64 asfreq_MtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, - asfreq_DTtoA); +static npy_int64 asfreq_MtoA(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoA); } -static npy_int64 asfreq_MtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, - asfreq_DTtoQ); +static npy_int64 asfreq_MtoQ(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoQ); } -static npy_int64 asfreq_MtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_MtoDT, - asfreq_DTtoW); +static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_MtoDT, asfreq_DTtoW); } -static npy_int64 asfreq_MtoB(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - int roll_back; + int roll_back = af_info->is_end; dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET); + &dinfo, asfreq_MtoDT(ordinal, af_info) + ORD_OFFSET); - roll_back = (relation 
== 'S') ? 0 : 1; return DtoB(&dinfo, roll_back); } @@ -482,130 +362,94 @@ static void QtoD_ym(npy_int64 ordinal, int *y, int *m, asfreq_info *af_info) { } } -static npy_int64 asfreq_QtoDT(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_QtoDT(npy_int64 ordinal, asfreq_info *af_info) { npy_int64 absdate; int y, m; - if (relation == 'E') { - ordinal += 1; - } - + ordinal += af_info->is_end; QtoD_ym(ordinal, &y, &m, af_info); absdate = absdate_from_ymd(y, m, 1); - if (relation == 'E') { - absdate -= 1; - } - - return upsample_daytime(absdate - ORD_OFFSET, af_info, relation != 'S'); + absdate -= af_info->is_end; + return upsample_daytime(absdate - ORD_OFFSET, af_info); } -static npy_int64 asfreq_QtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, - asfreq_DTtoQ); +static npy_int64 asfreq_QtoQ(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoQ); } -static npy_int64 asfreq_QtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, - asfreq_DTtoA); +static npy_int64 asfreq_QtoA(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoA); } -static npy_int64 asfreq_QtoM(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, - asfreq_DTtoM); +static npy_int64 asfreq_QtoM(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoM); } -static npy_int64 asfreq_QtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_QtoDT, - asfreq_DTtoW); +static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_QtoDT, asfreq_DTtoW); } -static npy_int64 asfreq_QtoB(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - int roll_back; + int roll_back = af_info->is_end; + dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET); + &dinfo, asfreq_QtoDT(ordinal, af_info) + ORD_OFFSET); - roll_back = (relation == 'S') ? 
0 : 1; return DtoB(&dinfo, roll_back); } //************ FROM ANNUAL *************** -static npy_int64 asfreq_AtoDT(npy_int64 year, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_AtoDT(npy_int64 ordinal, asfreq_info *af_info) { npy_int64 absdate; - int month = (af_info->from_a_year_end) % 12; // start from 1970 - year += BASE_YEAR; - - month += 1; + npy_int64 year = ordinal + BASE_YEAR; + int month = (af_info->from_a_year_end % 12) + 1; if (af_info->from_a_year_end != 12) { year -= 1; } - if (relation == 'E') { - year += 1; - } - + year += af_info->is_end; absdate = absdate_from_ymd(year, month, 1); - if (relation == 'E') { - absdate -= 1; - } - - return upsample_daytime(absdate - ORD_OFFSET, af_info, relation != 'S'); + absdate -= af_info->is_end; + return upsample_daytime(absdate - ORD_OFFSET, af_info); } -static npy_int64 asfreq_AtoA(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, - asfreq_DTtoA); +static npy_int64 asfreq_AtoA(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoA); } -static npy_int64 asfreq_AtoQ(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, - asfreq_DTtoQ); +static npy_int64 asfreq_AtoQ(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoQ); } -static npy_int64 asfreq_AtoM(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, - asfreq_DTtoM); +static npy_int64 asfreq_AtoM(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoM); } -static npy_int64 asfreq_AtoW(npy_int64 ordinal, char relation, - asfreq_info *af_info) { - return transform_via_day(ordinal, relation, af_info, asfreq_AtoDT, - asfreq_DTtoW); +static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) { + return transform_via_day(ordinal, af_info, asfreq_AtoDT, asfreq_DTtoW); } -static npy_int64 asfreq_AtoB(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - int roll_back; + int roll_back = af_info->is_end; dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET); + &dinfo, asfreq_AtoDT(ordinal, af_info) + ORD_OFFSET); - roll_back = (relation == 'S') ? 
0 : 1; return DtoB(&dinfo, roll_back); } -static npy_int64 nofunc(npy_int64 ordinal, char relation, - asfreq_info *af_info) { +static npy_int64 nofunc(npy_int64 ordinal, asfreq_info *af_info) { return INT_ERR_CODE; } -static npy_int64 no_op(npy_int64 ordinal, char relation, asfreq_info *af_info) { +static npy_int64 no_op(npy_int64 ordinal, asfreq_info *af_info) { return ordinal; } @@ -622,10 +466,17 @@ static int calc_a_year_end(int freq, int group) { static int calc_week_end(int freq, int group) { return freq - group; } -void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) { +void get_asfreq_info(int fromFreq, int toFreq, char relation, + asfreq_info *af_info) { int fromGroup = get_freq_group(fromFreq); int toGroup = get_freq_group(toFreq); + if (relation == 'E') { + af_info->is_end = 1; + } else { + af_info->is_end = 0; + } + af_info->intraday_conversion_factor = get_daytime_conversion_factor( get_freq_group_index(max_value(fromGroup, FR_DAY)), get_freq_group_index(max_value(toGroup, FR_DAY))); @@ -895,9 +746,8 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, func = get_asfreq_func(freq1, freq2); - get_asfreq_info(freq1, freq2, &finfo); - - val = (*func)(period_ordinal, relation, &finfo); + get_asfreq_info(freq1, freq2, relation, &finfo); + val = (*func)(period_ordinal, &finfo); return val; } @@ -1017,9 +867,9 @@ npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq) { if (freq == FR_DAY) return period_ordinal + ORD_OFFSET; toDaily = get_asfreq_func(freq, FR_DAY); - get_asfreq_info(freq, FR_DAY, &af_info); + get_asfreq_info(freq, FR_DAY, 'E', &af_info); - return toDaily(period_ordinal, 'E', &af_info) + ORD_OFFSET; + return toDaily(period_ordinal, &af_info) + ORD_OFFSET; } @@ -1027,19 +877,19 @@ int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year) { asfreq_info af_info; int qtr_freq; npy_int64 daily_ord; - npy_int64 (*toDaily)(npy_int64, char, asfreq_info *) = NULL; + freq_conv_func toDaily = NULL; toDaily = get_asfreq_func(freq, FR_DAY); - get_asfreq_info(freq, FR_DAY, &af_info); + get_asfreq_info(freq, FR_DAY, 'E', &af_info); - daily_ord = toDaily(ordinal, 'E', &af_info); + daily_ord = toDaily(ordinal, &af_info); if (get_freq_group(freq) == FR_QTR) { qtr_freq = freq; } else { qtr_freq = FR_QTR; } - get_asfreq_info(FR_DAY, qtr_freq, &af_info); + get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info); DtoQ_yq(daily_ord, &af_info, year, quarter); return 0; @@ -1056,7 +906,7 @@ int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter) { else qtr_freq = FR_QTR; - get_asfreq_info(FR_DAY, qtr_freq, &af_info); + get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info); DtoQ_yq(ordinal, &af_info, year, quarter); diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h index 2c74659346b15..f14aec268a1fb 100644 --- a/pandas/_libs/src/period_helper.h +++ b/pandas/_libs/src/period_helper.h @@ -101,6 +101,10 @@ frequency conversion routines. 
#define INT_ERR_CODE INT32_MIN typedef struct asfreq_info { + int is_end; + // char relation == 'S' (for START) --> is_end = 0 + // char relation == 'E' (for END) --> is_end = 1 + int from_week_end; // day the week ends on in the "from" frequency int to_week_end; // day the week ends on in the "to" frequency @@ -124,7 +128,7 @@ typedef struct date_info { int year; } date_info; -typedef npy_int64 (*freq_conv_func)(npy_int64, char, asfreq_info *); +typedef npy_int64 (*freq_conv_func)(npy_int64, asfreq_info *af_info); /* * new pandas API helper functions here @@ -140,11 +144,10 @@ npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq); int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo); freq_conv_func get_asfreq_func(int fromFreq, int toFreq); -void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info); +void get_asfreq_info(int fromFreq, int toFreq, char relation, + asfreq_info *af_info); int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year); int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter); -void initialize_daytime_conversion_factor_matrix(void); - #endif // PANDAS__LIBS_SRC_PERIOD_HELPER_H_ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ba17b3d345ac8..3c396a9ff4f3c 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -61,6 +61,8 @@ cdef extern from "period_helper.h": int year ctypedef struct asfreq_info: + int is_end + int from_week_end int to_week_end @@ -70,13 +72,13 @@ cdef extern from "period_helper.h": int from_q_year_end int to_q_year_end - ctypedef int64_t (*freq_conv_func)(int64_t, char, asfreq_info*) + ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) - void initialize_daytime_conversion_factor_matrix() int64_t asfreq(int64_t dtordinal, int freq1, int freq2, char relation) except INT32_MIN freq_conv_func get_asfreq_func(int fromFreq, int toFreq) - void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) + void get_asfreq_info(int fromFreq, int toFreq, char relation, + asfreq_info *af_info) int64_t get_period_ordinal(int year, int month, int day, int hour, int minute, int second, @@ -90,14 +92,20 @@ cdef extern from "period_helper.h": int _quarter_year(int64_t ordinal, int freq, int *year, int *quarter) -initialize_daytime_conversion_factor_matrix() - - @cython.cdivision cdef char* c_strftime(date_info *dinfo, char *fmt): """ - function to generate a nice string representation of the period + Generate a nice string representation of the period object, originally from DateObject_strftime + + Parameters + ---------- + dinfo : date_info* + fmt : char* + + Returns + ------- + result : char* """ cdef: tm c_date @@ -224,26 +232,26 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): n = len(arr) result = np.empty(n, dtype=np.int64) - func = get_asfreq_func(freq1, freq2) - get_asfreq_info(freq1, freq2, &finfo) - if end: relation = END else: relation = START + func = get_asfreq_func(freq1, freq2) + get_asfreq_info(freq1, freq2, relation, &finfo) + mask = arr == iNaT if mask.any(): # NaT process for i in range(n): val = arr[i] if val != iNaT: - val = func(val, relation, &finfo) + val = func(val, &finfo) if val == INT32_MIN: raise ValueError("Unable to convert to desired frequency.") result[i] = val else: for i in range(n): - val = func(arr[i], relation, &finfo) + val = func(arr[i], &finfo) if val == INT32_MIN: raise ValueError("Unable to convert to desired frequency.") result[i] = val
Orthogonal to #19540, though it will need some small rebasing if/when that goes in. Instead of passing the `relation` argument all over the place, pass it once, slightly earlier, and make it part of `asfreq_info` (stored as the `is_end` flag).
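To make the shape of the change concrete, here is a minimal Python analogue of the pattern (a sketch only; the class name and factor value below are illustrative assumptions, not the C API):

```python
# Resolve the 'S'/'E' relation once, when the conversion info is built,
# instead of threading a `relation` char through every asfreq_* call.


class AsfreqInfo:
    def __init__(self, relation):
        # relation 'S' (START) -> is_end = 0; relation 'E' (END) -> is_end = 1
        self.is_end = 1 if relation == 'E' else 0
        # e.g. converting daily ordinals down to secondly ordinals
        self.intraday_conversion_factor = 24 * 60 * 60


def upsample_daytime(ordinal, af_info):
    # No per-call `relation` argument: the flag now lives on af_info.
    if af_info.is_end:
        return (ordinal + 1) * af_info.intraday_conversion_factor - 1
    return ordinal * af_info.intraday_conversion_factor


info = AsfreqInfo('E')
print(upsample_daytime(0, info))  # 86399, the last second of day 0
```

Storing the flag on the struct also means helpers like `transform_via_day` can chain two conversion functions without re-threading `relation` through each call.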
https://api.github.com/repos/pandas-dev/pandas/pulls/19550
2018-02-06T15:51:55Z
2018-02-08T11:20:40Z
2018-02-08T11:20:40Z
2018-02-08T14:13:39Z
PERF: increase rolling min/max performance
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 75990d83f8212..ba25ad6c5eda6 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -16,12 +16,26 @@ class Methods(object): def setup(self, constructor, window, dtype, method): N = 10**5 - arr = np.random.random(N).astype(dtype) + arr = (100 * np.random.random(N)).astype(dtype) self.roll = getattr(pd, constructor)(arr).rolling(window) def time_rolling(self, constructor, window, dtype, method): getattr(self.roll, method)() +class VariableWindowMethods(Methods): + sample_time = 0.2 + params = (['DataFrame', 'Series'], + ['50s', '1h', '1d'], + ['int', 'float'], + ['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt', + 'sum']) + param_names = ['constructor', 'window', 'dtype', 'method'] + + def setup(self, constructor, window, dtype, method): + N = 10**5 + arr = (100 * np.random.random(N)).astype(dtype) + index = pd.date_range('2017-01-01', periods=N, freq='5s') + self.roll = getattr(pd, constructor)(arr, index=index).rolling(window) class Pairwise(object): diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index acab9d0bbebf8..cdd2150a24a37 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -645,6 +645,7 @@ Performance Improvements - Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`) - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) - Improved performance of :func:`DataFrameGroupBy.rank` (:issue:`15779`) +- Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`) ..
_whatsnew_0230.docs: diff --git a/pandas/_libs/src/headers/cmath b/pandas/_libs/src/headers/cmath new file mode 100644 index 0000000000000..d8e2239406cae --- /dev/null +++ b/pandas/_libs/src/headers/cmath @@ -0,0 +1,15 @@ +#ifndef _PANDAS_MATH_H_ +#define _PANDAS_MATH_H_ + +// In older versions of Visual Studio there wasn't a std::signbit defined +// This defines it using _copysign +#if defined(_MSC_VER) && (_MSC_VER < 1800) +#include <cmath> +namespace std { + __inline int signbit(double num) { return _copysign(1.0, num) < 0; } +} +#else +#include <cmath> +#endif + +#endif diff --git a/pandas/_libs/src/headers/math.h b/pandas/_libs/src/headers/math.h deleted file mode 100644 index 34ad9f24a58f9..0000000000000 --- a/pandas/_libs/src/headers/math.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _PANDAS_MATH_H_ -#define _PANDAS_MATH_H_ - -#if defined(_MSC_VER) && (_MSC_VER < 1800) -#include <math.h> -__inline int signbit(double num) { return _copysign(1.0, num) < 0; } -#else -#include <math.h> -#endif - -#endif diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index cacb073da581c..aa13f03d8e9e4 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -3,6 +3,7 @@ cimport cython from cython cimport Py_ssize_t +from libcpp.deque cimport deque from libc.stdlib cimport malloc, free @@ -12,7 +13,7 @@ from numpy cimport ndarray, double_t, int64_t, float64_t cnp.import_array() -cdef extern from "../src/headers/math.h": +cdef extern from "../src/headers/cmath" namespace "std": int signbit(double) nogil double sqrt(double x) nogil @@ -1222,8 +1223,9 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, cdef: numeric ai bint is_variable, should_replace - int64_t s, e, N, i, j, removed + int64_t N, i, removed, window_i Py_ssize_t nobs = 0 + deque Q[int64_t] ndarray[int64_t] starti, endi ndarray[numeric, ndim=1] output cdef: @@ -1242,32 +1244,48 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, output = np.empty(N, dtype=input.dtype) + Q = deque[int64_t]() + if is_variable: with nogil: - for i in range(N): - s = starti[i] - e = endi[i] + # This is using a modified version of the C++ code in this + # SO post: http://bit.ly/2nOoHlY + # The original impl didn't deal with variable window sizes + # So the code was optimized for that - r = input[s] - nobs = 0 - for j in range(s, e): + for i from starti[0] <= i < endi[0]: + ai = init_mm(input[i], &nobs, is_max) - # adds, death at the i offset - ai = init_mm(input[j], &nobs, is_max) + if is_max: + while not Q.empty() and ai >= input[Q.back()]: + Q.pop_back() + else: + while not Q.empty() and ai <= input[Q.back()]: + Q.pop_back() + Q.push_back(i) - if is_max: - if ai > r: - r = ai - else: - if ai < r: - r = ai + for i from endi[0] <= i < N: + output[i-1] = calc_mm(minp, nobs, input[Q.front()]) - output[i] = calc_mm(minp, nobs, r) + ai = init_mm(input[i], &nobs, is_max) - else: + if is_max: + while not Q.empty() and ai >= input[Q.back()]: + Q.pop_back() + else: + while not Q.empty() and ai <= input[Q.back()]: + Q.pop_back() + while not Q.empty() and Q.front() <= i - (endi[i] - starti[i]): + Q.pop_front() + + Q.push_back(i) + + output[N-1] = calc_mm(minp, nobs, input[Q.front()]) + + else: # setup the rings of death! 
ring = <numeric *>malloc(win * sizeof(numeric)) death = <int64_t *>malloc(win * sizeof(int64_t)) diff --git a/setup.py b/setup.py index 2332503e558ed..c66979dd19ef0 100755 --- a/setup.py +++ b/setup.py @@ -617,7 +617,8 @@ def pxd(name): 'pyxfile': '_libs/testing'}, '_libs.window': { 'pyxfile': '_libs/window', - 'pxdfiles': ['_libs/skiplist', '_libs/src/util']}, + 'pxdfiles': ['_libs/skiplist', '_libs/src/util'], + 'language': 'c++'}, '_libs.writers': { 'pyxfile': '_libs/writers', 'pxdfiles': ['_libs/src/util']}, @@ -640,11 +641,11 @@ def pxd(name): sources=sources, depends=data.get('depends', []), include_dirs=include, + language=data.get('language', 'c'), extra_compile_args=extra_compile_args) extensions.append(obj) - # ---------------------------------------------------------------------- # msgpack
- [x] closes #19521
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

In my testing, the runtime of

```python
import pandas as pd
import timeit

df = pd.DataFrame({"a": 0}, index=pd.date_range('2017-01-01', '2019-01-01', freq='1T'))
timeit.timeit(lambda: df.rolling('1d').max(), number=1)
```

went from 1.8 sec to 0.3 sec on my machine (a Lenovo laptop).
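The speed-up comes from the monotonic-deque technique referenced in the diff: every index enters and leaves the deque at most once, so the pass is O(n) rather than O(n * window). Below is a plain-Python sketch of that idea for a fixed-size window; the actual Cython implementation additionally handles variable window sizes, minimum observation counts, and min as well as max.

```python
from collections import deque


def rolling_max(values, window):
    # Deque holds indices of candidate maxima; their values are kept
    # decreasing from front to back, so the front is always the window max.
    q = deque()
    out = []
    for i, v in enumerate(values):
        # Drop back candidates dominated by the new value.
        while q and values[q[-1]] <= v:
            q.pop()
        q.append(i)
        # Drop the front candidate once it falls out of the window.
        if q[0] <= i - window:
            q.popleft()
        if i >= window - 1:
            out.append(values[q[0]])
    return out


print(rolling_max([3, 1, 4, 1, 5, 9, 2, 6], 3))  # [4, 4, 5, 9, 9, 9]
```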
https://api.github.com/repos/pandas-dev/pandas/pulls/19549
2018-02-06T15:15:44Z
2018-02-14T11:13:19Z
2018-02-14T11:13:19Z
2018-02-14T11:13:25Z
DOC: Remove duplicated words
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 25f7c5a3ad948..ca903dadc6eb1 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -672,7 +672,7 @@ The ``CategoricalIndex`` is **preserved** after indexing: df2.loc['a'].index Sorting the index will sort by the order of the categories (Recall that we -created the index with with ``CategoricalDtype(list('cab'))``, so the sorted +created the index with ``CategoricalDtype(list('cab'))``, so the sorted order is ``cab``.). .. ipython:: python diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst index e9e0d7716af3a..214667119f7e0 100644 --- a/doc/source/comparison_with_sas.rst +++ b/doc/source/comparison_with_sas.rst @@ -279,7 +279,7 @@ date/datetime columns. The equivalent pandas operations are shown below. In addition to these functions pandas supports other Time Series features -not available in Base SAS (such as resampling and and custom offsets) - +not available in Base SAS (such as resampling and custom offsets) - see the :ref:`timeseries documentation<timeseries>` for more details. .. ipython:: python @@ -584,7 +584,7 @@ For example, in SAS you could do this to filter missing values. if value_x ^= .; run; -Which doesn't work in in pandas. Instead, the ``pd.isna`` or ``pd.notna`` functions +Which doesn't work in pandas. Instead, the ``pd.isna`` or ``pd.notna`` functions should be used for comparisons. .. ipython:: python diff --git a/doc/source/computation.rst b/doc/source/computation.rst index a64542fa71705..4285767654e25 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -512,7 +512,7 @@ a same sized result as the input. When using ``.resample()`` with an offset. Construct a new index that is the frequency of the offset. For each frequency bin, aggregate points from the input within a backwards-in-time looking window that fall in that bin. The result of this -aggregation is the output for that frequency point. The windows are fixed size size in the frequency space. Your result +aggregation is the output for that frequency point. The windows are fixed size in the frequency space. Your result will have the shape of a regular frequency between the min and the max of the original input object. To summarize, ``.rolling()`` is a time-based window operation, while ``.resample()`` is a frequency-based window operation. diff --git a/doc/source/io.rst b/doc/source/io.rst index 60dc89f8fd495..1785de54b7dd6 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4529,7 +4529,7 @@ Several caveats. on an attempt at serialization. You can specify an ``engine`` to direct the serialization. This can be one of ``pyarrow``, or ``fastparquet``, or ``auto``. -If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``, then +If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``, then ``pyarrow`` is tried, and falling back to ``fastparquet``. See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__ diff --git a/doc/source/release.rst b/doc/source/release.rst index cd763de42d162..8e063116cbf07 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -406,7 +406,7 @@ of all enhancements and bugs that have been fixed in 0.20.1. .. note:: - This is a combined release for 0.20.0 and and 0.20.1. + This is a combined release for 0.20.0 and 0.20.1. 
Version 0.20.1 contains one additional change for backwards-compatibility with downstream projects using pandas' ``utils`` routines. (:issue:`16250`) Thanks @@ -2918,7 +2918,7 @@ Improvements to existing features - clipboard functions use pyperclip (no dependencies on Windows, alternative dependencies offered for Linux) (:issue:`3837`). - Plotting functions now raise a ``TypeError`` before trying to plot anything - if the associated objects have have a dtype of ``object`` (:issue:`1818`, + if the associated objects have a dtype of ``object`` (:issue:`1818`, :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which @@ -4082,7 +4082,7 @@ Bug Fixes columns (:issue:`1943`) - Fix time zone localization bug causing improper fields (e.g. hours) in time zones that have not had a UTC transition in a long time (:issue:`1946`) -- Fix errors when parsing and working with with fixed offset timezones +- Fix errors when parsing and working with fixed offset timezones (:issue:`1922`, :issue:`1928`) - Fix text parser bug when handling UTC datetime objects generated by dateutil (:issue:`1693`) @@ -4383,7 +4383,7 @@ Bug Fixes error (:issue:`1090`) - Consistently set name on groupby pieces (:issue:`184`) - Treat dict return values as Series in GroupBy.apply (:issue:`823`) -- Respect column selection for DataFrame in in GroupBy.transform (:issue:`1365`) +- Respect column selection for DataFrame in GroupBy.transform (:issue:`1365`) - Fix MultiIndex partial indexing bug (:issue:`1352`) - Enable assignment of rows in mixed-type DataFrame via .ix (:issue:`1432`) - Reset index mapping when grouping Series in Cython (:issue:`1423`) @@ -5040,7 +5040,7 @@ New Features - Add `melt` function to `pandas.core.reshape` - Add `level` parameter to group by level in Series and DataFrame descriptive statistics (:issue:`313`) -- Add `head` and `tail` methods to Series, analogous to to DataFrame (PR +- Add `head` and `tail` methods to Series, analogous to DataFrame (PR :issue:`296`) - Add `Series.isin` function which checks if each value is contained in a passed sequence (:issue:`289`) diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 43ccd372d9d5b..710212bc237cd 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -19,7 +19,7 @@ pandas Cookbook The goal of this cookbook (by `Julia Evans <http://jvns.ca>`_) is to give you some concrete examples for getting started with pandas. These are examples with real-world data, and all the bugs and weirdness that -that entails. +entails. Here are links to the v0.1 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in this tutorial, you'll need to
Minor fixes to docs ✏️
https://api.github.com/repos/pandas-dev/pandas/pulls/19546
2018-02-06T02:20:33Z
2018-02-06T10:15:51Z
2018-02-06T10:15:51Z
2018-02-06T22:57:03Z
TST: separate DatetimeIndex timezone tests
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 09a6b35a0ff0e..f6f8eccf4e30c 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -412,6 +412,14 @@ def test_dti_shift_no_freq(self): with pytest.raises(NullFrequencyError): dti.shift(2) + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_shift_localized(self, tzstr): + dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') + dr_tz = dr.tz_localize(tzstr) + + result = dr_tz.shift(1, '10T') + assert result.tz == dr_tz.tz + # ------------------------------------------------------------- # Binary operations DatetimeIndex and timedelta-like @@ -767,6 +775,24 @@ def test_dti_with_offset_series(self, tz, names): res3 = dti - other tm.assert_series_equal(res3, expected_sub) + def test_dti_add_offset_tzaware(self): + dates = date_range('2012-11-01', periods=3, tz='US/Pacific') + offset = dates + pd.offsets.Hour(5) + assert dates[0] + pd.offsets.Hour(5) == offset[0] + + # GH#6818 + for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']: + dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H') + expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00', + '2010-11-01 07:00'], freq='H', tz=tz) + + offset = dates + pd.offsets.Hour(5) + tm.assert_index_equal(offset, expected) + offset = dates + np.timedelta64(5, 'h') + tm.assert_index_equal(offset, expected) + offset = dates + timedelta(hours=5) + tm.assert_index_equal(offset, expected) + @pytest.mark.parametrize('klass,assert_func', [ (Series, tm.assert_series_equal), diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py new file mode 100644 index 0000000000000..075d239df5f7a --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -0,0 +1,1018 @@ +# -*- coding: utf-8 -*- +""" +Tests for DatetimeIndex timezone-related methods +""" +from datetime import datetime, timedelta, tzinfo +from distutils.version import LooseVersion + +import pytest +import pytz +import dateutil +from dateutil.tz import gettz, tzlocal +import numpy as np + +import pandas.util.testing as tm +import pandas.util._test_decorators as td + +import pandas as pd +from pandas._libs import tslib +from pandas._libs.tslibs import timezones +from pandas.compat import lrange, zip +from pandas import (DatetimeIndex, date_range, bdate_range, + Timestamp, isna, to_datetime, Index) + + +class FixedOffset(tzinfo): + """Fixed offset in minutes east from UTC.""" + + def __init__(self, offset, name): + self.__offset = timedelta(minutes=offset) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return timedelta(0) + + +fixed_off = FixedOffset(-420, '-07:00') +fixed_off_no_name = FixedOffset(-330, None) + + +class TestDatetimeIndexTimezones(object): + # ------------------------------------------------------------- + # DatetimeIndex.tz_convert + def test_tz_convert_nat(self): + # GH#5546 + dates = [pd.NaT] + idx = DatetimeIndex(dates) + idx = idx.tz_localize('US/Pacific') + tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific')) + idx = idx.tz_convert('US/Eastern') + tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern')) + idx = idx.tz_convert('UTC') + tm.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC')) + + dates = ['2010-12-01 00:00', '2010-12-02 00:00', pd.NaT] + idx = 
DatetimeIndex(dates) + idx = idx.tz_localize('US/Pacific') + tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific')) + idx = idx.tz_convert('US/Eastern') + expected = ['2010-12-01 03:00', '2010-12-02 03:00', pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) + + idx = idx + pd.offsets.Hour(5) + expected = ['2010-12-01 08:00', '2010-12-02 08:00', pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) + idx = idx.tz_convert('US/Pacific') + expected = ['2010-12-01 05:00', '2010-12-02 05:00', pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific')) + + idx = idx + np.timedelta64(3, 'h') + expected = ['2010-12-01 08:00', '2010-12-02 08:00', pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific')) + + idx = idx.tz_convert('US/Eastern') + expected = ['2010-12-01 11:00', '2010-12-02 11:00', pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) + + @pytest.mark.parametrize('prefix', ['', 'dateutil/']) + def test_dti_tz_convert_compat_timestamp(self, prefix): + strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] + idx = DatetimeIndex(strdates, tz=prefix + 'US/Eastern') + + conv = idx[0].tz_convert(prefix + 'US/Pacific') + expected = idx.tz_convert(prefix + 'US/Pacific')[0] + + assert conv == expected + + def test_dti_tz_convert_hour_overflow_dst(self): + # Regression test for: + # https://github.com/pandas-dev/pandas/issues/13306 + + # sorted case US/Eastern -> UTC + ts = ['2008-05-12 09:50:00', + '2008-12-12 09:50:35', + '2009-05-12 09:50:32'] + tt = DatetimeIndex(ts).tz_localize('US/Eastern') + ut = tt.tz_convert('UTC') + expected = Index([13, 14, 13]) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = ['2008-05-12 13:50:00', + '2008-12-12 14:50:35', + '2009-05-12 13:50:32'] + tt = DatetimeIndex(ts).tz_localize('UTC') + ut = tt.tz_convert('US/Eastern') + expected = Index([9, 9, 9]) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = ['2008-05-12 09:50:00', + '2008-12-12 09:50:35', + '2008-05-12 09:50:32'] + tt = DatetimeIndex(ts).tz_localize('US/Eastern') + ut = tt.tz_convert('UTC') + expected = Index([13, 14, 13]) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = ['2008-05-12 13:50:00', + '2008-12-12 14:50:35', + '2008-05-12 13:50:32'] + tt = DatetimeIndex(ts).tz_localize('UTC') + ut = tt.tz_convert('US/Eastern') + expected = Index([9, 9, 9]) + tm.assert_index_equal(ut.hour, expected) + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): + # Regression test for GH#13306 + + # sorted case US/Eastern -> UTC + ts = [Timestamp('2008-05-12 09:50:00', tz=tz), + Timestamp('2008-12-12 09:50:35', tz=tz), + Timestamp('2009-05-12 09:50:32', tz=tz)] + tt = DatetimeIndex(ts) + ut = tt.tz_convert('UTC') + expected = Index([13, 14, 13]) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'), + Timestamp('2008-12-12 14:50:35', tz='UTC'), + Timestamp('2009-05-12 13:50:32', tz='UTC')] + tt = DatetimeIndex(ts) + ut = tt.tz_convert('US/Eastern') + expected = Index([9, 9, 9]) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = [Timestamp('2008-05-12 09:50:00', tz=tz), + Timestamp('2008-12-12 09:50:35', tz=tz), + Timestamp('2008-05-12 09:50:32', tz=tz)] + tt = DatetimeIndex(ts) + ut = 
tt.tz_convert('UTC') + expected = Index([13, 14, 13]) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'), + Timestamp('2008-12-12 14:50:35', tz='UTC'), + Timestamp('2008-05-12 13:50:32', tz='UTC')] + tt = DatetimeIndex(ts) + ut = tt.tz_convert('US/Eastern') + expected = Index([9, 9, 9]) + tm.assert_index_equal(ut.hour, expected) + + def test_dti_tz_convert_trans_pos_plus_1__bug(self): + # Regression test for tslib.tz_convert(vals, tz1, tz2). + # See https://github.com/pandas-dev/pandas/issues/4496 for details. + for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: + idx = date_range(datetime(2011, 3, 26, 23), + datetime(2011, 3, 27, 1), freq=freq) + idx = idx.tz_localize('UTC') + idx = idx.tz_convert('Europe/Moscow') + + expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) + + def test_dti_tz_convert_dst(self): + for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: + # Start DST + idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq, + tz='UTC') + idx = idx.tz_convert('US/Eastern') + expected = np.repeat(np.array([18, 19, 20, 21, 22, 23, + 0, 1, 3, 4, 5]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) + + idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq, + tz='US/Eastern') + idx = idx.tz_convert('UTC') + expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) + + # End DST + idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq, + tz='UTC') + idx = idx.tz_convert('US/Eastern') + expected = np.repeat(np.array([19, 20, 21, 22, 23, + 0, 1, 1, 2, 3, 4]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) + + idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq, + tz='US/Eastern') + idx = idx.tz_convert('UTC') + expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10]), + np.array([n, n, n, n, n, n, n, n, n, + n, n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) + + # daily + # Start DST + idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', + tz='UTC') + idx = idx.tz_convert('US/Eastern') + tm.assert_index_equal(idx.hour, Index([19, 19])) + + idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', + tz='US/Eastern') + idx = idx.tz_convert('UTC') + tm.assert_index_equal(idx.hour, Index([5, 5])) + + # End DST + idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D', + tz='UTC') + idx = idx.tz_convert('US/Eastern') + tm.assert_index_equal(idx.hour, Index([20, 20])) + + idx = date_range('2014-11-01 00:00', '2014-11-02 000:00', freq='D', + tz='US/Eastern') + idx = idx.tz_convert('UTC') + tm.assert_index_equal(idx.hour, Index([4, 4])) + + @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/US/Pacific']) + def test_tz_convert_roundtrip(self, tz): + idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M', + tz='UTC') + exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') + + idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D', + tz='UTC') + exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D') + + idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H', + tz='UTC') + exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H') + + idx4 = 
date_range(start='2014-08-01', end='2014-10-31', freq='T', + tz='UTC') + exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T') + + for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), + (idx4, exp4)]: + converted = idx.tz_convert(tz) + reset = converted.tz_convert(None) + tm.assert_index_equal(reset, expected) + assert reset.tzinfo is None + expected = converted.tz_convert('UTC').tz_localize(None) + tm.assert_index_equal(reset, expected) + + def test_dti_tz_convert_tzlocal(self): + # GH#13583 + # tz_convert doesn't affect to internal + dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC') + dti2 = dti.tz_convert(dateutil.tz.tzlocal()) + tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) + + dti = date_range(start='2001-01-01', end='2001-03-01', + tz=dateutil.tz.tzlocal()) + dti2 = dti.tz_convert(None) + tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern', + pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_convert_utc_to_local_no_modify(self, tz): + rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') + rng_eastern = rng.tz_convert(tz) + + # Values are unmodified + tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) + + assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz)) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_tz_convert_unsorted(self, tzstr): + dr = date_range('2012-03-09', freq='H', periods=100, tz='utc') + dr = dr.tz_convert(tzstr) + + result = dr[::-1].hour + exp = dr.hour[::-1] + tm.assert_almost_equal(result, exp) + + # ------------------------------------------------------------- + # DatetimeIndex.tz_localize + + def test_dti_tz_localize_nonexistent_raise_coerce(self): + # GH#13057 + times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00'] + index = DatetimeIndex(times) + tz = 'US/Eastern' + with pytest.raises(pytz.NonExistentTimeError): + index.tz_localize(tz=tz) + + with pytest.raises(pytz.NonExistentTimeError): + index.tz_localize(tz=tz, errors='raise') + + result = index.tz_localize(tz=tz, errors='coerce') + test_times = ['2015-03-08 01:00-05:00', 'NaT', + '2015-03-08 03:00-04:00'] + dti = DatetimeIndex(test_times) + expected = dti.tz_localize('UTC').tz_convert('US/Eastern') + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_localize_ambiguous_infer(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + # With no repeated hours, we cannot infer the transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, + freq=pd.offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError): + dr.tz_localize(tz) + + # With repeated hours, we can infer the transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, + freq=pd.offsets.Hour(), tz=tz) + times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', + '11/06/2011 02:00', '11/06/2011 03:00'] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous='infer') + tm.assert_index_equal(dr, localized) + with tm.assert_produces_warning(FutureWarning): + localized_old = di.tz_localize(tz, infer_dst=True) + tm.assert_index_equal(dr, localized_old) + tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, + ambiguous='infer')) + + # When there is no dst transition, nothing special happens + dr = date_range(datetime(2011, 6, 1, 0), periods=10, + freq=pd.offsets.Hour()) + localized = 
dr.tz_localize(tz) + localized_infer = dr.tz_localize(tz, ambiguous='infer') + tm.assert_index_equal(localized, localized_infer) + with tm.assert_produces_warning(FutureWarning): + localized_infer_old = dr.tz_localize(tz, infer_dst=True) + tm.assert_index_equal(localized, localized_infer_old) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_localize_ambiguous_times(self, tz): + # March 13, 2011, spring forward, skip from 2 AM to 3 AM + dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, + freq=pd.offsets.Hour()) + with pytest.raises(pytz.NonExistentTimeError): + dr.tz_localize(tz) + + # after dst transition, it works + dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3, + freq=pd.offsets.Hour(), tz=tz) + + # November 6, 2011, fall back, repeat 2 AM hour + dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, + freq=pd.offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError): + dr.tz_localize(tz) + + # UTC is OK + dr = date_range(datetime(2011, 3, 13), periods=48, + freq=pd.offsets.Minute(30), tz=pytz.utc) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): + strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] + + idx = DatetimeIndex(strdates) + conv = idx.tz_localize(tzstr) + + fromdates = DatetimeIndex(strdates, tz=tzstr) + + assert conv.tz == fromdates.tz + tm.assert_numpy_array_equal(conv.values, fromdates.values) + + @pytest.mark.parametrize('prefix', ['', 'dateutil/']) + def test_dti_tz_localize(self, prefix): + tzstr = prefix + 'US/Eastern' + dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256', + freq='L') + dti2 = dti.tz_localize(tzstr) + + dti_utc = DatetimeIndex(start='1/1/2005 05:00', + end='1/1/2005 5:00:30.256', freq='L', tz='utc') + + tm.assert_numpy_array_equal(dti2.values, dti_utc.values) + + dti3 = dti2.tz_convert(prefix + 'US/Pacific') + tm.assert_numpy_array_equal(dti3.values, dti_utc.values) + + dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00', + freq='L') + with pytest.raises(pytz.AmbiguousTimeError): + dti.tz_localize(tzstr) + + dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00', + freq='L') + with pytest.raises(pytz.NonExistentTimeError): + dti.tz_localize(tzstr) + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern', + pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_localize_utc_conversion(self, tz): + # Localizing to time zone should: + # 1) check for DST ambiguities + # 2) convert to UTC + + rng = date_range('3/10/2012', '3/11/2012', freq='30T') + + converted = rng.tz_localize(tz) + expected_naive = rng + pd.offsets.Hour(5) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) + + # DST ambiguity, this should fail + rng = date_range('3/11/2012', '3/12/2012', freq='30T') + # Is this really how it should fail?? 
+ with pytest.raises(pytz.NonExistentTimeError): + rng.tz_localize(tz) + + @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/US/Pacific']) + def test_dti_tz_localize_roundtrip(self, tz): + idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') + idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D') + idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H') + idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T') + for idx in [idx1, idx2, idx3, idx4]: + localized = idx.tz_localize(tz) + expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq, + tz=tz) + tm.assert_index_equal(localized, expected) + + with pytest.raises(TypeError): + localized.tz_localize(tz) + + reset = localized.tz_localize(None) + tm.assert_index_equal(reset, idx) + assert reset.tzinfo is None + + def test_dti_tz_localize_naive(self): + rng = date_range('1/1/2011', periods=100, freq='H') + + conv = rng.tz_localize('US/Pacific') + exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific') + + tm.assert_index_equal(conv, exp) + + def test_dti_tz_localize_tzlocal(self): + # GH#13583 + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) + offset = int(offset.total_seconds() * 1000000000) + + dti = date_range(start='2001-01-01', end='2001-03-01') + dti2 = dti.tz_localize(dateutil.tz.tzlocal()) + tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) + + dti = date_range(start='2001-01-01', end='2001-03-01', + tz=dateutil.tz.tzlocal()) + dti2 = dti.tz_localize(None) + tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_localize_ambiguous_nat(self, tz): + times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', + '11/06/2011 02:00', '11/06/2011 03:00'] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous='NaT') + + times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00', + '11/06/2011 03:00'] + di_test = DatetimeIndex(times, tz='US/Eastern') + + # left dtype is datetime64[ns, US/Eastern] + # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] + tm.assert_numpy_array_equal(di_test.values, localized.values) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_tz_localize_ambiguous_flags(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + + # Pass in flags to determine right dst transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, + freq=pd.offsets.Hour(), tz=tz) + times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', + '11/06/2011 02:00', '11/06/2011 03:00'] + + # Test tz_localize + di = DatetimeIndex(times) + is_dst = [1, 1, 0, 0, 0] + localized = di.tz_localize(tz, ambiguous=is_dst) + tm.assert_index_equal(dr, localized) + tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, + ambiguous=is_dst)) + + localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) + tm.assert_index_equal(dr, localized) + + localized = di.tz_localize(tz, + ambiguous=np.array(is_dst).astype('bool')) + tm.assert_index_equal(dr, localized) + + # Test constructor + localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) + tm.assert_index_equal(dr, localized) + + # Test duplicate times where infer_dst fails + times += times + di = DatetimeIndex(times) + + # When the sizes are incompatible, make sure error is raised + with pytest.raises(Exception): + di.tz_localize(tz, ambiguous=is_dst) + + # When sizes 
are compatible and there are repeats ('infer' won't work) + is_dst = np.hstack((is_dst, is_dst)) + localized = di.tz_localize(tz, ambiguous=is_dst) + dr = dr.append(dr) + tm.assert_index_equal(dr, localized) + + # When there is no dst transition, nothing special happens + dr = date_range(datetime(2011, 6, 1, 0), periods=10, + freq=pd.offsets.Hour()) + is_dst = np.array([1] * 10) + localized = dr.tz_localize(tz) + localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) + tm.assert_index_equal(localized, localized_is_dst) + + # TODO: belongs outside tz_localize tests? + @pytest.mark.parametrize('tz', ['Europe/London', 'dateutil/Europe/London']) + def test_dti_construction_ambiguous_endpoint(self, tz): + # construction with an ambiguous end-point + # GH#11626 + + # FIXME: This next block fails to raise; it was taken from an older + # version of this test that had an indention mistake that caused it + # to not get executed. + # with pytest.raises(pytz.AmbiguousTimeError): + # date_range("2013-10-26 23:00", "2013-10-27 01:00", + # tz="Europe/London", freq="H") + + times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H", + tz=tz, ambiguous='infer') + assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H") + + if str(tz).startswith('dateutil'): + if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'): + # see GH#14621 + assert times[-1] == Timestamp('2013-10-27 01:00:00+0000', + tz=tz, freq="H") + elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'): + # fixed ambiguous behavior + assert times[-1] == Timestamp('2013-10-27 01:00:00+0100', + tz=tz, freq="H") + else: + assert times[-1] == Timestamp('2013-10-27 01:00:00+0000', + tz=tz, freq="H") + + def test_dti_tz_localize_bdate_range(self): + dr = pd.bdate_range('1/1/2009', '1/1/2010') + dr_utc = pd.bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc) + localized = dr.tz_localize(pytz.utc) + tm.assert_index_equal(dr_utc, localized) + + # ------------------------------------------------------------- + # DatetimeIndex.normalize + + def test_normalize_tz(self): + rng = date_range('1/1/2000 9:30', periods=10, freq='D', + tz='US/Eastern') + + result = rng.normalize() + expected = date_range('1/1/2000', periods=10, freq='D', + tz='US/Eastern') + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC') + + result = rng.normalize() + expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC') + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal()) + result = rng.normalize() + expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal()) + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + @td.skip_if_windows + @pytest.mark.parametrize('timezone', ['US/Pacific', 'US/Eastern', 'UTC', + 'Asia/Kolkata', 'Asia/Shanghai', + 'Australia/Canberra']) + def test_normalize_tz_local(self, timezone): + # GH#13459 + with tm.set_timezone(timezone): + rng = date_range('1/1/2000 9:30', periods=10, freq='D', + tz=tzlocal()) + + result = rng.normalize() + expected = date_range('1/1/2000', periods=10, freq='D', + tz=tzlocal()) + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + # ------------------------------------------------------------ + # DatetimeIndex.__new__ + + 
@pytest.mark.parametrize('prefix', ['', 'dateutil/']) + def test_dti_constructor_static_tzinfo(self, prefix): + # it works! + index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + 'EST') + index.hour + index[0] + + def test_dti_constructor_with_fixed_tz(self): + off = FixedOffset(420, '+07:00') + start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) + end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) + rng = date_range(start=start, end=end) + assert off == rng.tz + + rng2 = date_range(start, periods=len(rng), tz=off) + tm.assert_index_equal(rng, rng2) + + rng3 = date_range('3/11/2012 05:00:00+07:00', + '6/11/2012 05:00:00+07:00') + assert (rng.values == rng3.values).all() + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_convert_datetime_list(self, tzstr): + dr = date_range('2012-06-02', periods=10, + tz=tzstr, name='foo') + dr2 = DatetimeIndex(list(dr), name='foo') + tm.assert_index_equal(dr, dr2) + assert dr.tz == dr2.tz + assert dr2.name == 'foo' + + def test_dti_construction_univalent(self): + rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI', + tz='US/Eastern') + rng2 = DatetimeIndex(data=rng, tz='US/Eastern') + tm.assert_index_equal(rng, rng2) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_from_tzaware_datetime(self, tz): + d = [datetime(2012, 8, 19, tzinfo=tz)] + + index = DatetimeIndex(d) + assert timezones.tz_compare(index.tz, tz) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_tz_constructors(self, tzstr): + """ Test different DatetimeIndex constructions with timezone + Follow-up of GH#4229 + """ + + arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00'] + + idx1 = to_datetime(arr).tz_localize(tzstr) + idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2, + tz=tzstr) + idx3 = DatetimeIndex(arr, tz=tzstr) + idx4 = DatetimeIndex(np.array(arr), tz=tzstr) + + for other in [idx2, idx3, idx4]: + tm.assert_index_equal(idx1, other) + + # ------------------------------------------------------------- + # Unsorted + + def test_join_utc_convert(self): + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + + left = rng.tz_convert('US/Eastern') + right = rng.tz_convert('Europe/Berlin') + + for how in ['inner', 'outer', 'left', 'right']: + result = left.join(left[:-5], how=how) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz + + result = left.join(right[:-5], how=how) + assert isinstance(result, DatetimeIndex) + assert result.tz.zone == 'UTC' + + def test_dti_drop_dont_lose_tz(self): + # GH#2621 + ind = date_range("2012-12-01", periods=10, tz="utc") + ind = ind.drop(ind[-1]) + + assert ind.tz is not None + + def test_date_range_localize(self): + rng = date_range('3/11/2012 03:00', periods=15, freq='H', + tz='US/Eastern') + rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'], + tz='US/Eastern') + rng3 = date_range('3/11/2012 03:00', periods=15, freq='H') + rng3 = rng3.tz_localize('US/Eastern') + + tm.assert_index_equal(rng, rng3) + + # DST transition time + val = rng[0] + exp = Timestamp('3/11/2012 03:00', tz='US/Eastern') + + assert val.hour == 3 + assert exp.hour == 3 + assert val == exp # same UTC value + tm.assert_index_equal(rng[:2], rng2) + + # Right before the DST transition + rng = date_range('3/11/2012 00:00', periods=2, freq='H', + tz='US/Eastern') + rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'], + tz='US/Eastern') + tm.assert_index_equal(rng, rng2) + exp = 
Timestamp('3/11/2012 00:00', tz='US/Eastern') + assert exp.hour == 0 + assert rng[0] == exp + exp = Timestamp('3/11/2012 01:00', tz='US/Eastern') + assert exp.hour == 1 + assert rng[1] == exp + + rng = date_range('3/11/2012 00:00', periods=10, freq='H', + tz='US/Eastern') + assert rng[2].hour == 3 + + def test_timestamp_equality_different_timezones(self): + utc_range = date_range('1/1/2000', periods=20, tz='UTC') + eastern_range = utc_range.tz_convert('US/Eastern') + berlin_range = utc_range.tz_convert('Europe/Berlin') + + for a, b, c in zip(utc_range, eastern_range, berlin_range): + assert a == b + assert b == c + assert a == c + + assert (utc_range == eastern_range).all() + assert (utc_range == berlin_range).all() + assert (berlin_range == eastern_range).all() + + def test_dti_intersection(self): + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + + left = rng[10:90][::-1] + right = rng[20:80][::-1] + + assert left.tz == rng.tz + result = left.intersection(right) + assert result.tz == left.tz + + def test_dti_equals_with_tz(self): + left = date_range('1/1/2011', periods=100, freq='H', tz='utc') + right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern') + + assert not left.equals(right) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_tz_nat(self, tzstr): + idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT]) + + assert isna(idx[1]) + assert idx[0].tzinfo is not None + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_astype_asobject_tzinfos(self, tzstr): + # GH#1345 + + # dates around a dst transition + rng = date_range('2/13/2010', '5/6/2010', tz=tzstr) + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_with_timezone_repr(self, tzstr): + rng = date_range('4/13/2010', '5/6/2010') + + rng_eastern = rng.tz_localize(tzstr) + + rng_repr = repr(rng_eastern) + assert '2010-04-13 00:00:00' in rng_repr + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_dti_take_dont_lose_meta(self, tzstr): + rng = date_range('1/1/2000', periods=20, tz=tzstr) + + result = rng.take(lrange(5)) + assert result.tz == rng.tz + assert result.freq == rng.freq + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_utc_box_timestamp_and_localize(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') + rng_eastern = rng.tz_convert(tzstr) + + expected = rng[-1].astimezone(tz) + + stamp = rng_eastern[-1] + assert stamp == expected + assert stamp.tzinfo == expected.tzinfo + + # right tzinfo + rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc') + rng_eastern = rng.tz_convert(tzstr) + # test not valid for dateutil timezones. 
+ # assert 'EDT' in repr(rng_eastern[0].tzinfo) + assert ('EDT' in repr(rng_eastern[0].tzinfo) or + 'tzfile' in repr(rng_eastern[0].tzinfo)) + + def test_dti_to_pydatetime(self): + dt = dateutil.parser.parse('2012-06-13T01:39:00Z') + dt = dt.replace(tzinfo=tzlocal()) + + arr = np.array([dt], dtype=object) + + result = to_datetime(arr, utc=True) + assert result.tz is pytz.utc + + rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal()) + arr = rng.to_pydatetime() + result = to_datetime(arr, utc=True) + assert result.tz is pytz.utc + + def test_dti_to_pydatetime_fizedtz(self): + dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off), + datetime(2000, 1, 2, tzinfo=fixed_off), + datetime(2000, 1, 3, tzinfo=fixed_off)]) + dti = DatetimeIndex(dates) + + result = dti.to_pydatetime() + tm.assert_numpy_array_equal(dates, result) + + result = dti._mpl_repr() + tm.assert_numpy_array_equal(dates, result) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Central'), + gettz('US/Central')]) + def test_with_tz(self, tz): + # just want it to work + start = datetime(2011, 3, 12, tzinfo=pytz.utc) + dr = bdate_range(start, periods=50, freq=pd.offsets.Hour()) + assert dr.tz is pytz.utc + + # DateRange with naive datetimes + dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc) + dr = bdate_range('1/1/2005', '1/1/2009', tz=tz) + + # normalized + central = dr.tz_convert(tz) + assert central.tz is tz + naive = central[0].to_pydatetime().replace(tzinfo=None) + comp = tslib._localize_pydatetime(naive, tz).tzinfo + assert central[0].tz is comp + + # compare vs a localized tz + naive = dr[0].to_pydatetime().replace(tzinfo=None) + comp = tslib._localize_pydatetime(naive, tz).tzinfo + assert central[0].tz is comp + + # datetimes with tzinfo set + dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), + datetime(2009, 1, 1, tzinfo=pytz.utc)) + with pytest.raises(Exception): + bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009', + tz=tz) + + @pytest.mark.parametrize('prefix', ['', 'dateutil/']) + def test_field_access_localize(self, prefix): + strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] + rng = DatetimeIndex(strdates, tz=prefix + 'US/Eastern') + assert (rng.hour == 0).all() + + # a more unusual time zone, #1946 + dr = date_range('2011-10-02 00:00', freq='h', periods=10, + tz=prefix + 'America/Atikokan') + + expected = Index(np.arange(10, dtype=np.int64)) + tm.assert_index_equal(dr.hour, expected) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern')]) + def test_dti_convert_tz_aware_datetime_datetime(self, tz): + # GH#1581 + dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), + datetime(2000, 1, 3)] + + dates_aware = [tslib._localize_pydatetime(x, tz) for x in dates] + result = DatetimeIndex(dates_aware) + assert timezones.tz_compare(result.tz, tz) + + converted = to_datetime(dates_aware, utc=True) + ex_vals = np.array([Timestamp(x).value for x in dates_aware]) + tm.assert_numpy_array_equal(converted.asi8, ex_vals) + assert converted.tz is pytz.utc + + def test_dti_union_aware(self): + # non-overlapping + rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", + tz="US/Central") + + rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", + tz="US/Eastern") + + result = rng.union(rng2) + assert result.tz.zone == 'UTC' + + +class TestDateRange(object): + """Tests for date_range with timezones""" + def test_hongkong_tz_convert(self): + # GH#1673 smoke test + dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong') + + # it works! 
+ dr.hour + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_date_range_span_dst_transition(self, tzstr): + # GH#1778 + + # Standard -> Daylight Savings Time + dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI', + tz='US/Eastern') + + assert (dr.hour == 0).all() + + dr = date_range('2012-11-02', periods=10, tz=tzstr) + assert (dr.hour == 0).all() + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_date_range_timezone_str_argument(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + result = date_range('1/1/2000', periods=10, tz=tzstr) + expected = date_range('1/1/2000', periods=10, tz=tz) + + tm.assert_index_equal(result, expected) + + def test_date_range_with_fixedoffset_noname(self): + off = fixed_off_no_name + start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) + end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) + rng = date_range(start=start, end=end) + assert off == rng.tz + + idx = Index([start, end]) + assert off == idx.tz + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_date_range_with_tz(self, tzstr): + stamp = Timestamp('3/11/2012 05:00', tz=tzstr) + assert stamp.hour == 5 + + rng = date_range('3/11/2012 04:00', periods=10, freq='H', + tz=tzstr) + + assert stamp == rng[1] + + +class TestToDatetime(object): + """Tests for the to_datetime constructor with timezones""" + def test_to_datetime_utc(self): + arr = np.array([dateutil.parser.parse('2012-06-13T01:39:00Z')], + dtype=object) + + result = to_datetime(arr, utc=True) + assert result.tz is pytz.utc + + def test_to_datetime_fixed_offset(self): + dates = [datetime(2000, 1, 1, tzinfo=fixed_off), + datetime(2000, 1, 2, tzinfo=fixed_off), + datetime(2000, 1, 3, tzinfo=fixed_off)] + result = to_datetime(dates) + assert result.tz == fixed_off diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 8f46e0a58580e..565e735c14c80 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -5,43 +5,13 @@ import dateutil import numpy as np -from dateutil.parser import parse -from pytz import NonExistentTimeError -from distutils.version import LooseVersion -from dateutil.tz import tzlocal -from datetime import datetime, timedelta, tzinfo +from datetime import datetime import pandas.util.testing as tm -import pandas.util._test_decorators as td -import pandas.tseries.offsets as offsets -from pandas.compat import lrange, zip -from pandas.core.indexes.datetimes import bdate_range, date_range +from pandas.core.indexes.datetimes import date_range from pandas._libs import tslib from pandas._libs.tslibs import timezones, conversion -from pandas import (Index, isna, Timestamp, NaT, - DatetimeIndex, to_datetime) -from pandas.util.testing import set_timezone - - -class FixedOffset(tzinfo): - """Fixed offset in minutes east from UTC.""" - - def __init__(self, offset, name): - self.__offset = timedelta(minutes=offset) - self.__name = name - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return self.__name - - def dst(self, dt): - return timedelta(0) - - -fixed_off = FixedOffset(-420, '-07:00') -fixed_off_no_name = FixedOffset(-330, None) +from pandas import Timestamp class TestTimeZoneSupportPytz(object): @@ -68,399 +38,6 @@ def cmptz(self, tz1, tz2): # tests. 
return tz1.zone == tz2.zone - def test_utc_to_local_no_modify(self): - rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) - - # Values are unmodified - tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) - - assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern')) - - def test_utc_to_local_no_modify_explicit(self): - rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert(self.tz('US/Eastern')) - - # Values are unmodified - tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) - - assert rng_eastern.tz == self.tz('US/Eastern') - - def test_localize_utc_conversion(self): - # Localizing to time zone should: - # 1) check for DST ambiguities - # 2) convert to UTC - - rng = date_range('3/10/2012', '3/11/2012', freq='30T') - - converted = rng.tz_localize(self.tzstr('US/Eastern')) - expected_naive = rng + offsets.Hour(5) - tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) - - # DST ambiguity, this should fail - rng = date_range('3/11/2012', '3/12/2012', freq='30T') - # Is this really how it should fail?? - pytest.raises(NonExistentTimeError, rng.tz_localize, - self.tzstr('US/Eastern')) - - def test_localize_utc_conversion_explicit(self): - # Localizing to time zone should: - # 1) check for DST ambiguities - # 2) convert to UTC - - rng = date_range('3/10/2012', '3/11/2012', freq='30T') - converted = rng.tz_localize(self.tz('US/Eastern')) - expected_naive = rng + offsets.Hour(5) - tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) - - # DST ambiguity, this should fail - rng = date_range('3/11/2012', '3/12/2012', freq='30T') - # Is this really how it should fail?? - pytest.raises(NonExistentTimeError, rng.tz_localize, - self.tz('US/Eastern')) - - def test_tz_localize_dti(self): - dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256', - freq='L') - dti2 = dti.tz_localize(self.tzstr('US/Eastern')) - - dti_utc = DatetimeIndex(start='1/1/2005 05:00', - end='1/1/2005 5:00:30.256', freq='L', tz='utc') - - tm.assert_numpy_array_equal(dti2.values, dti_utc.values) - - dti3 = dti2.tz_convert(self.tzstr('US/Pacific')) - tm.assert_numpy_array_equal(dti3.values, dti_utc.values) - - dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00', - freq='L') - pytest.raises(pytz.AmbiguousTimeError, dti.tz_localize, - self.tzstr('US/Eastern')) - - dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00', - freq='L') - pytest.raises(pytz.NonExistentTimeError, dti.tz_localize, - self.tzstr('US/Eastern')) - - def test_create_with_tz(self): - stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) - assert stamp.hour == 5 - - rng = date_range('3/11/2012 04:00', periods=10, freq='H', - tz=self.tzstr('US/Eastern')) - - assert stamp == rng[1] - - def test_create_with_fixed_tz(self): - off = FixedOffset(420, '+07:00') - start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) - end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) - rng = date_range(start=start, end=end) - assert off == rng.tz - - rng2 = date_range(start, periods=len(rng), tz=off) - tm.assert_index_equal(rng, rng2) - - rng3 = date_range('3/11/2012 05:00:00+07:00', - '6/11/2012 05:00:00+07:00') - assert (rng.values == rng3.values).all() - - def test_create_with_fixedoffset_noname(self): - off = fixed_off_no_name - start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) - end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) - rng = date_range(start=start, end=end) - assert off == rng.tz - - idx = 
Index([start, end]) - assert off == idx.tz - - def test_date_range_localize(self): - rng = date_range('3/11/2012 03:00', periods=15, freq='H', - tz='US/Eastern') - rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'], - tz='US/Eastern') - rng3 = date_range('3/11/2012 03:00', periods=15, freq='H') - rng3 = rng3.tz_localize('US/Eastern') - - tm.assert_index_equal(rng, rng3) - - # DST transition time - val = rng[0] - exp = Timestamp('3/11/2012 03:00', tz='US/Eastern') - - assert val.hour == 3 - assert exp.hour == 3 - assert val == exp # same UTC value - tm.assert_index_equal(rng[:2], rng2) - - # Right before the DST transition - rng = date_range('3/11/2012 00:00', periods=2, freq='H', - tz='US/Eastern') - rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'], - tz='US/Eastern') - tm.assert_index_equal(rng, rng2) - exp = Timestamp('3/11/2012 00:00', tz='US/Eastern') - assert exp.hour == 0 - assert rng[0] == exp - exp = Timestamp('3/11/2012 01:00', tz='US/Eastern') - assert exp.hour == 1 - assert rng[1] == exp - - rng = date_range('3/11/2012 00:00', periods=10, freq='H', - tz='US/Eastern') - assert rng[2].hour == 3 - - def test_utc_box_timestamp_and_localize(self): - rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) - - tz = self.tz('US/Eastern') - expected = rng[-1].astimezone(tz) - - stamp = rng_eastern[-1] - assert stamp == expected - assert stamp.tzinfo == expected.tzinfo - - # right tzinfo - rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) - # test not valid for dateutil timezones. - # assert 'EDT' in repr(rng_eastern[0].tzinfo) - assert ('EDT' in repr(rng_eastern[0].tzinfo) or - 'tzfile' in repr(rng_eastern[0].tzinfo)) - - def test_timestamp_tz_convert(self): - strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] - idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) - - conv = idx[0].tz_convert(self.tzstr('US/Pacific')) - expected = idx.tz_convert(self.tzstr('US/Pacific'))[0] - - assert conv == expected - - def test_pass_dates_localize_to_utc(self): - strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] - - idx = DatetimeIndex(strdates) - conv = idx.tz_localize(self.tzstr('US/Eastern')) - - fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) - - assert conv.tz == fromdates.tz - tm.assert_numpy_array_equal(conv.values, fromdates.values) - - def test_field_access_localize(self): - strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] - rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) - assert (rng.hour == 0).all() - - # a more unusual time zone, #1946 - dr = date_range('2011-10-02 00:00', freq='h', periods=10, - tz=self.tzstr('America/Atikokan')) - - expected = Index(np.arange(10, dtype=np.int64)) - tm.assert_index_equal(dr.hour, expected) - - def test_with_tz(self): - tz = self.tz('US/Central') - - # just want it to work - start = datetime(2011, 3, 12, tzinfo=pytz.utc) - dr = bdate_range(start, periods=50, freq=offsets.Hour()) - assert dr.tz is pytz.utc - - # DateRange with naive datetimes - dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc) - dr = bdate_range('1/1/2005', '1/1/2009', tz=tz) - - # normalized - central = dr.tz_convert(tz) - assert central.tz is tz - comp = self.localize(tz, central[0].to_pydatetime().replace( - tzinfo=None)).tzinfo - assert central[0].tz is comp - - # compare vs a localized tz - comp = self.localize(tz, - dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo - assert central[0].tz is comp - - 
# datetimes with tzinfo set - dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), - datetime(2009, 1, 1, tzinfo=pytz.utc)) - - pytest.raises(Exception, bdate_range, - datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009', - tz=tz) - - def test_tz_localize(self): - dr = bdate_range('1/1/2009', '1/1/2010') - dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc) - localized = dr.tz_localize(pytz.utc) - tm.assert_index_equal(dr_utc, localized) - - def test_with_tz_ambiguous_times(self): - tz = self.tz('US/Eastern') - - # March 13, 2011, spring forward, skip from 2 AM to 3 AM - dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, - freq=offsets.Hour()) - pytest.raises(pytz.NonExistentTimeError, dr.tz_localize, tz) - - # after dst transition, it works - dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3, - freq=offsets.Hour(), tz=tz) - - # November 6, 2011, fall back, repeat 2 AM hour - dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, - freq=offsets.Hour()) - pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz) - - # UTC is OK - dr = date_range(datetime(2011, 3, 13), periods=48, - freq=offsets.Minute(30), tz=pytz.utc) - - def test_ambiguous_infer(self): - # November 6, 2011, fall back, repeat 2 AM hour - # With no repeated hours, we cannot infer the transition - tz = self.tz('US/Eastern') - dr = date_range(datetime(2011, 11, 6, 0), periods=5, - freq=offsets.Hour()) - pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz) - - # With repeated hours, we can infer the transition - dr = date_range(datetime(2011, 11, 6, 0), periods=5, - freq=offsets.Hour(), tz=tz) - times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', - '11/06/2011 02:00', '11/06/2011 03:00'] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous='infer') - tm.assert_index_equal(dr, localized) - with tm.assert_produces_warning(FutureWarning): - localized_old = di.tz_localize(tz, infer_dst=True) - tm.assert_index_equal(dr, localized_old) - tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, - ambiguous='infer')) - - # When there is no dst transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, - freq=offsets.Hour()) - localized = dr.tz_localize(tz) - localized_infer = dr.tz_localize(tz, ambiguous='infer') - tm.assert_index_equal(localized, localized_infer) - with tm.assert_produces_warning(FutureWarning): - localized_infer_old = dr.tz_localize(tz, infer_dst=True) - tm.assert_index_equal(localized, localized_infer_old) - - def test_ambiguous_flags(self): - # November 6, 2011, fall back, repeat 2 AM hour - tz = self.tz('US/Eastern') - - # Pass in flags to determine right dst transition - dr = date_range(datetime(2011, 11, 6, 0), periods=5, - freq=offsets.Hour(), tz=tz) - times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', - '11/06/2011 02:00', '11/06/2011 03:00'] - - # Test tz_localize - di = DatetimeIndex(times) - is_dst = [1, 1, 0, 0, 0] - localized = di.tz_localize(tz, ambiguous=is_dst) - tm.assert_index_equal(dr, localized) - tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz, - ambiguous=is_dst)) - - localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) - tm.assert_index_equal(dr, localized) - - localized = di.tz_localize(tz, - ambiguous=np.array(is_dst).astype('bool')) - tm.assert_index_equal(dr, localized) - - # Test constructor - localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) - tm.assert_index_equal(dr, localized) - - # Test duplicate times where infer_dst fails - times += times - di = 
DatetimeIndex(times) - - # When the sizes are incompatible, make sure error is raised - pytest.raises(Exception, di.tz_localize, tz, ambiguous=is_dst) - - # When sizes are compatible and there are repeats ('infer' won't work) - is_dst = np.hstack((is_dst, is_dst)) - localized = di.tz_localize(tz, ambiguous=is_dst) - dr = dr.append(dr) - tm.assert_index_equal(dr, localized) - - # When there is no dst transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, - freq=offsets.Hour()) - is_dst = np.array([1] * 10) - localized = dr.tz_localize(tz) - localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) - tm.assert_index_equal(localized, localized_is_dst) - - # construction with an ambiguous end-point - # GH 11626 - tz = self.tzstr("Europe/London") - - def f(): - date_range("2013-10-26 23:00", "2013-10-27 01:00", - tz="Europe/London", freq="H") - pytest.raises(pytz.AmbiguousTimeError, f) - - times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H", - tz=tz, ambiguous='infer') - assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H") - - if str(tz).startswith('dateutil'): - if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'): - # see gh-14621 - assert times[-1] == Timestamp('2013-10-27 01:00:00+0000', - tz=tz, freq="H") - elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'): - # fixed ambiguous behavior - assert times[-1] == Timestamp('2013-10-27 01:00:00+0100', - tz=tz, freq="H") - else: - assert times[-1] == Timestamp('2013-10-27 01:00:00+0000', - tz=tz, freq="H") - - def test_ambiguous_nat(self): - tz = self.tz('US/Eastern') - times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', - '11/06/2011 02:00', '11/06/2011 03:00'] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous='NaT') - - times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00', - '11/06/2011 03:00'] - di_test = DatetimeIndex(times, tz='US/Eastern') - - # left dtype is datetime64[ns, US/Eastern] - # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] - tm.assert_numpy_array_equal(di_test.values, localized.values) - - def test_nonexistent_raise_coerce(self): - # See issue 13057 - from pytz.exceptions import NonExistentTimeError - times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00'] - index = DatetimeIndex(times) - tz = 'US/Eastern' - pytest.raises(NonExistentTimeError, - index.tz_localize, tz=tz) - pytest.raises(NonExistentTimeError, - index.tz_localize, tz=tz, errors='raise') - result = index.tz_localize(tz=tz, errors='coerce') - test_times = ['2015-03-08 01:00-05:00', 'NaT', - '2015-03-08 03:00-04:00'] - expected = DatetimeIndex(test_times)\ - .tz_localize('UTC').tz_convert('US/Eastern') - tm.assert_index_equal(result, expected) - # test utility methods def test_infer_tz(self): eastern = self.tz('US/Eastern') @@ -486,183 +63,6 @@ def test_infer_tz(self): pytest.raises(Exception, timezones.infer_tzinfo, start, end) pytest.raises(Exception, timezones.infer_tzinfo, end, start) - def test_tz_string(self): - result = date_range('1/1/2000', periods=10, - tz=self.tzstr('US/Eastern')) - expected = date_range('1/1/2000', periods=10, tz=self.tz('US/Eastern')) - - tm.assert_index_equal(result, expected) - - def test_take_dont_lose_meta(self): - rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern')) - - result = rng.take(lrange(5)) - assert result.tz == rng.tz - assert result.freq == rng.freq - - def test_index_with_timezone_repr(self): - rng = date_range('4/13/2010', '5/6/2010') - - 
rng_eastern = rng.tz_localize(self.tzstr('US/Eastern')) - - rng_repr = repr(rng_eastern) - assert '2010-04-13 00:00:00' in rng_repr - - def test_index_astype_asobject_tzinfos(self): - # #1345 - - # dates around a dst transition - rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern')) - - objs = rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - objs = rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - def test_fixed_offset(self): - dates = [datetime(2000, 1, 1, tzinfo=fixed_off), - datetime(2000, 1, 2, tzinfo=fixed_off), - datetime(2000, 1, 3, tzinfo=fixed_off)] - result = to_datetime(dates) - assert result.tz == fixed_off - - def test_fixedtz_topydatetime(self): - dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off), - datetime(2000, 1, 2, tzinfo=fixed_off), - datetime(2000, 1, 3, tzinfo=fixed_off)]) - result = to_datetime(dates).to_pydatetime() - tm.assert_numpy_array_equal(dates, result) - result = to_datetime(dates)._mpl_repr() - tm.assert_numpy_array_equal(dates, result) - - def test_convert_tz_aware_datetime_datetime(self): - # #1581 - - tz = self.tz('US/Eastern') - - dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), - datetime(2000, 1, 3)] - - dates_aware = [self.localize(tz, x) for x in dates] - result = to_datetime(dates_aware) - assert self.cmptz(result.tz, self.tz('US/Eastern')) - - converted = to_datetime(dates_aware, utc=True) - ex_vals = np.array([Timestamp(x).value for x in dates_aware]) - tm.assert_numpy_array_equal(converted.asi8, ex_vals) - assert converted.tz is pytz.utc - - def test_to_datetime_utc(self): - arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object) - - result = to_datetime(arr, utc=True) - assert result.tz is pytz.utc - - def test_to_datetime_tzlocal(self): - dt = parse('2012-06-13T01:39:00Z') - dt = dt.replace(tzinfo=tzlocal()) - - arr = np.array([dt], dtype=object) - - result = to_datetime(arr, utc=True) - assert result.tz is pytz.utc - - rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal()) - arr = rng.to_pydatetime() - result = to_datetime(arr, utc=True) - assert result.tz is pytz.utc - - def test_hongkong_tz_convert(self): - # #1673 - dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong') - - # it works! - dr.hour - - def test_tz_convert_unsorted(self): - dr = date_range('2012-03-09', freq='H', periods=100, tz='utc') - dr = dr.tz_convert(self.tzstr('US/Eastern')) - - result = dr[::-1].hour - exp = dr.hour[::-1] - tm.assert_almost_equal(result, exp) - - def test_shift_localized(self): - dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) - - result = dr_tz.shift(1, '10T') - assert result.tz == dr_tz.tz - - def test_static_tzinfo(self): - # it works! 
- index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST')) - index.hour - index[0] - - def test_tzaware_datetime_to_index(self): - d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))] - - index = DatetimeIndex(d) - assert self.cmptz(index.tz, self.tz('US/Eastern')) - - def test_date_range_span_dst_transition(self): - # #1778 - - # Standard -> Daylight Savings Time - dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI', - tz='US/Eastern') - - assert (dr.hour == 0).all() - - dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern')) - assert (dr.hour == 0).all() - - def test_convert_datetime_list(self): - dr = date_range('2012-06-02', periods=10, - tz=self.tzstr('US/Eastern'), name='foo') - dr2 = DatetimeIndex(list(dr), name='foo') - tm.assert_index_equal(dr, dr2) - assert dr.tz == dr2.tz - assert dr2.name == 'foo' - - def test_index_drop_dont_lose_tz(self): - # #2621 - ind = date_range("2012-12-01", periods=10, tz="utc") - ind = ind.drop(ind[-1]) - - assert ind.tz is not None - - def test_datetimeindex_tz(self): - """ Test different DatetimeIndex constructions with timezone - Follow-up of #4229 - """ - - arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00'] - - idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern')) - idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2, - tz=self.tzstr('US/Eastern')) - idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern')) - idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern')) - - for other in [idx2, idx3, idx4]: - tm.assert_index_equal(idx1, other) - - def test_datetimeindex_tz_nat(self): - idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')), - NaT]) - - assert isna(idx[1]) - assert idx[0].tzinfo is not None - def test_replace_across_dst(self): # GH#18319 check that 1) timezone is correctly normalized and # 2) that hour is not incorrectly changed by this normalization @@ -712,159 +112,6 @@ def normalize(self, ts): # no-op for dateutil return ts - def test_tz_convert_hour_overflow_dst(self): - # Regression test for: - # https://github.com/pandas-dev/pandas/issues/13306 - - # sorted case US/Eastern -> UTC - ts = ['2008-05-12 09:50:00', - '2008-12-12 09:50:35', - '2009-05-12 09:50:32'] - tt = to_datetime(ts).tz_localize('US/Eastern') - ut = tt.tz_convert('UTC') - expected = Index([13, 14, 13]) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = ['2008-05-12 13:50:00', - '2008-12-12 14:50:35', - '2009-05-12 13:50:32'] - tt = to_datetime(ts).tz_localize('UTC') - ut = tt.tz_convert('US/Eastern') - expected = Index([9, 9, 9]) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = ['2008-05-12 09:50:00', - '2008-12-12 09:50:35', - '2008-05-12 09:50:32'] - tt = to_datetime(ts).tz_localize('US/Eastern') - ut = tt.tz_convert('UTC') - expected = Index([13, 14, 13]) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = ['2008-05-12 13:50:00', - '2008-12-12 14:50:35', - '2008-05-12 13:50:32'] - tt = to_datetime(ts).tz_localize('UTC') - ut = tt.tz_convert('US/Eastern') - expected = Index([9, 9, 9]) - tm.assert_index_equal(ut.hour, expected) - - def test_tz_convert_hour_overflow_dst_timestamps(self): - # Regression test for: - # https://github.com/pandas-dev/pandas/issues/13306 - - tz = self.tzstr('US/Eastern') - - # sorted case US/Eastern -> UTC - ts = [Timestamp('2008-05-12 09:50:00', tz=tz), - Timestamp('2008-12-12 09:50:35', tz=tz), - Timestamp('2009-05-12 09:50:32', tz=tz)] - tt 
= to_datetime(ts) - ut = tt.tz_convert('UTC') - expected = Index([13, 14, 13]) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'), - Timestamp('2008-12-12 14:50:35', tz='UTC'), - Timestamp('2009-05-12 13:50:32', tz='UTC')] - tt = to_datetime(ts) - ut = tt.tz_convert('US/Eastern') - expected = Index([9, 9, 9]) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = [Timestamp('2008-05-12 09:50:00', tz=tz), - Timestamp('2008-12-12 09:50:35', tz=tz), - Timestamp('2008-05-12 09:50:32', tz=tz)] - tt = to_datetime(ts) - ut = tt.tz_convert('UTC') - expected = Index([13, 14, 13]) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'), - Timestamp('2008-12-12 14:50:35', tz='UTC'), - Timestamp('2008-05-12 13:50:32', tz='UTC')] - tt = to_datetime(ts) - ut = tt.tz_convert('US/Eastern') - expected = Index([9, 9, 9]) - tm.assert_index_equal(ut.hour, expected) - - def test_tslib_tz_convert_trans_pos_plus_1__bug(self): - # Regression test for tslib.tz_convert(vals, tz1, tz2). - # See https://github.com/pandas-dev/pandas/issues/4496 for details. - for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: - idx = date_range(datetime(2011, 3, 26, 23), - datetime(2011, 3, 27, 1), freq=freq) - idx = idx.tz_localize('UTC') - idx = idx.tz_convert('Europe/Moscow') - - expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) - - def test_tslib_tz_convert_dst(self): - for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: - # Start DST - idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq, - tz='UTC') - idx = idx.tz_convert('US/Eastern') - expected = np.repeat(np.array([18, 19, 20, 21, 22, 23, - 0, 1, 3, 4, 5]), - np.array([n, n, n, n, n, n, n, n, n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) - - idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq, - tz='US/Eastern') - idx = idx.tz_convert('UTC') - expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - np.array([n, n, n, n, n, n, n, n, n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) - - # End DST - idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq, - tz='UTC') - idx = idx.tz_convert('US/Eastern') - expected = np.repeat(np.array([19, 20, 21, 22, 23, - 0, 1, 1, 2, 3, 4]), - np.array([n, n, n, n, n, n, n, n, n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) - - idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq, - tz='US/Eastern') - idx = idx.tz_convert('UTC') - expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, - 7, 8, 9, 10]), - np.array([n, n, n, n, n, n, n, n, n, - n, n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) - - # daily - # Start DST - idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', - tz='UTC') - idx = idx.tz_convert('US/Eastern') - tm.assert_index_equal(idx.hour, Index([19, 19])) - - idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', - tz='US/Eastern') - idx = idx.tz_convert('UTC') - tm.assert_index_equal(idx.hour, Index([5, 5])) - - # End DST - idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D', - tz='UTC') - idx = idx.tz_convert('US/Eastern') - tm.assert_index_equal(idx.hour, Index([20, 20])) - - idx = date_range('2014-11-01 00:00', '2014-11-02 000:00', freq='D', - tz='US/Eastern') - idx = idx.tz_convert('UTC') - 
tm.assert_index_equal(idx.hour, Index([4, 4])) - def test_tzlocal(self): # GH 13583 ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal()) @@ -879,32 +126,6 @@ def test_tzlocal(self): offset = offset.total_seconds() * 1000000000 assert ts.value + offset == Timestamp('2011-01-01').value - def test_tz_localize_tzlocal(self): - # GH 13583 - offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) - offset = int(offset.total_seconds() * 1000000000) - - dti = date_range(start='2001-01-01', end='2001-03-01') - dti2 = dti.tz_localize(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) - - dti = date_range(start='2001-01-01', end='2001-03-01', - tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_localize(None) - tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) - - def test_tz_convert_tzlocal(self): - # GH 13583 - # tz_convert doesn't affect to internal - dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC') - dti2 = dti.tz_convert(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - - dti = date_range(start='2001-01-01', end='2001-03-01', - tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_convert(None) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - class TestTimeZoneCacheKey(object): @@ -922,228 +143,6 @@ def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self, tz_name): timezones._p_tz_cache_key(tz_d)) -class TestTimeZones(object): - timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific'] - - def test_index_equals_with_tz(self): - left = date_range('1/1/2011', periods=100, freq='H', tz='utc') - right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern') - - assert not left.equals(right) - - def test_tz_localize_naive(self): - rng = date_range('1/1/2011', periods=100, freq='H') - - conv = rng.tz_localize('US/Pacific') - exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific') - - tm.assert_index_equal(conv, exp) - - def test_tz_localize_roundtrip(self): - for tz in self.timezones: - idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') - idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D') - idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H') - idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T') - for idx in [idx1, idx2, idx3, idx4]: - localized = idx.tz_localize(tz) - expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq, - tz=tz) - tm.assert_index_equal(localized, expected) - - with pytest.raises(TypeError): - localized.tz_localize(tz) - - reset = localized.tz_localize(None) - tm.assert_index_equal(reset, idx) - assert reset.tzinfo is None - - def test_tz_convert_roundtrip(self): - for tz in self.timezones: - idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M', - tz='UTC') - exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M') - - idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D', - tz='UTC') - exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D') - - idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H', - tz='UTC') - exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H') - - idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T', - tz='UTC') - exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T') - - for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), - (idx4, exp4)]: - converted = idx.tz_convert(tz) - reset = converted.tz_convert(None) - tm.assert_index_equal(reset, expected) - assert reset.tzinfo is None - 
tm.assert_index_equal(reset, converted.tz_convert( - 'UTC').tz_localize(None)) - - def test_join_utc_convert(self): - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - - left = rng.tz_convert('US/Eastern') - right = rng.tz_convert('Europe/Berlin') - - for how in ['inner', 'outer', 'left', 'right']: - result = left.join(left[:-5], how=how) - assert isinstance(result, DatetimeIndex) - assert result.tz == left.tz - - result = left.join(right[:-5], how=how) - assert isinstance(result, DatetimeIndex) - assert result.tz.zone == 'UTC' - - def test_join_aware(self): - rng = date_range('1/1/2011', periods=10, freq='H') - - # non-overlapping - rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", - tz="US/Central") - - rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", - tz="US/Eastern") - - result = rng.union(rng2) - assert result.tz.zone == 'UTC' - - def test_intersection(self): - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - - left = rng[10:90][::-1] - right = rng[20:80][::-1] - - assert left.tz == rng.tz - result = left.intersection(right) - assert result.tz == left.tz - - def test_timestamp_equality_different_timezones(self): - utc_range = date_range('1/1/2000', periods=20, tz='UTC') - eastern_range = utc_range.tz_convert('US/Eastern') - berlin_range = utc_range.tz_convert('Europe/Berlin') - - for a, b, c in zip(utc_range, eastern_range, berlin_range): - assert a == b - assert b == c - assert a == c - - assert (utc_range == eastern_range).all() - assert (utc_range == berlin_range).all() - assert (berlin_range == eastern_range).all() - - def test_datetimeindex_tz(self): - rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI', - tz='US/Eastern') - rng2 = DatetimeIndex(data=rng, tz='US/Eastern') - tm.assert_index_equal(rng, rng2) - - def test_normalize_tz(self): - rng = date_range('1/1/2000 9:30', periods=10, freq='D', - tz='US/Eastern') - - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D', - tz='US/Eastern') - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC') - - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC') - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal()) - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal()) - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - @td.skip_if_windows - def test_normalize_tz_local(self): - # see gh-13459 - timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata', - 'Asia/Shanghai', 'Australia/Canberra'] - - for timezone in timezones: - with set_timezone(timezone): - rng = date_range('1/1/2000 9:30', periods=10, freq='D', - tz=tzlocal()) - - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D', - tz=tzlocal()) - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - def test_tzaware_offset(self): - dates = date_range('2012-11-01', periods=3, tz='US/Pacific') - offset = dates + offsets.Hour(5) - assert dates[0] + offsets.Hour(5) == offset[0] - - # GH 6818 - for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']: - dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H') - expected = 
DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00', - '2010-11-01 07:00'], freq='H', tz=tz) - - offset = dates + offsets.Hour(5) - tm.assert_index_equal(offset, expected) - offset = dates + np.timedelta64(5, 'h') - tm.assert_index_equal(offset, expected) - offset = dates + timedelta(hours=5) - tm.assert_index_equal(offset, expected) - - def test_nat(self): - # GH 5546 - dates = [NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize('US/Pacific') - tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific')) - idx = idx.tz_convert('US/Eastern') - tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern')) - idx = idx.tz_convert('UTC') - tm.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC')) - - dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize('US/Pacific') - tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific')) - idx = idx.tz_convert('US/Eastern') - expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) - - idx = idx + offsets.Hour(5) - expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) - idx = idx.tz_convert('US/Pacific') - expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific')) - - idx = idx + np.timedelta64(3, 'h') - expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific')) - - idx = idx.tz_convert('US/Eastern') - expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern')) - - class TestTslib(object): def test_tslib_tz_convert(self):
There is probably some overlap with other modules under tests.indexes.datetimes. We'll deduplicate those in the next pass.
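For context, a minimal sketch (not part of this PR, purely illustrative) of the DST-ambiguity behaviour the collected `tz_localize` tests exercise; the timezone and the per-element flags mirror the test data in the diff above, everything else is assumed:

```python
import numpy as np
import pandas as pd

# 2011-11-06 is the US/Eastern fall-back date, so the 01:00 wall time
# occurs twice and is ambiguous when localizing.
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
         '11/06/2011 02:00', '11/06/2011 03:00']
di = pd.DatetimeIndex(times)

# With the repeated hour present, 'infer' can deduce the transition ...
inferred = di.tz_localize('US/Eastern', ambiguous='infer')

# ... or the caller can pass one DST flag per element
# (True = take the first, still-DST reading of the repeated hour).
flagged = di.tz_localize('US/Eastern',
                         ambiguous=np.array([True, True, False, False, False]))

assert inferred.equals(flagged)
```

When there are no repeated wall times (e.g. a June range), `'infer'` has nothing to disambiguate and localizing behaves the same with or without the flags, which is what the "no dst transition" branches of these tests assert.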
https://api.github.com/repos/pandas-dev/pandas/pulls/19545
2018-02-06T00:55:18Z
2018-02-06T23:41:48Z
2018-02-06T23:41:48Z
2018-02-07T00:19:51Z
Collect Series timezone tests
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py new file mode 100644 index 0000000000000..2e15c964e4e93 --- /dev/null +++ b/pandas/tests/series/test_timezones.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- +""" +Tests for Series timezone-related methods +""" +from datetime import datetime + +import pytest +import pytz +import numpy as np +from dateutil.tz import tzoffset + +import pandas.util.testing as tm +from pandas._libs import tslib +from pandas._libs.tslibs import timezones +from pandas.compat import lrange +from pandas.core.indexes.datetimes import date_range +from pandas import Series, Timestamp, DatetimeIndex, Index + + +class TestSeriesTimezones(object): + # ----------------------------------------------------------------- + # Series.tz_localize + def test_series_tz_localize(self): + + rng = date_range('1/1/2011', periods=100, freq='H') + ts = Series(1, index=rng) + + result = ts.tz_localize('utc') + assert result.index.tz.zone == 'UTC' + + # Can't localize if already tz-aware + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + ts = Series(1, index=rng) + tm.assert_raises_regex(TypeError, 'Already tz-aware', + ts.tz_localize, 'US/Eastern') + + def test_series_tz_localize_ambiguous_bool(self): + # make sure that we are correctly accepting bool values as ambiguous + + # GH#14402 + ts = Timestamp('2015-11-01 01:00:03') + expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') + expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') + + ser = Series([ts]) + expected0 = Series([expected0]) + expected1 = Series([expected1]) + + with pytest.raises(pytz.AmbiguousTimeError): + ser.dt.tz_localize('US/Central') + + result = ser.dt.tz_localize('US/Central', ambiguous=True) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize('US/Central', ambiguous=[True]) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize('US/Central', ambiguous=False) + tm.assert_series_equal(result, expected1) + + result = ser.dt.tz_localize('US/Central', ambiguous=[False]) + tm.assert_series_equal(result, expected1) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_series_tz_localize_empty(self, tzstr): + # GH#2248 + ser = Series() + + ser2 = ser.tz_localize('utc') + assert ser2.index.tz == pytz.utc + + ser2 = ser.tz_localize(tzstr) + timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) + + # ----------------------------------------------------------------- + # Series.tz_convert + + def test_series_tz_convert(self): + rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') + ts = Series(1, index=rng) + + result = ts.tz_convert('Europe/Berlin') + assert result.index.tz.zone == 'Europe/Berlin' + + # can't convert tz-naive + rng = date_range('1/1/2011', periods=200, freq='D') + ts = Series(1, index=rng) + tm.assert_raises_regex(TypeError, "Cannot convert tz-naive", + ts.tz_convert, 'US/Eastern') + + # ----------------------------------------------------------------- + # Series.append + + def test_series_append_aware(self): + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', + tz='US/Eastern') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Eastern') + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], + tz='US/Eastern') + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, 
exp) + assert ts_result.index.tz == rng1.tz + + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC') + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], + tz='UTC') + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + utc = rng1.tz + assert utc == ts_result.index.tz + + # GH#7795 + # different tz coerces to object dtype, not UTC + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', + tz='US/Eastern') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Central') + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'), + Timestamp('1/1/2011 02:00', tz='US/Central')]) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + + def test_series_append_aware_naive(self): + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') + rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', + tz='US/Eastern') + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index.astype(object)) + assert ts_result.index.equals(expected) + + # mixed + rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') + rng2 = lrange(100) + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index) + assert ts_result.index.equals(expected) + + def test_series_append_dst(self): + rng1 = date_range('1/1/2016 01:00', periods=3, freq='H', + tz='US/Eastern') + rng2 = date_range('8/1/2016 01:00', periods=3, freq='H', + tz='US/Eastern') + ser1 = Series([1, 2, 3], index=rng1) + ser2 = Series([10, 11, 12], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00', + '2016-01-01 03:00', '2016-08-01 01:00', + '2016-08-01 02:00', '2016-08-01 03:00'], + tz='US/Eastern') + exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz + + # ----------------------------------------------------------------- + + def test_dateutil_tzoffset_support(self): + values = [188.5, 328.25] + tzinfo = tzoffset(None, 7200) + index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo), + datetime(2012, 5, 11, 12, tzinfo=tzinfo)] + series = Series(data=values, index=index) + + assert series.index.tz == tzinfo + + # it works! #2443 + repr(series.index[0]) + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern']) + def test_tz_aware_asfreq(self, tz): + dr = date_range('2011-12-01', '2012-07-20', freq='D', tz=tz) + + ser = Series(np.random.randn(len(dr)), index=dr) + + # it works! 
+ ser.asfreq('T') + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern']) + def test_string_index_alias_tz_aware(self, tz): + rng = date_range('1/1/2000', periods=10, tz=tz) + ser = Series(np.random.randn(len(rng)), index=rng) + + result = ser['1/3/2000'] + tm.assert_almost_equal(result, ser[2]) + + # TODO: De-duplicate with test below + def test_series_add_tz_mismatch_converts_to_utc_duplicate(self): + rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') + ser = Series(np.random.randn(len(rng)), index=rng) + + ts_moscow = ser.tz_convert('Europe/Moscow') + + result = ser + ts_moscow + assert result.index.tz is pytz.utc + + result = ts_moscow + ser + assert result.index.tz is pytz.utc + + def test_series_add_tz_mismatch_converts_to_utc(self): + rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') + + perm = np.random.permutation(100)[:90] + ser1 = Series(np.random.randn(90), + index=rng.take(perm).tz_convert('US/Eastern')) + + perm = np.random.permutation(100)[:90] + ser2 = Series(np.random.randn(90), + index=rng.take(perm).tz_convert('Europe/Berlin')) + + result = ser1 + ser2 + + uts1 = ser1.tz_convert('utc') + uts2 = ser2.tz_convert('utc') + expected = uts1 + uts2 + + assert result.index.tz == pytz.UTC + tm.assert_series_equal(result, expected) + + def test_series_add_aware_naive_raises(self): + rng = date_range('1/1/2011', periods=10, freq='H') + ser = Series(np.random.randn(len(rng)), index=rng) + + ser_utc = ser.tz_localize('utc') + + with pytest.raises(Exception): + ser + ser_utc + + with pytest.raises(Exception): + ser_utc + ser + + def test_series_align_aware(self): + idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') + ser = Series(np.random.randn(len(idx1)), index=idx1) + ser_central = ser.tz_convert('US/Central') + # # different timezones convert to UTC + + new1, new2 = ser.align(ser_central) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_localized_at_time_between_time(self, tzstr): + from datetime import time + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range('4/16/2012', '5/1/2012', freq='H') + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, t2).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + @pytest.mark.parametrize('tzstr', ['Europe/Berlin', + 'dateutil/Europe/Berlin']) + def test_getitem_pydatetime_tz(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00', + freq='H', tz=tzstr) + ts = Series(index=index, data=index.hour) + time_pandas = Timestamp('2012-12-24 17:00', tz=tzstr) + + dt = datetime(2012, 12, 24, 17, 0) + time_datetime = tslib._localize_pydatetime(dt, tz) + assert ts[time_pandas] == ts[time_datetime] diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 2630984a70807..8f46e0a58580e 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -8,7 +8,7 @@ from dateutil.parser import parse from pytz import NonExistentTimeError from 
distutils.version import LooseVersion -from dateutil.tz import tzlocal, tzoffset +from dateutil.tz import tzlocal from datetime import datetime, timedelta, tzinfo import pandas.util.testing as tm @@ -18,9 +18,9 @@ from pandas.core.indexes.datetimes import bdate_range, date_range from pandas._libs import tslib from pandas._libs.tslibs import timezones, conversion -from pandas import (Index, Series, isna, Timestamp, NaT, +from pandas import (Index, isna, Timestamp, NaT, DatetimeIndex, to_datetime) -from pandas.util.testing import assert_series_equal, set_timezone +from pandas.util.testing import set_timezone class FixedOffset(tzinfo): @@ -142,17 +142,6 @@ def test_tz_localize_dti(self): pytest.raises(pytz.NonExistentTimeError, dti.tz_localize, self.tzstr('US/Eastern')) - def test_tz_localize_empty_series(self): - # #2248 - - ts = Series() - - ts2 = ts.tz_localize('utc') - assert ts2.index.tz == pytz.utc - - ts2 = ts.tz_localize(self.tzstr('US/Eastern')) - assert self.cmptz(ts2.index.tz, self.tz('US/Eastern')) - def test_create_with_tz(self): stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) assert stamp.hour == 5 @@ -455,34 +444,6 @@ def test_ambiguous_nat(self): # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] tm.assert_numpy_array_equal(di_test.values, localized.values) - def test_ambiguous_bool(self): - # make sure that we are correctly accepting bool values as ambiguous - - # gh-14402 - t = Timestamp('2015-11-01 01:00:03') - expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') - expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') - - s = Series([t]) - expected0 = Series([expected0]) - expected1 = Series([expected1]) - - def f(): - s.dt.tz_localize('US/Central') - pytest.raises(pytz.AmbiguousTimeError, f) - - result = s.dt.tz_localize('US/Central', ambiguous=True) - assert_series_equal(result, expected0) - - result = s.dt.tz_localize('US/Central', ambiguous=[True]) - assert_series_equal(result, expected0) - - result = s.dt.tz_localize('US/Central', ambiguous=False) - assert_series_equal(result, expected1) - - result = s.dt.tz_localize('US/Central', ambiguous=[False]) - assert_series_equal(result, expected1) - def test_nonexistent_raise_coerce(self): # See issue 13057 from pytz.exceptions import NonExistentTimeError @@ -565,34 +526,6 @@ def test_index_astype_asobject_tzinfos(self): assert x == exval assert x.tzinfo == exval.tzinfo - def test_localized_at_time_between_time(self): - from datetime import time - - rng = date_range('4/16/2012', '5/1/2012', freq='H') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_local = ts.tz_localize(self.tzstr('US/Eastern')) - - result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr( - 'US/Eastern')) - assert_series_equal(result, expected) - assert self.cmptz(result.index.tz, self.tz('US/Eastern')) - - t1, t2 = time(10, 0), time(11, 0) - result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, - t2).tz_localize(self.tzstr('US/Eastern')) - assert_series_equal(result, expected) - assert self.cmptz(result.index.tz, self.tz('US/Eastern')) - - def test_string_index_alias_tz_aware(self): - rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern')) - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts['1/3/2000'] - tm.assert_almost_equal(result, ts[2]) - def test_fixed_offset(self): dates = [datetime(2000, 1, 1, tzinfo=fixed_off), datetime(2000, 1, 2, tzinfo=fixed_off), @@ -668,15 +601,6 @@ def 
test_shift_localized(self): result = dr_tz.shift(1, '10T') assert result.tz == dr_tz.tz - def test_tz_aware_asfreq(self): - dr = date_range('2011-12-01', '2012-07-20', freq='D', - tz=self.tzstr('US/Eastern')) - - s = Series(np.random.randn(len(dr)), index=dr) - - # it works! - s.asfreq('T') - def test_static_tzinfo(self): # it works! index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST')) @@ -709,28 +633,6 @@ def test_convert_datetime_list(self): assert dr.tz == dr2.tz assert dr2.name == 'foo' - def test_dateutil_tzoffset_support(self): - values = [188.5, 328.25] - tzinfo = tzoffset(None, 7200) - index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo), - datetime(2012, 5, 11, 12, tzinfo=tzinfo)] - series = Series(data=values, index=index) - - assert series.index.tz == tzinfo - - # it works! #2443 - repr(series.index[0]) - - def test_getitem_pydatetime_tz(self): - index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00', - freq='H', tz=self.tzstr('Europe/Berlin')) - ts = Series(index=index, data=index.hour) - time_pandas = Timestamp('2012-12-24 17:00', - tz=self.tzstr('Europe/Berlin')) - time_datetime = self.localize( - self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0)) - assert ts[time_pandas] == ts[time_datetime] - def test_index_drop_dont_lose_tz(self): # #2621 ind = date_range("2012-12-01", periods=10, tz="utc") @@ -1056,33 +958,6 @@ def test_tz_localize_roundtrip(self): tm.assert_index_equal(reset, idx) assert reset.tzinfo is None - def test_series_tz_localize(self): - - rng = date_range('1/1/2011', periods=100, freq='H') - ts = Series(1, index=rng) - - result = ts.tz_localize('utc') - assert result.index.tz.zone == 'UTC' - - # Can't localize if already tz-aware - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - ts = Series(1, index=rng) - tm.assert_raises_regex(TypeError, 'Already tz-aware', - ts.tz_localize, 'US/Eastern') - - def test_series_tz_convert(self): - rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') - ts = Series(1, index=rng) - - result = ts.tz_convert('Europe/Berlin') - assert result.index.tz.zone == 'Europe/Berlin' - - # can't convert tz-naive - rng = date_range('1/1/2011', periods=200, freq='D') - ts = Series(1, index=rng) - tm.assert_raises_regex(TypeError, "Cannot convert tz-naive", - ts.tz_convert, 'US/Eastern') - def test_tz_convert_roundtrip(self): for tz in self.timezones: idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M', @@ -1127,12 +1002,6 @@ def test_join_utc_convert(self): def test_join_aware(self): rng = date_range('1/1/2011', periods=10, freq='H') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_utc = ts.tz_localize('utc') - - pytest.raises(Exception, ts.__add__, ts_utc) - pytest.raises(Exception, ts_utc.__add__, ts) # non-overlapping rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", @@ -1144,127 +1013,6 @@ def test_join_aware(self): result = rng.union(rng2) assert result.tz.zone == 'UTC' - def test_series_align_aware(self): - idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') - ser = Series(np.random.randn(len(idx1)), index=idx1) - ser_central = ser.tz_convert('US/Central') - # # different timezones convert to UTC - - new1, new2 = ser.align(ser_central) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - def test_append_aware(self): - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', - tz='US/Eastern') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', - tz='US/Eastern') - ts1 = Series([1], index=rng1) - ts2 = 
Series([2], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], - tz='US/Eastern') - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC') - ts1 = Series([1], index=rng1) - ts2 = Series([2], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'], - tz='UTC') - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH 7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', - tz='US/Eastern') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', - tz='US/Central') - ts1 = Series([1], index=rng1) - ts2 = Series([2], index=rng2) - ts_result = ts1.append(ts2) - exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'), - Timestamp('1/1/2011 02:00', tz='US/Central')]) - exp = Series([1, 2], index=exp_index) - assert_series_equal(ts_result, exp) - - def test_append_dst(self): - rng1 = date_range('1/1/2016 01:00', periods=3, freq='H', - tz='US/Eastern') - rng2 = date_range('8/1/2016 01:00', periods=3, freq='H', - tz='US/Eastern') - ts1 = Series([1, 2, 3], index=rng1) - ts2 = Series([10, 11, 12], index=rng2) - ts_result = ts1.append(ts2) - - exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00', - '2016-01-01 03:00', '2016-08-01 01:00', - '2016-08-01 02:00', '2016-08-01 03:00'], - tz='US/Eastern') - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - def test_append_aware_naive(self): - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') - rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', - tz='US/Eastern') - ts1 = Series(np.random.randn(len(rng1)), index=rng1) - ts2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ts1.append(ts2) - - assert ts_result.index.equals(ts1.index.astype(object).append( - ts2.index.astype(object))) - - # mixed - rng1 = date_range('1/1/2011 01:00', periods=1, freq='H') - rng2 = lrange(100) - ts1 = Series(np.random.randn(len(rng1)), index=rng1) - ts2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ts1.append(ts2) - assert ts_result.index.equals(ts1.index.astype(object).append( - ts2.index)) - - def test_series_add_tz_mismatch_converts_to_utc(self): - rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_moscow = ts.tz_convert('Europe/Moscow') - - result = ts + ts_moscow - assert result.index.tz is pytz.utc - - result = ts_moscow + ts - assert result.index.tz is pytz.utc - - def test_arith_utc_convert(self): - rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') - - perm = np.random.permutation(100)[:90] - ts1 = Series(np.random.randn(90), - index=rng.take(perm).tz_convert('US/Eastern')) - - perm = np.random.permutation(100)[:90] - ts2 = Series(np.random.randn(90), - index=rng.take(perm).tz_convert('Europe/Berlin')) - - result = ts1 + ts2 - - uts1 = ts1.tz_convert('utc') - uts2 = ts2.tz_convert('utc') - expected = uts1 + uts2 - - assert result.index.tz == pytz.UTC - assert_series_equal(result, expected) - def test_intersection(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19541
2018-02-05T18:43:18Z
2018-02-06T11:34:33Z
2018-02-06T11:34:33Z
2018-02-11T21:39:10Z
Remove duplicated logic from period_helper
diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 8f1c527a68455..570f20b790750 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -14,6 +14,7 @@ See end of file for stuff pandas uses (search for 'pandas'). */ #include "period_helper.h" +#include "../datetime/np_datetime.h" /* ------------------------------------------------------------------ * Code derived from scikits.timeseries @@ -37,193 +38,39 @@ static int floordiv(int x, int divisor) { } } -/* Table with day offsets for each month (0-based, without and with leap) */ -static int month_offset[2][13] = { - {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, - {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}}; - -/* Table of number of days in a month (0-based, without and with leap) */ -static int days_in_month[2][12] = { - {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, - {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; - -/* Return 1/0 iff year points to a leap year. - * Assumes GREGORIAN_CALENDAR */ -static int dInfoCalc_Leapyear(npy_int64 year) { - return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); -} - -/* Return the day of the week for the given absolute date. */ -static int dInfoCalc_DayOfWeek(npy_int64 absdate) { - int day_of_week; - - if (absdate >= 1) { - day_of_week = (absdate - 1) % 7; - } else { - day_of_week = 6 - ((-absdate) % 7); - } - return day_of_week; -} static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } -/* Return the year offset, that is the absolute date of the day - 31.12.(year-1) - Assumes GREGORIAN_CALENDAR - - This is equivalent to: - - (datetime(year, 1, 1) - datetime(1970, 1, 1)).days - - Note: - For the Julian calendar we shift the absdate (which is measured - using the Gregorian Epoch) value by two days because the Epoch - (0001-01-01) in the Julian calendar lies 2 days before the Epoch in - the Gregorian calendar. */ -static int dInfoCalc_YearOffset(npy_int64 year) { - year--; - if (year >= 0 || -1 / 4 == -1) - return year * 365 + year / 4 - year / 100 + year / 400; - else - return year * 365 + (year - 3) / 4 - (year - 99) / 100 + - (year - 399) / 400; -} - -/* Set the instance's value using the given date and time. +/* Find the absdate (days elapsed since datetime(1, 1, 1) + * for the given year/month/day. * Assumes GREGORIAN_CALENDAR */ -static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, - int month, int day, int hour, - int minute, double second) { +static npy_int64 dInfoCalc_SetFromDateAndTime(int year, int month, int day) { /* Calculate the absolute date */ - { - int leap; - npy_int64 absdate; - int yearoffset; - - /* Range check */ - Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366), - PyExc_ValueError, "year out of range: %i", year); - - /* Is it a leap year ? 
*/ - leap = dInfoCalc_Leapyear(year); + pandas_datetimestruct dts; + npy_int64 unix_date; - /* Negative month values indicate months relative to the years end */ - if (month < 0) month += 13; - Py_AssertWithArg(month >= 1 && month <= 12, PyExc_ValueError, - "month out of range (1-12): %i", month); - - /* Negative values indicate days relative to the months end */ - if (day < 0) day += days_in_month[leap][month - 1] + 1; - Py_AssertWithArg(day >= 1 && day <= days_in_month[leap][month - 1], - PyExc_ValueError, "day out of range: %i", day); - - yearoffset = dInfoCalc_YearOffset(year); - if (yearoffset == INT_ERR_CODE) goto onError; - - absdate = day + month_offset[leap][month - 1] + yearoffset; - - dinfo->absdate = absdate; - - dinfo->year = year; - dinfo->month = month; - dinfo->quarter = ((month - 1) / 3) + 1; - dinfo->day = day; - - dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); - dinfo->day_of_year = (short)(absdate - yearoffset); - } - - /* Calculate the absolute time */ - { - Py_AssertWithArg(hour >= 0 && hour <= 23, PyExc_ValueError, - "hour out of range (0-23): %i", hour); - Py_AssertWithArg(minute >= 0 && minute <= 59, PyExc_ValueError, - "minute out of range (0-59): %i", minute); - Py_AssertWithArg( - second >= (double)0.0 && - (second < (double)60.0 || - (hour == 23 && minute == 59 && second < (double)61.0)), - PyExc_ValueError, - "second out of range (0.0 - <60.0; <61.0 for 23:59): %f", second); - - dinfo->abstime = (double)(hour * 3600 + minute * 60) + second; - - dinfo->hour = hour; - dinfo->minute = minute; - dinfo->second = second; - } - return 0; - -onError: - return INT_ERR_CODE; + memset(&dts, 0, sizeof(pandas_datetimestruct)); + dts.year = year; + dts.month = month; + dts.day = day; + unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts); + return ORD_OFFSET + unix_date; } /* Sets the date part of the date_info struct - Assumes GREGORIAN_CALENDAR - - XXX This could also be done using some integer arithmetics rather - than with this iterative approach... 
*/ + Assumes GREGORIAN_CALENDAR */ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, npy_int64 absdate) { - register npy_int64 year; - npy_int64 yearoffset; - int leap, dayoffset; - int *monthoffset; - - /* Approximate year */ - year = (npy_int64)(((double)absdate) / 365.2425); - - if (absdate > 0) year++; - - /* Apply corrections to reach the correct year */ - while (1) { - /* Calculate the year offset */ - yearoffset = dInfoCalc_YearOffset(year); - if (yearoffset == INT_ERR_CODE) goto onError; - - /* Backward correction: absdate must be greater than the - yearoffset */ - if (yearoffset >= absdate) { - year--; - continue; - } + pandas_datetimestruct dts; - dayoffset = absdate - yearoffset; - leap = dInfoCalc_Leapyear(year); + pandas_datetime_to_datetimestruct(absdate - ORD_OFFSET, PANDAS_FR_D, &dts); + dinfo->year = dts.year; + dinfo->month = dts.month; + dinfo->day = dts.day; - /* Forward correction: non leap years only have 365 days */ - if (dayoffset > 365 && !leap) { - year++; - continue; - } - break; - } - - dinfo->year = year; - - /* Now iterate to find the month */ - monthoffset = month_offset[leap]; - { - register int month; - - for (month = 1; month < 13; month++) { - if (monthoffset[month] >= dayoffset) break; - } - - dinfo->month = month; - dinfo->quarter = monthToQuarter(month); - dinfo->day = dayoffset - month_offset[leap][month - 1]; - } - - dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); - dinfo->day_of_year = dayoffset; dinfo->absdate = absdate; - return 0; - -onError: - return INT_ERR_CODE; } /////////////////////////////////////////////// @@ -358,9 +205,6 @@ PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal, char relation, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) { - // printf("transform_via_day(%ld, %ld, %d)\n", ordinal, - // af_info->intraday_conversion_factor, - // af_info->intraday_conversion_upsample); npy_int64 result; result = (*first_func)(ordinal, relation, af_info); @@ -373,28 +217,26 @@ static npy_int64 DtoB_weekday(npy_int64 absdate) { return (((absdate) / 7) * 5) + (absdate) % 7 - BDAY_OFFSET; } -static npy_int64 DtoB_WeekendToMonday(npy_int64 absdate, int day_of_week) { - if (day_of_week > 4) { - // change to Monday after weekend - absdate += (7 - day_of_week); - } - return DtoB_weekday(absdate); -} +static npy_int64 DtoB(struct date_info *dinfo, int roll_back) { + int day_of_week = dayofweek(dinfo->year, dinfo->month, dinfo->day); + npy_int64 absdate = dinfo->absdate; -static npy_int64 DtoB_WeekendToFriday(npy_int64 absdate, int day_of_week) { - if (day_of_week > 4) { - // change to friday before weekend - absdate -= (day_of_week - 4); + if (roll_back == 1) { + if (day_of_week > 4) { + // change to friday before weekend + absdate -= (day_of_week - 4); + } + } else { + if (day_of_week > 4) { + // change to Monday after weekend + absdate += (7 - day_of_week); + } } return DtoB_weekday(absdate); } static npy_int64 absdate_from_ymd(int y, int m, int d) { - struct date_info tempDate; - if (dInfoCalc_SetFromDateAndTime(&tempDate, y, m, d, 0, 0, 0)) { - return INT_ERR_CODE; - } - return tempDate.absdate; + return dInfoCalc_SetFromDateAndTime(y, m, d); } //************ FROM DAILY *************** @@ -403,8 +245,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) - return INT_ERR_CODE; + dInfoCalc_SetFromAbsDate(&dinfo, 
ordinal + ORD_OFFSET); if (dinfo.month > af_info->to_a_year_end) { return (npy_int64)(dinfo.year + 1 - BASE_YEAR); } else { @@ -415,8 +256,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, int *quarter) { struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) - return INT_ERR_CODE; + dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); if (af_info->to_q_year_end != 12) { dinfo.month -= af_info->to_q_year_end; if (dinfo.month <= 0) { @@ -424,11 +264,10 @@ static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, } else { dinfo.year += 1; } - dinfo.quarter = monthToQuarter(dinfo.month); } *year = dinfo.year; - *quarter = dinfo.quarter; + *quarter = monthToQuarter(dinfo.month); return 0; } @@ -439,10 +278,7 @@ static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, char relation, ordinal = downsample_daytime(ordinal, af_info, 0); - if (DtoQ_yq(ordinal, af_info, &year, &quarter) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - + DtoQ_yq(ordinal, af_info, &year, &quarter); return (npy_int64)((year - BASE_YEAR) * 4 + quarter - 1); } @@ -452,8 +288,7 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) - return INT_ERR_CODE; + dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); return (npy_int64)((dinfo.year - BASE_YEAR) * 12 + dinfo.month - 1); } @@ -467,17 +302,15 @@ static npy_int64 asfreq_DTtoW(npy_int64 ordinal, char relation, static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; + int roll_back; ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) - return INT_ERR_CODE; + dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); - if (relation == 'S') { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } else { - return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } + // This usage defines roll_back the opposite way from the others + roll_back = (relation == 'S') ? 1 : 0; + return DtoB(&dinfo, roll_back); } // all intra day calculations are now done within one function @@ -570,15 +403,12 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, char relation, static npy_int64 asfreq_WtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET)) - return INT_ERR_CODE; + int roll_back; + dInfoCalc_SetFromAbsDate( + &dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET); - if (relation == 'S') { - return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } else { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } + roll_back = (relation == 'S') ? 
0 : 1; + return DtoB(&dinfo, roll_back); } //************ FROM MONTHLY *************** @@ -596,8 +426,7 @@ static npy_int64 asfreq_MtoDT(npy_int64 ordinal, char relation, ordinal += 1; } MtoD_ym(ordinal, &y, &m); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) - return INT_ERR_CODE; + absdate = absdate_from_ymd(y, m, 1); ordinal = absdate - ORD_OFFSET; if (relation == 'E') { @@ -628,16 +457,13 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, char relation, static npy_int64 asfreq_MtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; + int roll_back; - if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET)) - return INT_ERR_CODE; + dInfoCalc_SetFromAbsDate( + &dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET); - if (relation == 'S') { - return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } else { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } + roll_back = (relation == 'S') ? 0 : 1; + return DtoB(&dinfo, roll_back); } //************ FROM QUARTERLY *************** @@ -667,8 +493,7 @@ static npy_int64 asfreq_QtoDT(npy_int64 ordinal, char relation, QtoD_ym(ordinal, &y, &m, af_info); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) - return INT_ERR_CODE; + absdate = absdate_from_ymd(y, m, 1); if (relation == 'E') { absdate -= 1; @@ -704,15 +529,12 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, char relation, static npy_int64 asfreq_QtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET)) - return INT_ERR_CODE; + int roll_back; + dInfoCalc_SetFromAbsDate( + &dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET); - if (relation == 'S') { - return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } else { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } + roll_back = (relation == 'S') ? 0 : 1; + return DtoB(&dinfo, roll_back); } //************ FROM ANNUAL *************** @@ -737,10 +559,6 @@ static npy_int64 asfreq_AtoDT(npy_int64 year, char relation, absdate = absdate_from_ymd(year, month, 1); - if (absdate == INT_ERR_CODE) { - return INT_ERR_CODE; - } - if (relation == 'E') { absdate -= 1; } @@ -775,15 +593,12 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, char relation, static npy_int64 asfreq_AtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET)) - return INT_ERR_CODE; + int roll_back; + dInfoCalc_SetFromAbsDate( + &dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET); - if (relation == 'S') { - return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } else { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } + roll_back = (relation == 'S') ? 
0 : 1; + return DtoB(&dinfo, roll_back); } static npy_int64 nofunc(npy_int64 ordinal, char relation, @@ -815,10 +630,6 @@ void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) { get_freq_group_index(max_value(fromGroup, FR_DAY)), get_freq_group_index(max_value(toGroup, FR_DAY))); - // printf("get_asfreq_info(%d, %d) %ld, %d\n", fromFreq, toFreq, - // af_info->intraday_conversion_factor, - // af_info->intraday_conversion_upsample); - switch (fromGroup) { case FR_WK: af_info->from_week_end = calc_week_end(fromFreq, fromGroup); @@ -1014,8 +825,6 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq) { } double get_abs_time(int freq, npy_int64 date_ordinal, npy_int64 ordinal) { - // printf("get_abs_time %d %lld %lld\n", freq, date_ordinal, ordinal); - int freq_index, day_index, base_index; npy_int64 per_day, start_ord; double unit, result; @@ -1028,23 +837,15 @@ double get_abs_time(int freq, npy_int64 date_ordinal, npy_int64 ordinal) { day_index = get_freq_group_index(FR_DAY); base_index = get_freq_group_index(FR_SEC); - // printf(" indices: day %d, freq %d, base %d\n", day_index, freq_index, - // base_index); - per_day = get_daytime_conversion_factor(day_index, freq_index); unit = get_daytime_conversion_factor(freq_index, base_index); - // printf(" per_day: %lld, unit: %f\n", per_day, unit); - if (base_index < freq_index) { unit = 1 / unit; - // printf(" corrected unit: %f\n", unit); } start_ord = date_ordinal * per_day; - // printf("start_ord: %lld\n", start_ord); result = (double)(unit * (ordinal - start_ord)); - // printf(" result: %f\n", result); return result; } @@ -1062,9 +863,6 @@ static int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, double abstime) { dinfo->hour = hour; dinfo->minute = minute; dinfo->second = second; - - dinfo->abstime = abstime; - return 0; } @@ -1073,19 +871,16 @@ static int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, double abstime) { static int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo, npy_int64 absdate, double abstime) { /* Bounds check */ - Py_AssertWithArg(abstime >= 0.0 && abstime <= SECONDS_PER_DAY, - PyExc_ValueError, - "abstime out of range (0.0 - 86400.0): %f", abstime); + // The calling function is responsible for ensuring that + // abstime >= 0.0 && abstime <= 86400 /* Calculate the date */ - if (dInfoCalc_SetFromAbsDate(dinfo, absdate)) goto onError; + dInfoCalc_SetFromAbsDate(dinfo, absdate); /* Calculate the time */ - if (dInfoCalc_SetFromAbsTime(dinfo, abstime)) goto onError; + dInfoCalc_SetFromAbsTime(dinfo, abstime); return 0; -onError: - return INT_ERR_CODE; } /* ------------------------------------------------------------------ @@ -1102,19 +897,8 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, get_asfreq_info(freq1, freq2, &finfo); - // printf("\n%x %d %d %ld %ld\n", func, freq1, freq2, - // finfo.intraday_conversion_factor, -finfo.intraday_conversion_factor); - val = (*func)(period_ordinal, relation, &finfo); - - if (val == INT_ERR_CODE) { - // Py_Error(PyExc_ValueError, "Unable to convert to desired - // frequency."); - goto onError; - } return val; -onError: - return INT_ERR_CODE; } /* generate an ordinal in period space */ @@ -1155,9 +939,7 @@ npy_int64 get_period_ordinal(int year, int month, int day, int hour, int minute, } if (freq == FR_HR) { - if ((absdays = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) { - goto onError; - } + absdays = absdate_from_ymd(year, month, day); delta = (absdays - ORD_OFFSET); return (npy_int64)(delta * 24 + hour); } @@ -1171,9 +953,7 @@ 
npy_int64 get_period_ordinal(int year, int month, int day, int hour, int minute, } if (freq == FR_BUS) { - if ((days = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) { - goto onError; - } + days = absdate_from_ymd(year, month, day); // calculate the current week assuming sunday as last day of a week weeks = (days - BASE_WEEK_TO_DAY_OFFSET) / DAYS_PER_WEEK; // calculate the current weekday (in range 1 .. 7) @@ -1187,10 +967,7 @@ npy_int64 get_period_ordinal(int year, int month, int day, int hour, int minute, } if (freq_group == FR_WK) { - if ((ordinal = (npy_int64)absdate_from_ymd(year, month, day)) == - INT_ERR_CODE) { - goto onError; - } + ordinal = (npy_int64)absdate_from_ymd(year, month, day); day_adj = freq - FR_WK; return (ordinal - (1 + day_adj)) / 7 + 1 - WEEK_OFFSET; } @@ -1246,32 +1023,6 @@ npy_int64 get_python_ordinal(npy_int64 period_ordinal, int freq) { } -// function to generate a nice string representation of the period -// object, originally from DateObject_strftime - -char *c_strftime(struct date_info *tmp, char *fmt) { - struct tm c_date; - char *result; - struct date_info dinfo = *tmp; - int result_len = strlen(fmt) + 50; - - c_date.tm_sec = (int)dinfo.second; - c_date.tm_min = dinfo.minute; - c_date.tm_hour = dinfo.hour; - c_date.tm_mday = dinfo.day; - c_date.tm_mon = dinfo.month - 1; - c_date.tm_year = dinfo.year - 1900; - c_date.tm_wday = (dinfo.day_of_week + 1) % 7; - c_date.tm_yday = dinfo.day_of_year - 1; - c_date.tm_isdst = -1; - - result = malloc(result_len * sizeof(char)); - - strftime(result, result_len, fmt, &c_date); - - return result; -} - int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year) { asfreq_info af_info; int qtr_freq; @@ -1290,12 +1041,11 @@ int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year) { } get_asfreq_info(FR_DAY, qtr_freq, &af_info); - if (DtoQ_yq(daily_ord, &af_info, year, quarter) == INT_ERR_CODE) return -1; - + DtoQ_yq(daily_ord, &af_info, year, quarter); return 0; } -static int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter) { +int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter) { asfreq_info af_info; int qtr_freq; @@ -1308,37 +1058,13 @@ static int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter) { get_asfreq_info(FR_DAY, qtr_freq, &af_info); - if (DtoQ_yq(ordinal, &af_info, year, quarter) == INT_ERR_CODE) - return INT_ERR_CODE; + DtoQ_yq(ordinal, &af_info, year, quarter); if ((qtr_freq % 1000) > 12) *year -= 1; return 0; } -static int _ISOWeek(struct date_info *dinfo) { - int week; - - /* Estimate */ - week = (dinfo->day_of_year - 1) - dinfo->day_of_week + 3; - if (week >= 0) week = week / 7 + 1; - - /* Verify */ - if (week < 0) { - /* The day lies in last week of the previous year */ - if ((week > -2) || (week == -2 && dInfoCalc_Leapyear(dinfo->year - 1))) - week = 53; - else - week = 52; - } else if (week == 53) { - /* Check if the week belongs to year or year+1 */ - if (31 - dinfo->day + dinfo->day_of_week < 3) { - week = 1; - } - } - - return week; -} int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo) { npy_int64 absdate = get_python_ordinal(ordinal, freq); @@ -1353,101 +1079,6 @@ int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo) { absdate += 1; } - if (dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime)) - return INT_ERR_CODE; - + dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime); return 0; } - -int pyear(npy_int64 ordinal, int freq) { - struct date_info dinfo; - get_date_info(ordinal, freq, 
&dinfo); - return dinfo.year; -} - -int pqyear(npy_int64 ordinal, int freq) { - int year, quarter; - if (_quarter_year(ordinal, freq, &year, &quarter) == INT_ERR_CODE) - return INT_ERR_CODE; - return year; -} - -int pquarter(npy_int64 ordinal, int freq) { - int year, quarter; - if (_quarter_year(ordinal, freq, &year, &quarter) == INT_ERR_CODE) - return INT_ERR_CODE; - return quarter; -} - -int pmonth(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.month; -} - -int pday(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.day; -} - -int pweekday(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.day_of_week; -} - -int pday_of_week(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.day_of_week; -} - -int pday_of_year(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.day_of_year; -} - -int pweek(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return _ISOWeek(&dinfo); -} - -int phour(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.hour; -} - -int pminute(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return dinfo.minute; -} - -int psecond(npy_int64 ordinal, int freq) { - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - return (int)dinfo.second; -} - -int pdays_in_month(npy_int64 ordinal, int freq) { - int days; - struct date_info dinfo; - if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) - return INT_ERR_CODE; - - days = days_in_month[dInfoCalc_Leapyear(dinfo.year)][dinfo.month - 1]; - return days; -} diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h index d3d32f81d1f66..2c74659346b15 100644 --- a/pandas/_libs/src/period_helper.h +++ b/pandas/_libs/src/period_helper.h @@ -24,15 +24,6 @@ frequency conversion routines. 
* declarations from period here */ -#define SECONDS_PER_DAY ((double)86400.0) - -#define Py_AssertWithArg(x, errortype, errorstr, a1) \ - { \ - if (!(x)) { \ - PyErr_Format(errortype, errorstr, a1); \ - goto onError; \ - } \ - } #define Py_Error(errortype, errorstr) \ { \ PyErr_SetString(errortype, errorstr); \ @@ -124,17 +115,13 @@ typedef struct asfreq_info { typedef struct date_info { npy_int64 absdate; - double abstime; double second; int minute; int hour; int day; int month; - int quarter; int year; - int day_of_week; - int day_of_year; } date_info; typedef npy_int64 (*freq_conv_func)(npy_int64, char, asfreq_info *); @@ -155,22 +142,8 @@ int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo); freq_conv_func get_asfreq_func(int fromFreq, int toFreq); void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info); -int pyear(npy_int64 ordinal, int freq); -int pqyear(npy_int64 ordinal, int freq); -int pquarter(npy_int64 ordinal, int freq); -int pmonth(npy_int64 ordinal, int freq); -int pday(npy_int64 ordinal, int freq); -int pweekday(npy_int64 ordinal, int freq); -int pday_of_week(npy_int64 ordinal, int freq); -int pday_of_year(npy_int64 ordinal, int freq); -int pweek(npy_int64 ordinal, int freq); -int phour(npy_int64 ordinal, int freq); -int pminute(npy_int64 ordinal, int freq); -int psecond(npy_int64 ordinal, int freq); -int pdays_in_month(npy_int64 ordinal, int freq); - -char *c_strftime(struct date_info *dinfo, char *fmt); int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year); +int _quarter_year(npy_int64 ordinal, int freq, int *year, int *quarter); void initialize_daytime_conversion_factor_matrix(void); diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 613e111443636..9bd315b43ea9e 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -191,8 +191,7 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil: cdef: bint isleap int32_t mo_off - int32_t doy, dow - int woy + int day_of_year isleap = is_leapyear(year) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e82c9c613c62a..ba17b3d345ac8 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -11,7 +11,9 @@ from numpy cimport int64_t, import_array, ndarray import numpy as np import_array() -from libc.stdlib cimport free +from libc.stdlib cimport free, malloc +from libc.time cimport strftime, tm +from libc.string cimport strlen from pandas.compat import PY2 @@ -33,6 +35,8 @@ from timestamps import Timestamp from timezones cimport is_utc, is_tzlocal, get_utcoffset, get_dst_info from timedeltas cimport delta_to_nanoseconds +cimport ccalendar +from ccalendar cimport dayofweek, get_day_of_year from ccalendar import MONTH_NUMBERS from ccalendar cimport is_leapyear from frequencies cimport (get_freq_code, get_base_alias, @@ -49,17 +53,12 @@ from pandas.tseries import frequencies cdef extern from "period_helper.h": ctypedef struct date_info: - int64_t absdate - double abstime double second int minute int hour int day int month - int quarter int year - int day_of_week - int day_of_year ctypedef struct asfreq_info: int from_week_end @@ -85,28 +84,43 @@ cdef extern from "period_helper.h": int freq) nogil except INT32_MIN int get_date_info(int64_t ordinal, int freq, - date_info *dinfo) nogil except INT32_MIN - - int pyear(int64_t ordinal, int freq) except INT32_MIN - int pqyear(int64_t ordinal, int freq) except INT32_MIN - int pquarter(int64_t ordinal, int freq) 
except INT32_MIN - int pmonth(int64_t ordinal, int freq) except INT32_MIN - int pday(int64_t ordinal, int freq) except INT32_MIN - int pweekday(int64_t ordinal, int freq) except INT32_MIN - int pday_of_week(int64_t ordinal, int freq) except INT32_MIN - # TODO: pday_of_week and pweekday are identical. Make one an alias instead - # of importing them separately. - int pday_of_year(int64_t ordinal, int freq) except INT32_MIN - int pweek(int64_t ordinal, int freq) except INT32_MIN - int phour(int64_t ordinal, int freq) except INT32_MIN - int pminute(int64_t ordinal, int freq) except INT32_MIN - int psecond(int64_t ordinal, int freq) except INT32_MIN - int pdays_in_month(int64_t ordinal, int freq) except INT32_MIN - char *c_strftime(date_info *dinfo, char *fmt) + date_info *dinfo) nogil + int get_yq(int64_t ordinal, int freq, int *quarter, int *year) + int _quarter_year(int64_t ordinal, int freq, int *year, int *quarter) + initialize_daytime_conversion_factor_matrix() + +@cython.cdivision +cdef char* c_strftime(date_info *dinfo, char *fmt): + """ + function to generate a nice string representation of the period + object, originally from DateObject_strftime + """ + cdef: + tm c_date + char *result + int result_len = strlen(fmt) + 50 + + c_date.tm_sec = <int>dinfo.second + c_date.tm_min = dinfo.minute + c_date.tm_hour = dinfo.hour + c_date.tm_mday = dinfo.day + c_date.tm_mon = dinfo.month - 1 + c_date.tm_year = dinfo.year - 1900 + c_date.tm_wday = (dayofweek(dinfo.year, dinfo.month, dinfo.day) + 1) % 7 + c_date.tm_yday = get_day_of_year(dinfo.year, dinfo.month, dinfo.day) - 1 + c_date.tm_isdst = -1 + + result = <char*>malloc(result_len * sizeof(char)) + + strftime(result, result_len, fmt, &c_date) + + return result + + # ---------------------------------------------------------------------- # Period logic @@ -367,19 +381,105 @@ cdef object _period_strftime(int64_t value, int freq, object fmt): return result + +# ---------------------------------------------------------------------- # period accessors ctypedef int (*accessor)(int64_t ordinal, int freq) except INT32_MIN +cdef int pyear(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return dinfo.year + + +cdef int pqyear(int64_t ordinal, int freq): + cdef: + int year, quarter + _quarter_year(ordinal, freq, &year, &quarter) + return year + + +cdef int pquarter(int64_t ordinal, int freq): + cdef: + int year, quarter + _quarter_year(ordinal, freq, &year, &quarter) + return quarter + + +cdef int pmonth(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return dinfo.month + + +cdef int pday(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return dinfo.day + + +cdef int pweekday(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return dayofweek(dinfo.year, dinfo.month, dinfo.day) + + +cdef int pday_of_year(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return get_day_of_year(dinfo.year, dinfo.month, dinfo.day) + + +cdef int pweek(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return ccalendar.get_week_of_year(dinfo.year, dinfo.month, dinfo.day) + + +cdef int phour(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return dinfo.hour + + +cdef int pminute(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, 
&dinfo) + return dinfo.minute + + +cdef int psecond(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return <int>dinfo.second + + +cdef int pdays_in_month(int64_t ordinal, int freq): + cdef: + date_info dinfo + get_date_info(ordinal, freq, &dinfo) + return ccalendar.get_days_in_month(dinfo.year, dinfo.month) + + def get_period_field_arr(int code, ndarray[int64_t] arr, int freq): cdef: Py_ssize_t i, sz ndarray[int64_t] out accessor f - f = _get_accessor_func(code) - if f is NULL: + func = _get_accessor_func(code) + if func is NULL: raise ValueError('Unrecognized period code: %d' % code) sz = len(arr) @@ -389,36 +489,36 @@ def get_period_field_arr(int code, ndarray[int64_t] arr, int freq): if arr[i] == iNaT: out[i] = -1 continue - out[i] = f(arr[i], freq) + out[i] = func(arr[i], freq) return out cdef accessor _get_accessor_func(int code): if code == 0: - return &pyear + return <accessor>pyear elif code == 1: - return &pqyear + return <accessor>pqyear elif code == 2: - return &pquarter + return <accessor>pquarter elif code == 3: - return &pmonth + return <accessor>pmonth elif code == 4: - return &pday + return <accessor>pday elif code == 5: - return &phour + return <accessor>phour elif code == 6: - return &pminute + return <accessor>pminute elif code == 7: - return &psecond + return <accessor>psecond elif code == 8: - return &pweek + return <accessor>pweek elif code == 9: - return &pday_of_year + return <accessor>pday_of_year elif code == 10: - return &pweekday + return <accessor>pweekday elif code == 11: - return &pdays_in_month + return <accessor>pdays_in_month return NULL
Following on the heels of #19534, this removes a bunch of duplicate logic from period_helper and uses the implementations in np_datetime and ccalendar instead. A handful of functions are moved directly into period, but for the most part it's deletion. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
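To make the deduplication concrete, here is a minimal, self-contained Python sketch of the two calendar computations the deleted C helpers performed and that `ccalendar`'s `dayofweek`/`get_day_of_year` now provide. This is an illustration, not the pandas implementation (which is Cython/C); the 0 = Monday convention matches the removed `dInfoCalc_DayOfWeek`.

```
# Sketch of the calendar arithmetic centralized in ccalendar; proleptic
# Gregorian calendar assumed, as in the deleted period_helper code.

def is_leapyear(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

# Cumulative days before the start of each month (index 0 unused).
_MONTH_OFFSET = [0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

def day_of_year(year, month, day):
    doy = _MONTH_OFFSET[month] + day
    if month > 2 and is_leapyear(year):
        doy += 1
    return doy

def dayofweek(year, month, day):
    # Sakamoto's method; the raw result uses 0 = Sunday, so shift it
    # to the 0 = Monday convention used by the removed helper.
    t = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4]
    if month < 3:
        year -= 1
    dow = (year + year // 4 - year // 100 + year // 400
           + t[month - 1] + day) % 7
    return (dow + 6) % 7

print(dayofweek(2018, 2, 5))    # 0 -> Monday
print(day_of_year(2016, 3, 1))  # 61 (leap year)
```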
https://api.github.com/repos/pandas-dev/pandas/pulls/19540
2018-02-05T17:27:35Z
2018-02-08T01:09:49Z
2018-02-08T01:09:49Z
2018-02-08T01:58:22Z
PERF: improve get_loc on unsorted, non-unique indexes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index eb6c212731822..fa048c8707ff1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1063,6 +1063,7 @@ Performance Improvements - Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`) - Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`) - Improved performance of :func:`MultiIndex.remove_unused_levels` when there are no unused levels, at the cost of a reduction in performance when there are (:issue:`19289`) +- Improved performance of :func:`Index.get_loc` for non-unique indexes (:issue:`19478`) - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) - Improved performance of :func:`pandas.core.groupby.GroupBy.rank` (:issue:`15779`) - Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`) diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 6b23e487aad3a..9968d398e9040 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -183,32 +183,20 @@ cdef class IndexEngine: cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[uint8_t] indexer - ndarray[object] values - int count = 0 - Py_ssize_t i, n - int last_true + ndarray[cnp.uint8_t, ndim=1, cast=True] indexer + ndarray[int64_t, ndim=1] found + int count - values = np.array(self._get_index_values(), copy=False) - n = len(values) - - result = np.empty(n, dtype=bool) - indexer = result.view(np.uint8) + indexer = self._get_index_values() == val + found = np.where(indexer)[0] + count = len(found) - for i in range(n): - if values[i] == val: - count += 1 - indexer[i] = 1 - last_true = i - else: - indexer[i] = 0 - - if count == 0: - raise KeyError(val) + if count > 1: + return indexer if count == 1: - return last_true + return int(found[0]) - return result + raise KeyError(val) def sizeof(self, deep=False): """ return the sizeof our mapping """ @@ -542,9 +530,6 @@ cdef class PeriodEngine(Int64Engine): return super(PeriodEngine, self).get_indexer_non_unique(ordinal_array) - cdef _get_index_values_for_bool_indexer(self): - return self._get_index_values().view('i8') - cpdef convert_scalar(ndarray arr, object value): # we don't turn integers diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index b9fc0ddd7ea1c..6f726dd49f11e 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -55,40 +55,29 @@ cdef class {{name}}Engine(IndexEngine): cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[uint8_t, cast=True] indexer + ndarray[cnp.uint8_t, ndim=1, cast=True] indexer + ndarray[int64_t, ndim=1] found ndarray[{{ctype}}] values int count = 0 - Py_ssize_t i, n - int last_true {{if name != 'Float64'}} if not util.is_integer_object(val): raise KeyError(val) {{endif}} - values = self._get_index_values_for_bool_indexer() - n = len(values) + # A view is needed for some subclasses, such as PeriodEngine: + values = self._get_index_values().view('{{dtype}}') + indexer = values == val + found = np.where(indexer)[0] + count = len(found) - result = np.empty(n, dtype=bool) - indexer = result.view(np.uint8) - - for i in range(n): - if values[i] == val: - count += 1 - indexer[i] = 1 - last_true = i - else: - indexer[i] = 0 - - if count == 0: - raise KeyError(val) + 
if count > 1: + return indexer if count == 1: - return last_true + return int(found[0]) - return result + raise KeyError(val) - cdef _get_index_values_for_bool_indexer(self): - return self._get_index_values() {{endif}} {{endfor}}
- [x] closes #19478 - [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry asv run: ``` [bd4332f4] [9ac7be34] - 475±5μs 422±7μs 0.89 multiindex_object.Duplicates.time_remove_unused_levels - 15.4±0.3ms 13.5±0.2ms 0.88 multiindex_object.GetLoc.time_small_get_loc_warm - 197±5ns 171±2ns 0.87 index_object.Datetime.time_is_dates_only - 8.68±0.3μs 6.88±0.1μs 0.79 index_object.Float64IndexMethod.time_get_loc - 15.4μs 12.0±0.2μs 0.78 index_object.Indexing.time_get_loc_sorted('Float') - 283±9μs 215±2μs 0.76 multiindex_object.Values.time_datetime_level_values_sliced - 15.0μs 11.1μs 0.74 index_object.Indexing.time_get_loc('Float') - 7.38±0.04μs 5.03±0.05μs 0.68 index_object.Indexing.time_get_loc_sorted('Int') - 7.15±0.04μs 4.58±0.04μs 0.64 index_object.Indexing.time_get_loc('Int') - 121±0.9ms 1.62ms 0.01 index_object.Indexing.time_get_loc_non_unique_sorted('Float') - 136ms 1.51ms 0.01 index_object.Indexing.time_get_loc_non_unique('Float') - 166±1ms 1.29±0.03ms 0.01 index_object.Indexing.time_get_loc_non_unique_sorted('Int') - 175±0.9ms 1.29±0.01ms 0.01 index_object.Indexing.time_get_loc_non_unique('Int') SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ```
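The core of the speedup is visible in the `_maybe_get_bool_indexer` hunk: a per-element Cython loop is replaced by a vectorized numpy comparison, so the scan over the index values happens in C. A rough Python rendering of the new logic (hedged: the real code is Cython with typed arrays, and its branches are ordered slightly differently, but the behavior is the same):

```
import numpy as np

def maybe_get_bool_indexer(values, val):
    # Vectorized equality test replaces the old element-by-element loop.
    indexer = values == val
    found = np.where(indexer)[0]
    if len(found) == 0:
        raise KeyError(val)       # no match
    if len(found) == 1:
        return int(found[0])      # unique match: integer position
    return indexer                # duplicates: boolean mask

values = np.array([10, 20, 10, 30])
print(maybe_get_bool_indexer(values, 20))  # 1
print(maybe_get_bool_indexer(values, 10))  # [ True False  True False]
```

This is why the non-unique `get_loc` benchmarks above drop by roughly two orders of magnitude while the unique-path timings improve more modestly.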
https://api.github.com/repos/pandas-dev/pandas/pulls/19539
2018-02-05T17:09:22Z
2018-05-08T00:05:13Z
2018-05-08T00:05:13Z
2018-05-08T05:43:37Z
doc: Fix typo in example
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 58796aa30f0bf..20e72dd6bde91 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -364,7 +364,7 @@ def format(self, formatter, subset=None): >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] - >>> df.style.format({'C': str.upper}) + >>> df.style.format({'c': str.upper}) """ if subset is None: row_locs = range(len(self.data))
Fix typo in the example for pandas.io.formats.style.Styler.format - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
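For context on why the casing matters: the keys of the dict passed to `Styler.format` are matched against the frame's column labels, so `'C'` does not refer to the string column that the example creates as `'c'`. A minimal runnable version of the corrected example (assuming a pandas version contemporary with this PR, where `Styler.render()` returns the HTML):

```
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
df['c'] = ['a', 'b', 'c', 'd']

# Dict keys are column labels, so they must match exactly: 'c', not 'C'.
html = df.style.format({'c': str.upper}).render()
```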
https://api.github.com/repos/pandas-dev/pandas/pulls/19537
2018-02-05T13:02:44Z
2018-02-05T18:43:03Z
2018-02-05T18:43:03Z
2018-02-05T18:43:07Z
TST/CLN: Remove legacy instances of _multiprocess_can_split_ in tests
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index e0fc6c470fe57..d69ddcd8f14d4 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -496,8 +496,6 @@ def zip_frames(*frames): class TestDataFrameAggregate(TestData): - _multiprocess_can_split_ = True - def test_agg_transform(self): with np.errstate(all='ignore'): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index ab341b70dfe91..6fc7fa5486f82 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -14,7 +14,6 @@ class TestPeriodIndex(DatetimeLike): _holder = PeriodIndex - _multiprocess_can_split_ = True def setup_method(self, method): self.indices = dict(index=tm.makePeriodIndex(10), diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index c3bd857036efc..6c644d239069a 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -8,7 +8,6 @@ class TestTimedeltaIndex(object): - _multiprocess_can_split_ = True def test_astype(self): # GH 13149, GH 13209 diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index 70aadd9f57174..68dc0003e2312 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -9,7 +9,6 @@ class TestTimedeltaIndex(object): - _multiprocess_can_split_ = True def test_construction_base_constructor(self): arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')] diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index e64c4e6ac54a5..59e38c2e738b0 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -9,7 +9,6 @@ class TestTimedeltaIndex(object): - _multiprocess_can_split_ = True def test_insert(self): diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index e944aad13f8d5..86d7dd4e1b117 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -420,7 +420,6 @@ def test_equals(self): class TestTimedeltas(object): - _multiprocess_can_split_ = True def test_timedelta_ops(self): # GH4984 diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py index 22546d25273a7..020e9079b3436 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -6,7 +6,6 @@ class TestTimedeltaIndex(object): - _multiprocess_can_split_ = True def test_union(self): diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 32157a9a44e04..ce0f3b89b753e 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -18,7 +18,6 @@ class TestTimedeltaIndex(DatetimeLike): _holder = TimedeltaIndex - _multiprocess_can_split_ = True def setup_method(self, method): self.indices = dict(index=tm.makeTimedeltaIndex(10)) @@ -300,7 +299,6 @@ def test_freq_conversion(self): class TestTimeSeries(object): - _multiprocess_can_split_ = True def test_series_box_timedelta(self): rng = timedelta_range('1 day 1 s', periods=5, freq='h') diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py 
b/pandas/tests/indexes/timedeltas/test_timedelta_range.py index 7624e1f79af15..784ef845fea10 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py @@ -7,7 +7,6 @@ class TestTimedeltas(object): - _multiprocess_can_split_ = True def test_timedelta_range(self): diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py index b4ad28eeacb69..daa9739132d9e 100644 --- a/pandas/tests/indexes/timedeltas/test_tools.py +++ b/pandas/tests/indexes/timedeltas/test_tools.py @@ -11,7 +11,6 @@ class TestTimedeltas(object): - _multiprocess_can_split_ = True def test_to_timedelta(self): def conv(v): diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 64d4940082978..667266be2a89b 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -13,7 +13,6 @@ class TestTimedeltaArithmetic(object): - _multiprocess_can_split_ = True def test_arithmetic_overflow(self): with pytest.raises(OverflowError): @@ -286,7 +285,6 @@ def test_compare_timedelta_ndarray(self): class TestTimedeltas(object): - _multiprocess_can_split_ = True def setup_method(self, method): pass diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 3822ecd0a1b0e..0780c846a6c19 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -164,8 +164,6 @@ def test_apply_dict_depr(self): class TestSeriesAggregate(TestData): - _multiprocess_can_split_ = True - def test_transform(self): # transforming functions
- [X] closes #19532 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` xref https://github.com/pandas-dev/pandas/pull/19509#discussion_r165848506
https://api.github.com/repos/pandas-dev/pandas/pulls/19536
2018-02-04T21:39:53Z
2018-02-05T11:05:21Z
2018-02-05T11:05:21Z
2018-02-05T15:46:52Z
PKG: Exclude test data files.
diff --git a/MANIFEST.in b/MANIFEST.in index 9773019c6e6e0..b417b8890fa24 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,27 +3,39 @@ include LICENSE include RELEASE.md include README.md include setup.py -include pyproject.toml graft doc prune doc/build +graft LICENSES + graft pandas -global-exclude *.so -global-exclude *.pyd +global-exclude *.bz2 +global-exclude *.csv +global-exclude *.dta +global-exclude *.gz +global-exclude *.h5 +global-exclude *.html +global-exclude *.json +global-exclude *.msgpack +global-exclude *.pickle +global-exclude *.png global-exclude *.pyc +global-exclude *.pyd +global-exclude *.sas7bdat +global-exclude *.so +global-exclude *.xls +global-exclude *.xlsm +global-exclude *.xlsx +global-exclude *.xpt +global-exclude *.xz +global-exclude *.zip global-exclude *~ -global-exclude \#* -global-exclude .git* global-exclude .DS_Store -global-exclude *.png +global-exclude .git* +global-exclude \#* -# include examples/data/* -# recursive-include examples *.py -# recursive-include doc/source * -# recursive-include doc/sphinxext * -# recursive-include LICENSES * include versioneer.py include pandas/_version.py include pandas/io/formats/templates/*.tpl diff --git a/ci/script_single.sh b/ci/script_single.sh index f376c920ac71b..60e2fbb33ee5d 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -25,12 +25,12 @@ if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ "$COVERAGE" ]; then - echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas - pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas + echo pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas + pytest -s -m "single" -r xXs --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas else - echo pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas - pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest + echo pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas + pytest -m "single" -r xXs --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest fi diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index a603bf9f7e9e0..a41a6c31b0678 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -76,6 +76,11 @@ Documentation Changes - - +Build Changes +------------- + +- The source and binary distributions no longer include test data files, resulting in smaller download sizes. Tests relying on these data files will be skipped when using ``pandas.test()``. (:issue:`19320`) + .. 
_whatsnew_0232.bug_fixes: Bug Fixes diff --git a/pandas/conftest.py b/pandas/conftest.py index b4a599758417c..82d860b091b82 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1,7 +1,9 @@ +import os import importlib import pytest +import pandas import numpy as np import pandas as pd from pandas.compat import PY3 @@ -17,6 +19,8 @@ def pytest_addoption(parser): help="run high memory tests") parser.addoption("--only-slow", action="store_true", help="run only slow tests") + parser.addoption("--strict-data-files", action="store_true", + help="Fail if a test is skipped for missing data file.") def pytest_runtest_setup(item): @@ -131,6 +135,43 @@ def join_type(request): return request.param +@pytest.fixture +def datapath(request): + """Get the path to a data file. + + Parameters + ---------- + path : str + Path to the file, relative to ``pandas/tests/`` + + Returns + ------- + path : path including ``pandas/tests``. + + Raises + ------ + ValueError + If the path doesn't exist and the --strict-data-files option is set. + """ + def deco(*args): + path = os.path.join('pandas', 'tests', *args) + if not os.path.exists(path): + if request.config.getoption("--strict-data-files"): + msg = "Could not find file {} and --strict-data-files is set." + raise ValueError(msg.format(path)) + else: + msg = "Could not find {}." + pytest.skip(msg.format(path)) + return path + return deco + + +@pytest.fixture +def iris(datapath): + """The iris dataset as a DataFrame.""" + return pandas.read_csv(datapath('data', 'iris.csv')) + + @pytest.fixture(params=['nlargest', 'nsmallest']) def nselect_method(request): """ diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 362f917e74972..c925c4c403960 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -1182,12 +1182,12 @@ def test_iter(self): ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] assert result == expected - def test_legacy_pickle(self): + def test_legacy_pickle(self, datapath): if PY3: pytest.skip("testing for legacy pickles not " "support on py3") - path = tm.get_data_path('multiindex_v1.pickle') + path = datapath('indexes', 'data', 'multiindex_v1.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) @@ -1203,10 +1203,10 @@ def test_legacy_pickle(self): assert_almost_equal(res, exp) assert_almost_equal(exp, exp2) - def test_legacy_v2_unpickle(self): + def test_legacy_v2_unpickle(self, datapath): # 0.7.3 -> 0.8.0 format manage - path = tm.get_data_path('mindex_073.pickle') + path = datapath('indexes', 'data', 'mindex_073.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 8deb51e190bab..7623587803b41 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,32 +1,23 @@ -import os - import pytest from pandas.io.parsers import read_table -from pandas.util import testing as tm - - -@pytest.fixture -def parser_data(request): - return os.path.join(tm.get_data_path(), '..', 'parser', 'data') @pytest.fixture -def tips_file(parser_data): +def tips_file(datapath): """Path to the tips dataset""" - return os.path.join(parser_data, 'tips.csv') + return datapath('io', 'parser', 'data', 'tips.csv') @pytest.fixture -def jsonl_file(parser_data): +def jsonl_file(datapath): """Path a JSONL dataset""" - return os.path.join(parser_data, 'items.jsonl') + return datapath('io', 'parser', 'data', 'items.jsonl') @pytest.fixture -def 
salaries_table(parser_data): +def salaries_table(datapath): """DataFrame with the salaries dataset""" - path = os.path.join(parser_data, 'salaries.csv') - return read_table(path) + return read_table(datapath('io', 'parser', 'data', 'salaries.csv')) @pytest.fixture diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f221df93dd412..63b7cb3459069 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -916,8 +916,8 @@ def test_unicode_problem_decoding_as_ascii(self): dm = DataFrame({u('c/\u03c3'): Series({'test': np.nan})}) compat.text_type(dm.to_string()) - def test_string_repr_encoding(self): - filepath = tm.get_data_path('unicode_series.csv') + def test_string_repr_encoding(self, datapath): + filepath = datapath('io', 'formats', 'data', 'unicode_series.csv') df = pd.read_csv(filepath, header=None, encoding='latin1') repr(df) repr(df[1]) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index c9074ca49e5be..05ceace20f5a4 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -21,11 +21,11 @@ def test_compression_roundtrip(compression): assert_frame_equal(df, pd.read_json(result)) -def test_read_zipped_json(): - uncompressed_path = tm.get_data_path("tsframe_v012.json") +def test_read_zipped_json(datapath): + uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json") uncompressed_df = pd.read_json(uncompressed_path) - compressed_path = tm.get_data_path("tsframe_v012.json.zip") + compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip") compressed_df = pd.read_json(compressed_path, compression='zip') assert_frame_equal(uncompressed_df, compressed_df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 7e497c395266f..bcbac4400c953 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -37,8 +37,9 @@ class TestPandasContainer(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(scope="function", autouse=True) + def setup(self, datapath): + self.dirpath = datapath("io", "json", "data") self.ts = tm.makeTimeSeries() self.ts.name = 'ts' @@ -59,7 +60,8 @@ def setup_method(self, method): self.mixed_frame = _mixed_frame.copy() self.categorical = _cat_frame.copy() - def teardown_method(self, method): + yield + del self.dirpath del self.ts diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 6e1d3575a1481..9e871d27f0ce8 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -77,7 +77,7 @@ def test_read_csv(self): else: prefix = u("file://") - fname = prefix + compat.text_type(self.csv1) + fname = prefix + compat.text_type(os.path.abspath(self.csv1)) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): @@ -651,21 +651,19 @@ def test_read_csv_parse_simple_list(self): tm.assert_frame_equal(df, expected) @tm.network - def test_url(self): + def test_url(self, datapath): # HTTP(S) url = ('https://raw.github.com/pandas-dev/pandas/master/' 'pandas/tests/io/parser/data/salaries.csv') url_table = self.read_table(url) - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'salaries.csv') + localtable = datapath('io', 'parser', 'data', 'salaries.csv') local_table = self.read_table(localtable) tm.assert_frame_equal(url_table, local_table) # TODO: ftp testing @pytest.mark.slow - 
def test_file(self): - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'salaries.csv') + def test_file(self, datapath): + localtable = datapath('io', 'parser', 'data', 'salaries.csv') local_table = self.read_table(localtable) try: @@ -755,8 +753,8 @@ def test_utf16_bom_skiprows(self): tm.assert_frame_equal(result, expected) - def test_utf16_example(self): - path = tm.get_data_path('utf16_ex.txt') + def test_utf16_example(self, datapath): + path = datapath('io', 'parser', 'data', 'utf16_ex.txt') # it works! and is the right length result = self.read_table(path, encoding='utf-16') @@ -767,8 +765,8 @@ def test_utf16_example(self): result = self.read_table(buf, encoding='utf-16') assert len(result) == 50 - def test_unicode_encoding(self): - pth = tm.get_data_path('unicode_series.csv') + def test_unicode_encoding(self, datapath): + pth = datapath('io', 'parser', 'data', 'unicode_series.csv') result = self.read_csv(pth, header=None, encoding='latin-1') result = result.set_index(0) @@ -1513,10 +1511,9 @@ def test_internal_eof_byte_to_file(self): result = self.read_csv(path) tm.assert_frame_equal(result, expected) - def test_sub_character(self): + def test_sub_character(self, datapath): # see gh-16893 - dirpath = tm.get_data_path() - filename = os.path.join(dirpath, "sub_char.csv") + filename = datapath('io', 'parser', 'data', 'sub_char.csv') expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"]) result = self.read_csv(filename) diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index e84db66561c49..e4950af19ea95 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -120,9 +120,9 @@ def test_read_csv_infer_compression(self): tm.assert_frame_equal(expected, df) - def test_read_csv_compressed_utf16_example(self): + def test_read_csv_compressed_utf16_example(self, datapath): # GH18071 - path = tm.get_data_path('utf16_ex_small.zip') + path = datapath('io', 'parser', 'data', 'utf16_ex_small.zip') result = self.read_csv(path, encoding='utf-16', compression='zip', sep='\t') diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index b91ce04673e29..8060ebf2fbcd4 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -125,9 +125,9 @@ def test_categorical_dtype_high_cardinality_numeric(self): np.sort(actual.a.cat.categories), ordered=True) tm.assert_frame_equal(actual, expected) - def test_categorical_dtype_encoding(self): + def test_categorical_dtype_encoding(self, datapath): # GH 10153 - pth = tm.get_data_path('unicode_series.csv') + pth = datapath('io', 'parser', 'data', 'unicode_series.csv') encoding = 'latin-1' expected = self.read_csv(pth, header=None, encoding=encoding) expected[1] = Categorical(expected[1]) @@ -135,7 +135,7 @@ def test_categorical_dtype_encoding(self): dtype={1: 'category'}) tm.assert_frame_equal(actual, expected) - pth = tm.get_data_path('utf16_ex.txt') + pth = datapath('io', 'parser', 'data', 'utf16_ex.txt') encoding = 'utf-16' expected = self.read_table(pth, encoding=encoding) expected = expected.apply(Categorical) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index fdf45f307e953..e2243b8087a5b 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -48,10 +48,16 @@ def check_compressed_urls(salaries_table, compression, extension, mode, tm.assert_frame_equal(url_table, salaries_table) +@pytest.fixture +def 
tips_df(datapath): + """DataFrame with the tips dataset.""" + return read_csv(datapath('io', 'parser', 'data', 'tips.csv')) + + @pytest.mark.usefixtures("s3_resource") class TestS3(object): - def test_parse_public_s3_bucket(self): + def test_parse_public_s3_bucket(self, tips_df): pytest.importorskip('s3fs') # more of an integration test due to the not-public contents portion # can probably mock this though. @@ -60,45 +66,40 @@ def test_parse_public_s3_bucket(self): ext, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) # Read public file from bucket with not-public contents df = read_csv('s3://cant_get_it/tips.csv') assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3n_bucket(self): + def test_parse_public_s3n_bucket(self, tips_df): # Read from AWS s3 as "s3n" URL df = read_csv('s3n://pandas-test/tips.csv', nrows=10) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3a_bucket(self): + def test_parse_public_s3a_bucket(self, tips_df): # Read from AWS s3 as "s3a" URL df = read_csv('s3a://pandas-test/tips.csv', nrows=10) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_nrows(self): + def test_parse_public_s3_bucket_nrows(self, tips_df): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, nrows=10, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_chunked(self): + def test_parse_public_s3_bucket_chunked(self, tips_df): # Read with a chunksize chunksize = 5 - local_tips = read_csv(tm.get_data_path('tips.csv')) for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df_reader = read_csv('s3://pandas-test/tips.csv' + ext, chunksize=chunksize, compression=comp) @@ -109,14 +110,13 @@ def test_parse_public_s3_bucket_chunked(self): df = df_reader.get_chunk() assert isinstance(df, DataFrame) assert not df.empty - true_df = local_tips.iloc[ + true_df = tips_df.iloc[ chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_chunked_python(self): + def test_parse_public_s3_bucket_chunked_python(self, tips_df): # Read with a chunksize using the Python parser chunksize = 5 - local_tips = read_csv(tm.get_data_path('tips.csv')) for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df_reader = read_csv('s3://pandas-test/tips.csv' + ext, chunksize=chunksize, compression=comp, @@ -127,36 +127,33 @@ def test_parse_public_s3_bucket_chunked_python(self): df = df_reader.get_chunk() assert isinstance(df, DataFrame) assert not df.empty - true_df = local_tips.iloc[ + true_df = tips_df.iloc[ chunksize * i_chunk: chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_python(self): + def test_parse_public_s3_bucket_python(self, tips_df): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 
'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_infer_s3_compression(self): + def test_infer_s3_compression(self, tips_df): for ext in ['', '.gz', '.bz2']: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', compression='infer') assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')), df) + tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3_bucket_nrows_python(self): + def test_parse_public_s3_bucket_nrows_python(self, tips_df): for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]: df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python', nrows=10, compression=comp) assert isinstance(df, DataFrame) assert not df.empty - tm.assert_frame_equal(read_csv( - tm.get_data_path('tips.csv')).iloc[:10], df) + tm.assert_frame_equal(tips_df.iloc[:10], df) def test_s3_fails(self): with pytest.raises(IOError): diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 7717102b64fc5..b6f13039641a2 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import os +import pytest import pandas.util.testing as tm from pandas import read_csv, read_table, DataFrame @@ -45,8 +46,9 @@ def read_table(self, *args, **kwargs): def float_precision_choices(self): raise com.AbstractMethodError(self) - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath('io', 'parser', 'data') self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index e8d9d8b52164b..c7026e3e0fc88 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -28,8 +28,9 @@ class TestTextReader(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath('io', 'parser', 'data') self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index b80263021c269..101ee3e619f5b 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -11,8 +11,9 @@ class TestSAS7BDAT(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "sas", "data") self.data = [] self.test_ix = [list(range(1, 16)), [16]] for j in 1, 2: @@ -123,9 +124,8 @@ def test_iterator_read_too_much(self): rdr.close() -def test_encoding_options(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "test1.sas7bdat") +def test_encoding_options(datapath): + fname = datapath("io", "sas", "data", "test1.sas7bdat") df1 = pd.read_sas(fname) df2 = pd.read_sas(fname, encoding='utf-8') for col in df1.columns: @@ -143,43 +143,39 @@ def test_encoding_options(): assert(x 
== y.decode()) -def test_productsales(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "productsales.sas7bdat") +def test_productsales(datapath): + fname = datapath("io", "sas", "data", "productsales.sas7bdat") df = pd.read_sas(fname, encoding='utf-8') - fname = os.path.join(dirpath, "productsales.csv") + fname = datapath("io", "sas", "data", "productsales.csv") df0 = pd.read_csv(fname, parse_dates=['MONTH']) vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] df0[vn] = df0[vn].astype(np.float64) tm.assert_frame_equal(df, df0) -def test_12659(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "test_12659.sas7bdat") +def test_12659(datapath): + fname = datapath("io", "sas", "data", "test_12659.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "test_12659.csv") + fname = datapath("io", "sas", "data", "test_12659.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0) -def test_airline(): - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "airline.sas7bdat") +def test_airline(datapath): + fname = datapath("io", "sas", "data", "airline.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "airline.csv") + fname = datapath("io", "sas", "data", "airline.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0, check_exact=False) -def test_date_time(): +def test_date_time(datapath): # Support of different SAS date/datetime formats (PR #15871) - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "datetime.sas7bdat") + fname = datapath("io", "sas", "data", "datetime.sas7bdat") df = pd.read_sas(fname) - fname = os.path.join(dirpath, "datetime.csv") + fname = datapath("io", "sas", "data", "datetime.csv") df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) # GH 19732: Timestamps imported from sas will incur floating point errors @@ -187,9 +183,8 @@ def test_date_time(): tm.assert_frame_equal(df, df0) -def test_zero_variables(): +def test_zero_variables(datapath): # Check if the SAS file has zero variables (PR #18184) - dirpath = tm.get_data_path() - fname = os.path.join(dirpath, "zero_variables.sas7bdat") + fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError): pd.read_sas(fname) diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index de31c3e36a8d5..6e5b2ab067aa5 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -1,3 +1,4 @@ +import pytest import pandas as pd import pandas.util.testing as tm from pandas.io.sas.sasreader import read_sas @@ -18,8 +19,9 @@ def numeric_as_float(data): class TestXport(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "sas", "data") self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt") self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt") diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index a89156db38ae3..5c9739be73393 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -149,27 +149,22 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): reader(path) @pytest.mark.parametrize('reader, module, path', [ - (pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')), - (pd.read_table, 'os', os.path.join(HERE, 
'data', 'iris.csv')), - (pd.read_fwf, 'os', os.path.join(HERE, 'data', - 'fixed_width_format.txt')), - (pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')), - (pd.read_feather, 'feather', os.path.join(HERE, 'data', - 'feather-0_3_1.feather')), - (pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf', - 'datetimetz_object.h5')), - (pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')), - (pd.read_sas, 'os', os.path.join(HERE, 'sas', 'data', - 'test1.sas7bdat')), - (pd.read_json, 'os', os.path.join(HERE, 'json', 'data', - 'tsframe_v012.json')), - (pd.read_msgpack, 'os', os.path.join(HERE, 'msgpack', 'data', - 'frame.mp')), - (pd.read_pickle, 'os', os.path.join(HERE, 'data', - 'categorical_0_14_1.pickle')), + (pd.read_csv, 'os', ('io', 'data', 'iris.csv')), + (pd.read_table, 'os', ('io', 'data', 'iris.csv')), + (pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')), + (pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')), + (pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')), + (pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf', + 'datetimetz_object.h5')), + (pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')), + (pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')), + (pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')), + (pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')), + (pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')), ]) - def test_read_fspath_all(self, reader, module, path): + def test_read_fspath_all(self, reader, module, path, datapath): pytest.importorskip(module) + path = datapath(*path) mypath = CustomFSPath(path) result = reader(mypath) @@ -232,13 +227,14 @@ def test_write_fspath_hdf5(self): tm.assert_frame_equal(result, expected) -class TestMMapWrapper(object): +@pytest.fixture +def mmap_file(datapath): + return datapath('io', 'data', 'test_mmap.csv') + - def setup_method(self, method): - self.mmap_file = os.path.join(tm.get_data_path(), - 'test_mmap.csv') +class TestMMapWrapper(object): - def test_constructor_bad_file(self): + def test_constructor_bad_file(self, mmap_file): non_file = StringIO('I am not a file') non_file.fileno = lambda: -1 @@ -252,15 +248,15 @@ def test_constructor_bad_file(self): tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file) - target = open(self.mmap_file, 'r') + target = open(mmap_file, 'r') target.close() msg = "I/O operation on closed file" tm.assert_raises_regex( ValueError, msg, common.MMapWrapper, target) - def test_get_attr(self): - with open(self.mmap_file, 'r') as target: + def test_get_attr(self, mmap_file): + with open(mmap_file, 'r') as target: wrapper = common.MMapWrapper(target) attrs = dir(wrapper.mmap) @@ -273,8 +269,8 @@ def test_get_attr(self): assert not hasattr(wrapper, 'foo') - def test_next(self): - with open(self.mmap_file, 'r') as target: + def test_next(self, mmap_file): + with open(mmap_file, 'r') as target: wrapper = common.MMapWrapper(target) lines = target.readlines() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 2a225e6fe6a45..1fda56dbff772 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -39,8 +39,9 @@ @td.skip_if_no('xlrd', '0.9') class SharedItems(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "data") self.frame = _frame.copy() self.frame2 = _frame2.copy() self.tsframe = _tsframe.copy() @@ -49,7 
+50,6 @@ def setup_method(self, method): def get_csv_refdf(self, basename): """ Obtain the reference data from read_csv with the Python engine. - Test data path is defined by pandas.util.testing.get_data_path() Parameters ---------- @@ -68,8 +68,7 @@ def get_csv_refdf(self, basename): def get_excelfile(self, basename, ext): """ - Return test data ExcelFile instance. Test data path is defined by - pandas.util.testing.get_data_path() + Return test data ExcelFile instance. Parameters ---------- @@ -86,8 +85,7 @@ def get_excelfile(self, basename, ext): def get_exceldf(self, basename, ext, *args, **kwds): """ - Return test data DataFrame. Test data path is defined by - pandas.util.testing.get_data_path() + Return test data DataFrame. Parameters ---------- diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index a56946b82b027..9c6a8de7ed446 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1,6 +1,5 @@ from __future__ import print_function -import glob import os import re import threading @@ -25,8 +24,18 @@ import pandas.util._test_decorators as td from pandas.util.testing import makeCustomDataframe as mkdf, network +HERE = os.path.dirname(__file__) -DATA_PATH = tm.get_data_path() + +@pytest.fixture(params=[ + 'chinese_utf-16.html', + 'chinese_utf-32.html', + 'chinese_utf-8.html', + 'letz_latin1.html', +]) +def html_encoding_file(request, datapath): + """Parametrized fixture for HTML encoding test filenames.""" + return datapath('io', 'data', 'html_encoding', request.param) def assert_framelist_equal(list1, list2, *args, **kwargs): @@ -44,11 +53,11 @@ def assert_framelist_equal(list1, list2, *args, **kwargs): @td.skip_if_no('bs4') -def test_bs4_version_fails(monkeypatch): +def test_bs4_version_fails(monkeypatch, datapath): import bs4 monkeypatch.setattr(bs4, '__version__', '4.2') with tm.assert_raises_regex(ValueError, "minimum version"): - read_html(os.path.join(DATA_PATH, "spam.html"), flavor='bs4') + read_html(datapath("io", "data", "spam.html"), flavor='bs4') def test_invalid_flavor(): @@ -59,8 +68,8 @@ def test_invalid_flavor(): @td.skip_if_no('bs4') @td.skip_if_no('lxml') -def test_same_ordering(): - filename = os.path.join(DATA_PATH, 'valid_markup.html') +def test_same_ordering(datapath): + filename = datapath('io', 'data', 'valid_markup.html') dfs_lxml = read_html(filename, index_col=0, flavor=['lxml']) dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4']) assert_framelist_equal(dfs_lxml, dfs_bs4) @@ -72,11 +81,14 @@ def test_same_ordering(): pytest.param('lxml', marks=pytest.mark.skipif( not td.safe_import('lxml'), reason='No lxml'))], scope="class") class TestReadHtml(object): - spam_data = os.path.join(DATA_PATH, 'spam.html') - spam_data_kwargs = {} - if PY3: - spam_data_kwargs['encoding'] = 'UTF-8' - banklist_data = os.path.join(DATA_PATH, 'banklist.html') + + @pytest.fixture(autouse=True) + def set_files(self, datapath): + self.spam_data = datapath('io', 'data', 'spam.html') + self.spam_data_kwargs = {} + if PY3: + self.spam_data_kwargs['encoding'] = 'UTF-8' + self.banklist_data = datapath("io", "data", "banklist.html") @pytest.fixture(autouse=True, scope="function") def set_defaults(self, flavor, request): @@ -272,7 +284,8 @@ def test_invalid_url(self): @pytest.mark.slow def test_file_url(self): url = self.banklist_data - dfs = self.read_html(file_path_to_url(url), 'First', + dfs = self.read_html(file_path_to_url(os.path.abspath(url)), + 'First', attrs={'id': 'table'}) assert isinstance(dfs, list) for df in dfs: @@ -326,7 
+339,7 @@ def test_multiindex_header_index_skiprows(self): @pytest.mark.slow def test_regex_idempotency(self): url = self.banklist_data - dfs = self.read_html(file_path_to_url(url), + dfs = self.read_html(file_path_to_url(os.path.abspath(url)), match=re.compile(re.compile('Florida')), attrs={'id': 'table'}) assert isinstance(dfs, list) @@ -352,9 +365,9 @@ def test_python_docs_table(self): assert sorted(zz) == sorted(['Repo', 'What']) @pytest.mark.slow - def test_thousands_macau_stats(self): + def test_thousands_macau_stats(self, datapath): all_non_nan_table_index = -2 - macau_data = os.path.join(DATA_PATH, 'macau.html') + macau_data = datapath("io", "data", "macau.html") dfs = self.read_html(macau_data, index_col=0, attrs={'class': 'style1'}) df = dfs[all_non_nan_table_index] @@ -362,9 +375,9 @@ def test_thousands_macau_stats(self): assert not any(s.isna().any() for _, s in df.iteritems()) @pytest.mark.slow - def test_thousands_macau_index_col(self): + def test_thousands_macau_index_col(self, datapath): all_non_nan_table_index = -2 - macau_data = os.path.join(DATA_PATH, 'macau.html') + macau_data = datapath('io', 'data', 'macau.html') dfs = self.read_html(macau_data, index_col=0, header=0) df = dfs[all_non_nan_table_index] @@ -518,8 +531,8 @@ def test_countries_municipalities(self): res2 = self.read_html(data2, header=0) assert_framelist_equal(res1, res2) - def test_nyse_wsj_commas_table(self): - data = os.path.join(DATA_PATH, 'nyse_wsj.html') + def test_nyse_wsj_commas_table(self, datapath): + data = datapath('io', 'data', 'nyse_wsj.html') df = self.read_html(data, index_col=0, header=0, attrs={'class': 'mdcTable'})[0] @@ -530,7 +543,7 @@ def test_nyse_wsj_commas_table(self): tm.assert_index_equal(df.columns, columns) @pytest.mark.slow - def test_banklist_header(self): + def test_banklist_header(self, datapath): from pandas.io.html import _remove_whitespace def try_remove_ws(x): @@ -541,7 +554,7 @@ def try_remove_ws(x): df = self.read_html(self.banklist_data, 'Metcalf', attrs={'id': 'table'})[0] - ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'), + ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'), converters={'Updated Date': Timestamp, 'Closing Date': Timestamp}) assert df.shape == ground_truth.shape @@ -658,19 +671,19 @@ def test_parse_dates_combine(self): newdf = DataFrame({'datetime': raw_dates}) tm.assert_frame_equal(newdf, res[0]) - def test_computer_sales_page(self): - data = os.path.join(DATA_PATH, 'computer_sales_page.html') + def test_computer_sales_page(self, datapath): + data = datapath('io', 'data', 'computer_sales_page.html') with tm.assert_raises_regex(ParserError, r"Passed header=\[0,1\] are " r"too many rows for this " r"multi_index of columns"): self.read_html(data, header=[0, 1]) - data = os.path.join(DATA_PATH, 'computer_sales_page.html') + data = datapath('io', 'data', 'computer_sales_page.html') assert self.read_html(data, header=[1, 2]) - def test_wikipedia_states_table(self): - data = os.path.join(DATA_PATH, 'wikipedia_states.html') + def test_wikipedia_states_table(self, datapath): + data = datapath('io', 'data', 'wikipedia_states.html') assert os.path.isfile(data), '%r is not a file' % data assert os.path.getsize(data), '%r is an empty file' % data result = self.read_html(data, 'Arizona', header=1)[0] @@ -784,15 +797,15 @@ def test_multiple_header_rows(self): html_df = read_html(html, )[0] tm.assert_frame_equal(expected_df, html_df) - def test_works_on_valid_markup(self): - filename = os.path.join(DATA_PATH, 'valid_markup.html') + def 
test_works_on_valid_markup(self, datapath): + filename = datapath('io', 'data', 'valid_markup.html') dfs = self.read_html(filename, index_col=0) assert isinstance(dfs, list) assert isinstance(dfs[0], DataFrame) @pytest.mark.slow - def test_fallback_success(self): - banklist_data = os.path.join(DATA_PATH, 'banklist.html') + def test_fallback_success(self, datapath): + banklist_data = datapath('io', 'data', 'banklist.html') self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib']) def test_to_html_timestamp(self): @@ -835,22 +848,23 @@ def test_displayed_only(self, displayed_only, exp0, exp1): else: assert len(dfs) == 1 # Should not parse hidden table - @pytest.mark.parametrize("f", glob.glob( - os.path.join(DATA_PATH, 'html_encoding', '*.html'))) - def test_encode(self, f): - _, encoding = os.path.splitext(os.path.basename(f))[0].split('_') + def test_encode(self, html_encoding_file): + _, encoding = os.path.splitext( + os.path.basename(html_encoding_file) + )[0].split('_') try: - with open(f, 'rb') as fobj: + with open(html_encoding_file, 'rb') as fobj: from_string = self.read_html(fobj.read(), encoding=encoding, index_col=0).pop() - with open(f, 'rb') as fobj: + with open(html_encoding_file, 'rb') as fobj: from_file_like = self.read_html(BytesIO(fobj.read()), encoding=encoding, index_col=0).pop() - from_filename = self.read_html(f, encoding=encoding, + from_filename = self.read_html(html_encoding_file, + encoding=encoding, index_col=0).pop() tm.assert_frame_equal(from_string, from_file_like) tm.assert_frame_equal(from_string, from_filename) @@ -906,7 +920,7 @@ def seekable(self): assert self.read_html(bad) @pytest.mark.slow - def test_importcheck_thread_safety(self): + def test_importcheck_thread_safety(self, datapath): # see gh-16928 class ErrorThread(threading.Thread): @@ -921,7 +935,7 @@ def run(self): # force import check by reinitalising global vars in html.py reload(pandas.io.html) - filename = os.path.join(DATA_PATH, 'valid_markup.html') + filename = datapath('io', 'data', 'valid_markup.html') helper_thread1 = ErrorThread(target=self.read_html, args=(filename,)) helper_thread2 = ErrorThread(target=self.read_html, args=(filename,)) diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 0b1c1ca178762..412e218f95c6f 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -3,6 +3,7 @@ from warnings import catch_warnings import os import datetime +import glob import numpy as np from distutils.version import LooseVersion @@ -836,13 +837,13 @@ def test_default_encoding(self): assert_frame_equal(result, frame) -def legacy_packers_versions(): - # yield the packers versions - path = tm.get_data_path('legacy_msgpack') - for v in os.listdir(path): - p = os.path.join(path, v) - if os.path.isdir(p): - yield v +files = glob.glob(os.path.join(os.path.dirname(__file__), "data", + "legacy_msgpack", "*", "*.msgpack")) + + +@pytest.fixture(params=files) +def legacy_packer(request, datapath): + return datapath(request.param) class TestMsgpack(object): @@ -919,24 +920,20 @@ def compare_frame_dt_mixed_tzs(self, result, expected, typ, version): else: tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize('version', legacy_packers_versions()) def test_msgpacks_legacy(self, current_packers_data, all_packers_data, - version): - - pth = tm.get_data_path('legacy_msgpack/{0}'.format(version)) - n = 0 - for f in os.listdir(pth): - # GH12142 0.17 files packed in P2 can't be read in P3 - if (compat.PY3 and 
version.startswith('0.17.') and - f.split('.')[-4][-1] == '2'): - continue - vf = os.path.join(pth, f) - try: - with catch_warnings(record=True): - self.compare(current_packers_data, all_packers_data, - vf, version) - except ImportError: - # blosc not installed - continue - n += 1 - assert n > 0, 'Msgpack files are not tested' + legacy_packer, datapath): + + version = os.path.basename(os.path.dirname(legacy_packer)) + + # GH12142 0.17 files packed in P2 can't be read in P3 + if (compat.PY3 and version.startswith('0.17.') and + legacy_packer.split('.')[-4][-1] == '2'): + msg = "Files packed in Py2 can't be read in Py3 ({})" + pytest.skip(msg.format(version)) + try: + with catch_warnings(record=True): + self.compare(current_packers_data, all_packers_data, + legacy_packer, version) + except ImportError: + # blosc not installed + pass diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index fbe2174e603e2..45cbbd43cd6a8 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -12,7 +12,7 @@ 3. Move the created pickle to "data/legacy_pickle/<version>" directory. """ - +import glob import pytest from warnings import catch_warnings @@ -184,27 +184,25 @@ def compare_sp_frame_float(result, expected, typ, version): tm.assert_sp_frame_equal(result, expected) +files = glob.glob(os.path.join(os.path.dirname(__file__), "data", + "legacy_pickle", "*", "*.pickle")) + + +@pytest.fixture(params=files) +def legacy_pickle(request, datapath): + return datapath(request.param) + + # --------------------- # tests # --------------------- -def legacy_pickle_versions(): - # yield the pickle versions - path = tm.get_data_path('legacy_pickle') - for v in os.listdir(path): - p = os.path.join(path, v) - if os.path.isdir(p): - for f in os.listdir(p): - yield (v, f) - - -@pytest.mark.parametrize('version, f', legacy_pickle_versions()) -def test_pickles(current_pickle_data, version, f): +def test_pickles(current_pickle_data, legacy_pickle): if not is_platform_little_endian(): pytest.skip("known failure on non-little endian") - vf = tm.get_data_path('legacy_pickle/{}/{}'.format(version, f)) + version = os.path.basename(os.path.dirname(legacy_pickle)) with catch_warnings(record=True): - compare(current_pickle_data, vf, version) + compare(current_pickle_data, legacy_pickle, version) def test_round_trip_current(current_pickle_data): @@ -260,12 +258,11 @@ def python_unpickler(path): compare_element(result, expected, typ) -def test_pickle_v0_14_1(): +def test_pickle_v0_14_1(datapath): cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False, categories=['a', 'b', 'c', 'd']) - pickle_path = os.path.join(tm.get_data_path(), - 'categorical_0_14_1.pickle') + pickle_path = datapath('io', 'data', 'categorical_0_14_1.pickle') # This code was executed once on v0.14.1 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], @@ -275,14 +272,13 @@ def test_pickle_v0_14_1(): tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path)) -def test_pickle_v0_15_2(): +def test_pickle_v0_15_2(datapath): # ordered -> _ordered # GH 9347 cat = pd.Categorical(values=['a', 'b', 'c'], ordered=False, categories=['a', 'b', 'c', 'd']) - pickle_path = os.path.join(tm.get_data_path(), - 'categorical_0_15_2.pickle') + pickle_path = datapath('io', 'data', 'categorical_0_15_2.pickle') # This code was executed once on v0.15.2 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], diff --git a/pandas/tests/io/test_pytables.py 
b/pandas/tests/io/test_pytables.py index f96e7eeb40ea2..b95df3840b6c5 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4449,28 +4449,27 @@ def f(): store.select('df') tm.assert_raises_regex(ClosedFileError, 'file is not open', f) - def test_pytables_native_read(self): - + def test_pytables_native_read(self, datapath): with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native.h5'), + datapath('io', 'data', 'legacy_hdf/pytables_native.h5'), mode='r') as store: d2 = store['detector/readout'] assert isinstance(d2, DataFrame) @pytest.mark.skipif(PY35 and is_platform_windows(), reason="native2 read fails oddly on windows / 3.5") - def test_pytables_native2_read(self): + def test_pytables_native2_read(self, datapath): with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native2.h5'), + datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'), mode='r') as store: str(store) d1 = store['detector'] assert isinstance(d1, DataFrame) - def test_legacy_table_read(self): + def test_legacy_table_read(self, datapath): # legacy table types with ensure_clean_store( - tm.get_data_path('legacy_hdf/legacy_table.h5'), + datapath('io', 'data', 'legacy_hdf', 'legacy_table.h5'), mode='r') as store: with catch_warnings(record=True): @@ -5117,7 +5116,7 @@ def test_fspath(self): with pd.HDFStore(path) as store: assert os.fspath(store) == str(path) - def test_read_py2_hdf_file_in_py3(self): + def test_read_py2_hdf_file_in_py3(self, datapath): # GH 16781 # tests reading a PeriodIndex DataFrame written in Python2 in Python3 @@ -5132,8 +5131,8 @@ def test_read_py2_hdf_file_in_py3(self): ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) with ensure_clean_store( - tm.get_data_path( - 'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), + datapath('io', 'data', 'legacy_hdf', + 'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), mode='r') as store: result = store['p'] assert_frame_equal(result, expected) @@ -5530,14 +5529,14 @@ def test_store_timezone(self): assert_frame_equal(result, df) - def test_legacy_datetimetz_object(self): + def test_legacy_datetimetz_object(self, datapath): # legacy from < 0.17.0 # 8260 expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5)) with ensure_clean_store( - tm.get_data_path('legacy_hdf/datetimetz_object.h5'), + datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'), mode='r') as store: result = store['df'] assert_frame_equal(result, expected) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index f3ab74d37a2bc..f8f742c5980ac 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -22,7 +22,6 @@ import pytest import sqlite3 import csv -import os import warnings import numpy as np @@ -184,9 +183,11 @@ class MixInBase(object): def teardown_method(self, method): - for tbl in self._get_all_tables(): - self.drop_table(tbl) - self._close_conn() + # if setup fails, there may not be a connection to close. 
+ if hasattr(self, 'conn'): + for tbl in self._get_all_tables(): + self.drop_table(tbl) + self._close_conn() class MySQLMixIn(MixInBase): @@ -253,9 +254,9 @@ def _get_exec(self): else: return self.conn.cursor() - def _load_iris_data(self): + def _load_iris_data(self, datapath): import io - iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv') + iris_csv_file = datapath('io', 'data', 'iris.csv') self.drop_table('iris') self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor]) @@ -503,9 +504,10 @@ class _TestSQLApi(PandasSQLTest): flavor = 'sqlite' mode = None - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.conn = self.connect() - self._load_iris_data() + self._load_iris_data(datapath) self._load_iris_view() self._load_test1_data() self._load_test2_data() @@ -1025,8 +1027,9 @@ class _EngineToConnMixin(object): A mixin that causes setup_connect to create a conn rather than an engine. """ - def setup_method(self, method): - super(_EngineToConnMixin, self).setup_method(method) + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + super(_EngineToConnMixin, self).setup_method(datapath) engine = self.conn conn = engine.connect() self.__tx = conn.begin() @@ -1034,12 +1037,14 @@ def setup_method(self, method): self.__engine = engine self.conn = conn - def teardown_method(self, method): + yield + self.__tx.rollback() self.conn.close() self.conn = self.__engine self.pandasSQL = sql.SQLDatabase(self.__engine) - super(_EngineToConnMixin, self).teardown_method(method) + # XXX: + # super(_EngineToConnMixin, self).teardown_method(method) @pytest.mark.single @@ -1136,7 +1141,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): """ flavor = None - @classmethod + @pytest.fixture(autouse=True, scope='class') def setup_class(cls): cls.setup_import() cls.setup_driver() @@ -1149,10 +1154,11 @@ def setup_class(cls): msg = "{0} - can't connect to {1} server".format(cls, cls.flavor) pytest.skip(msg) - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.setup_connect() - self._load_iris_data() + self._load_iris_data(datapath) self._load_raw_sql() self._load_test1_data() @@ -1920,11 +1926,12 @@ class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest): def connect(cls): return sqlite3.connect(':memory:') - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): self.conn = self.connect() self.pandasSQL = sql.SQLiteDatabase(self.conn) - self._load_iris_data() + self._load_iris_data(datapath) self._load_test1_data() @@ -2135,8 +2142,9 @@ def _skip_if_no_pymysql(): @pytest.mark.single class TestXSQLite(SQLiteMixIn): - def setup_method(self, method): - self.method = method + @pytest.fixture(autouse=True) + def setup_method(self, request, datapath): + self.method = request.function self.conn = sqlite3.connect(':memory:') def test_basic(self): @@ -2215,8 +2223,7 @@ def test_execute_fail(self): with pytest.raises(Exception): sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn) - @tm.capture_stdout - def test_execute_closed_connection(self): + def test_execute_closed_connection(self, request, datapath): create_sql = """ CREATE TABLE test ( @@ -2236,7 +2243,7 @@ def test_execute_closed_connection(self): tquery("select * from test", con=self.conn) # Initialize connection again (needed for tearDown) - self.setup_method(self.method) + self.setup_method(request, datapath) def test_na_roundtrip(self): pass @@ -2341,7 +2348,7 @@ def 
clean_up(test_table_to_drop): "if SQLAlchemy is not installed") class TestXMySQL(MySQLMixIn): - @classmethod + @pytest.fixture(autouse=True, scope='class') def setup_class(cls): _skip_if_no_pymysql() @@ -2370,7 +2377,8 @@ def setup_class(cls): "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, request, datapath): _skip_if_no_pymysql() import pymysql try: @@ -2396,7 +2404,7 @@ def setup_method(self, method): "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") - self.method = method + self.method = request.function def test_basic(self): _skip_if_no_pymysql() @@ -2501,8 +2509,7 @@ def test_execute_fail(self): with pytest.raises(Exception): sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn) - @tm.capture_stdout - def test_execute_closed_connection(self): + def test_execute_closed_connection(self, request, datapath): _skip_if_no_pymysql() drop_sql = "DROP TABLE IF EXISTS test" create_sql = """ @@ -2525,7 +2532,7 @@ def test_execute_closed_connection(self): tquery("select * from test", con=self.conn) # Initialize connection again (needed for tearDown) - self.setup_method(self.method) + self.setup_method(request, datapath) def test_na_roundtrip(self): _skip_if_no_pymysql() diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index bfb72be80400e..cfe47cae7e5e1 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -25,8 +25,8 @@ @pytest.fixture -def dirpath(): - return tm.get_data_path() +def dirpath(datapath): + return datapath("io", "data") @pytest.fixture @@ -39,8 +39,9 @@ def parsed_114(dirpath): class TestStata(object): - def setup_method(self, method): - self.dirpath = tm.get_data_path() + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "data") self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta') self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta') diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index f65791329f2f1..09687dd97bd43 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -74,11 +74,6 @@ def setup_method(self, method): else: self.default_figsize = (8.0, 6.0) self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default' - # common test data - from pandas import read_csv - base = os.path.join(os.path.dirname(curpath()), os.pardir) - path = os.path.join(base, 'tests', 'data', 'iris.csv') - self.iris = read_csv(path) n = 100 with tm.RNGContext(42): diff --git a/pandas/tests/plotting/test_deprecated.py b/pandas/tests/plotting/test_deprecated.py index 2c2d371921d2f..a45b17ec98261 100644 --- a/pandas/tests/plotting/test_deprecated.py +++ b/pandas/tests/plotting/test_deprecated.py @@ -46,10 +46,9 @@ def test_boxplot_deprecated(self): by='indic') @pytest.mark.slow - def test_radviz_deprecated(self): - df = self.iris + def test_radviz_deprecated(self, iris): with tm.assert_produces_warning(FutureWarning): - plotting.radviz(frame=df, class_column='Name') + plotting.radviz(frame=iris, class_column='Name') @pytest.mark.slow def test_plot_params(self): diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index c82c939584dc7..0473610ea2f8f 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -100,11 +100,11 @@ def 
test_scatter_matrix_axis(self): axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) @pytest.mark.slow - def test_andrews_curves(self): + def test_andrews_curves(self, iris): from pandas.plotting import andrews_curves from matplotlib import cm - df = self.iris + df = iris _check_plot_works(andrews_curves, frame=df, class_column='Name') @@ -165,11 +165,11 @@ def test_andrews_curves(self): andrews_curves(data=df, class_column='Name') @pytest.mark.slow - def test_parallel_coordinates(self): + def test_parallel_coordinates(self, iris): from pandas.plotting import parallel_coordinates from matplotlib import cm - df = self.iris + df = iris ax = _check_plot_works(parallel_coordinates, frame=df, class_column='Name') @@ -234,11 +234,11 @@ def test_parallel_coordinates_with_sorted_labels(self): assert prev[1] < nxt[1] and prev[0] < nxt[0] @pytest.mark.slow - def test_radviz(self): + def test_radviz(self, iris): from pandas.plotting import radviz from matplotlib import cm - df = self.iris + df = iris _check_plot_works(radviz, frame=df, class_column='Name') rgba = ('#556270', '#4ECDC4', '#C7F464') @@ -272,8 +272,8 @@ def test_radviz(self): self._check_colors(handles, facecolors=colors) @pytest.mark.slow - def test_subplot_titles(self): - df = self.iris.drop('Name', axis=1).head() + def test_subplot_titles(self, iris): + df = iris.drop('Name', axis=1).head() # Use the column names as the subplot titles title = list(df.columns) diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index cebbcc41c3e17..59b53cd23010e 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1,4 +1,3 @@ -import os import pytest import pytz @@ -13,8 +12,8 @@ class TestAsOfMerge(object): - def read_data(self, name, dedupe=False): - path = os.path.join(tm.get_data_path(), name) + def read_data(self, datapath, name, dedupe=False): + path = datapath('reshape', 'merge', 'data', name) x = read_csv(path) if dedupe: x = (x.drop_duplicates(['time', 'ticker'], keep='last') @@ -23,15 +22,17 @@ def read_data(self, name, dedupe=False): x.time = to_datetime(x.time) return x - def setup_method(self, method): + @pytest.fixture(autouse=True) + def setup_method(self, datapath): - self.trades = self.read_data('trades.csv') - self.quotes = self.read_data('quotes.csv', dedupe=True) - self.asof = self.read_data('asof.csv') - self.tolerance = self.read_data('tolerance.csv') - self.allow_exact_matches = self.read_data('allow_exact_matches.csv') + self.trades = self.read_data(datapath, 'trades.csv') + self.quotes = self.read_data(datapath, 'quotes.csv', dedupe=True) + self.asof = self.read_data(datapath, 'asof.csv') + self.tolerance = self.read_data(datapath, 'tolerance.csv') + self.allow_exact_matches = self.read_data(datapath, + 'allow_exact_matches.csv') self.allow_exact_matches_and_tolerance = self.read_data( - 'allow_exact_matches_and_tolerance.csv') + datapath, 'allow_exact_matches_and_tolerance.csv') def test_examples1(self): """ doc-string examples """ @@ -423,11 +424,11 @@ def test_multiby_indexed(self): pd.merge_asof(left, right, left_index=True, right_index=True, left_by=['k1', 'k2'], right_by=['k1']) - def test_basic2(self): + def test_basic2(self, datapath): - expected = self.read_data('asof2.csv') - trades = self.read_data('trades2.csv') - quotes = self.read_data('quotes2.csv', dedupe=True) + expected = self.read_data(datapath, 'asof2.csv') + trades = self.read_data(datapath, 'trades2.csv') + quotes = self.read_data(datapath, 
'quotes2.csv', dedupe=True) result = merge_asof(trades, quotes, on='time', @@ -467,14 +468,14 @@ def test_valid_join_keys(self): merge_asof(trades, quotes, by='ticker') - def test_with_duplicates(self): + def test_with_duplicates(self, datapath): q = pd.concat([self.quotes, self.quotes]).sort_values( ['time', 'ticker']).reset_index(drop=True) result = merge_asof(self.trades, q, on='time', by='ticker') - expected = self.read_data('asof.csv') + expected = self.read_data(datapath, 'asof.csv') assert_frame_equal(result, expected) def test_with_duplicates_no_on(self): diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 5ea27f9e34e1c..807fb2530603a 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -282,10 +282,10 @@ def test_round_frac(self): result = tmod._round_frac(0.000123456, precision=2) assert result == 0.00012 - def test_qcut_binning_issues(self): + def test_qcut_binning_issues(self, datapath): # #1978, 1979 - path = os.path.join(tm.get_data_path(), 'cut_data.csv') - arr = np.loadtxt(path) + cut_file = datapath(os.path.join('reshape', 'data', 'cut_data.csv')) + arr = np.loadtxt(cut_file) result = qcut(arr, 20) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 74bc08ee9649b..b93a0206479ca 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -1,4 +1,3 @@ -import os from distutils.version import LooseVersion from datetime import date, datetime, timedelta @@ -518,14 +517,15 @@ def test_add(self, offset_types, tz): assert isinstance(result, Timestamp) assert result == expected_localize - def test_pickle_v0_15_2(self): + def test_pickle_v0_15_2(self, datapath): offsets = {'DateOffset': DateOffset(years=1), 'MonthBegin': MonthBegin(1), 'Day': Day(1), 'YearBegin': YearBegin(1), 'Week': Week(1)} - pickle_path = os.path.join(tm.get_data_path(), - 'dateoffset_0_15_2.pickle') + + pickle_path = datapath('tseries', 'offsets', 'data', + 'dateoffset_0_15_2.pickle') # This code was executed once on v0.15.2 to generate the pickle: # with open(pickle_path, 'wb') as f: pickle.dump(offsets, f) # @@ -1838,12 +1838,10 @@ def _check_roundtrip(obj): _check_roundtrip(self.offset2) _check_roundtrip(self.offset * 2) - def test_pickle_compat_0_14_1(self): + def test_pickle_compat_0_14_1(self, datapath): hdays = [datetime(2013, 1, 1) for ele in range(4)] - - pth = tm.get_data_path() - - cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle')) + pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle') + cday0_14_1 = read_pickle(pth) cday = CDay(holidays=hdays) assert cday == cday0_14_1 diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index ab7c4fb528452..4d34987e14f75 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import os import pandas as pd import pytest import numpy as np @@ -841,3 +842,15 @@ def test_locale(self): # GH9744 locales = tm.get_locales() assert len(locales) >= 1 + + +def test_datapath_missing(datapath, request): + if not request.config.getoption("--strict-data-files"): + pytest.skip("Need to set '--strict-data-files'") + + with pytest.raises(ValueError): + datapath('not_a_file') + + result = datapath('data', 'iris.csv') + expected = os.path.join('pandas', 'tests', 'data', 'iris.csv') + assert result == expected diff --git a/pandas/util/_test_decorators.py 
b/pandas/util/_test_decorators.py index 27c24e3a68079..c6ab24403d58d 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -23,7 +23,6 @@ def test_foo(): For more information, refer to the ``pytest`` documentation on ``skipif``. """ - import pytest import locale from distutils.version import LooseVersion diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 675dd94d49750..a5afcb6915034 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -6,7 +6,6 @@ import sys import tempfile import warnings -import inspect import os import subprocess import locale @@ -757,15 +756,6 @@ def ensure_clean(filename=None, return_filelike=False): print("Exception on removing file: {error}".format(error=e)) -def get_data_path(f=''): - """Return the path of a data file, these are relative to the current test - directory. - """ - # get our callers file - _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1] - base_dir = os.path.abspath(os.path.dirname(filename)) - return os.path.join(base_dir, 'data', f) - # ----------------------------------------------------------------------------- # Comparators diff --git a/setup.cfg b/setup.cfg index 6d9657737a8bd..9ec967c25e225 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,4 +32,5 @@ markers = slow: mark a test as slow network: mark a test as network high_memory: mark a test as a high-memory only -doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL +addopts = --strict-data-files +doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL \ No newline at end of file diff --git a/setup.py b/setup.py index dd026bd611727..0fd008612b5bd 100755 --- a/setup.py +++ b/setup.py @@ -735,11 +735,7 @@ def pxd(name): maintainer=AUTHOR, version=versioneer.get_version(), packages=find_packages(include=['pandas', 'pandas.*']), - package_data={'': ['data/*', 'templates/*', '_libs/*.dll'], - 'pandas.tests.io': ['data/legacy_hdf/*.h5', - 'data/legacy_pickle/*/*.pickle', - 'data/legacy_msgpack/*/*.msgpack', - 'data/html_encoding/*.html']}, + package_data={'': ['templates/*', '_libs/*.dll']}, ext_modules=extensions, maintainer_email=EMAIL, description=DESCRIPTION,
```
$ ls -lh dist
total 86552
-rw-r--r-- 1 taugspurger staff 10M Feb 4 14:12 pandas-0.23.0.dev0+218.g3f3b4e0bc-cp36-cp36m-macosx_10_12_x86_64.whl
-rw-r--r-- 1 taugspurger staff 12M Feb 4 14:12 pandas-0.23.0.dev0+218.g3f3b4e0bc.tar.gz
-rw-r--r-- 1 taugspurger staff 7.8M Feb 4 12:18 pandas-0.23.0.dev0+219.g4d77cd8e6-cp36-cp36m-macosx_10_12_x86_64.whl
-rw-r--r-- 1 taugspurger staff 9.5M Feb 4 14:11 pandas-0.23.0.dev0+219.g4d77cd8e6.tar.gz
```

Source: 12M -> 9.5M
Binary: 10M -> 7.8M

I still need to do a bit more testing to make sure I didn't break anything, and to think about how we enforce this going forward, so that we don't write new tests that fail outright, rather than skip, when a data file isn't present.

Closes https://github.com/pandas-dev/pandas/issues/19320
Closes https://github.com/pandas-dev/pandas/issues/21436
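For reviewers, here is a minimal sketch of what a test using the new ``datapath`` fixture looks like; the test name and the assertion are illustrative only, not part of this diff:

```python
import pandas as pd


def test_iris_has_rows(datapath):
    # datapath('data', 'iris.csv') resolves to pandas/tests/data/iris.csv.
    # If the file is missing, the fixture skips the test -- unless pytest
    # was invoked with --strict-data-files, in which case it raises
    # ValueError instead of skipping.
    iris = pd.read_csv(datapath('data', 'iris.csv'))
    assert len(iris) > 0
```

The setup.cfg change adds ``--strict-data-files`` to ``addopts``, so a development checkout (where setup.cfg is present) fails loudly on a missing data file, while, per the whatsnew note, users running ``pandas.test()`` against an installed wheel just see the affected tests skipped.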
https://api.github.com/repos/pandas-dev/pandas/pulls/19535
2018-02-04T20:17:31Z
2018-06-26T15:02:18Z
2018-06-26T15:02:17Z
2018-07-02T15:37:48Z
Remove unused calendar options from period_helper
diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index f1367978bd6c9..8f1c527a68455 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -47,13 +47,10 @@ static int days_in_month[2][12] = { {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; -/* Return 1/0 iff year points to a leap year in calendar. */ -static int dInfoCalc_Leapyear(npy_int64 year, int calendar) { - if (calendar == GREGORIAN_CALENDAR) { - return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); - } else { - return (year % 4 == 0); - } +/* Return 1/0 iff year points to a leap year. + * Assumes GREGORIAN_CALENDAR */ +static int dInfoCalc_Leapyear(npy_int64 year) { + return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); } /* Return the day of the week for the given absolute date. */ @@ -71,40 +68,33 @@ static int dInfoCalc_DayOfWeek(npy_int64 absdate) { static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } /* Return the year offset, that is the absolute date of the day - 31.12.(year-1) in the given calendar. + 31.12.(year-1) + + Assumes GREGORIAN_CALENDAR + + This is equivalent to: + + (datetime(year, 1, 1) - datetime(1970, 1, 1)).days Note: For the Julian calendar we shift the absdate (which is measured using the Gregorian Epoch) value by two days because the Epoch (0001-01-01) in the Julian calendar lies 2 days before the Epoch in the Gregorian calendar. */ -static int dInfoCalc_YearOffset(npy_int64 year, int calendar) { +static int dInfoCalc_YearOffset(npy_int64 year) { year--; - if (calendar == GREGORIAN_CALENDAR) { - if (year >= 0 || -1 / 4 == -1) - return year * 365 + year / 4 - year / 100 + year / 400; - else - return year * 365 + (year - 3) / 4 - (year - 99) / 100 + + if (year >= 0 || -1 / 4 == -1) + return year * 365 + year / 4 - year / 100 + year / 400; + else + return year * 365 + (year - 3) / 4 - (year - 99) / 100 + (year - 399) / 400; - } else if (calendar == JULIAN_CALENDAR) { - if (year >= 0 || -1 / 4 == -1) - return year * 365 + year / 4 - 2; - else - return year * 365 + (year - 3) / 4 - 2; - } - Py_Error(PyExc_ValueError, "unknown calendar"); -onError: - return INT_ERR_CODE; } -/* Set the instance's value using the given date and time. calendar may be set - * to the flags: GREGORIAN_CALENDAR, JULIAN_CALENDAR to indicate the calendar - * to be used. */ - +/* Set the instance's value using the given date and time. + * Assumes GREGORIAN_CALENDAR */ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, int month, int day, int hour, - int minute, double second, - int calendar) { + int minute, double second) { /* Calculate the absolute date */ { int leap; @@ -116,7 +106,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, PyExc_ValueError, "year out of range: %i", year); /* Is it a leap year ? 
*/ - leap = dInfoCalc_Leapyear(year, calendar); + leap = dInfoCalc_Leapyear(year); /* Negative month values indicate months relative to the years end */ if (month < 0) month += 13; @@ -128,7 +118,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, Py_AssertWithArg(day >= 1 && day <= days_in_month[leap][month - 1], PyExc_ValueError, "day out of range: %i", day); - yearoffset = dInfoCalc_YearOffset(year, calendar); + yearoffset = dInfoCalc_YearOffset(year); if (yearoffset == INT_ERR_CODE) goto onError; absdate = day + month_offset[leap][month - 1] + yearoffset; @@ -142,8 +132,6 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); dinfo->day_of_year = (short)(absdate - yearoffset); - - dinfo->calendar = calendar; } /* Calculate the absolute time */ @@ -171,33 +159,27 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, int year, return INT_ERR_CODE; } -/* Sets the date part of the date_info struct using the indicated - calendar. +/* Sets the date part of the date_info struct + Assumes GREGORIAN_CALENDAR XXX This could also be done using some integer arithmetics rather than with this iterative approach... */ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, - npy_int64 absdate, int calendar) { + npy_int64 absdate) { register npy_int64 year; npy_int64 yearoffset; int leap, dayoffset; int *monthoffset; /* Approximate year */ - if (calendar == GREGORIAN_CALENDAR) { - year = (npy_int64)(((double)absdate) / 365.2425); - } else if (calendar == JULIAN_CALENDAR) { - year = (npy_int64)(((double)absdate) / 365.25); - } else { - Py_Error(PyExc_ValueError, "unknown calendar"); - } + year = (npy_int64)(((double)absdate) / 365.2425); if (absdate > 0) year++; /* Apply corrections to reach the correct year */ while (1) { /* Calculate the year offset */ - yearoffset = dInfoCalc_YearOffset(year, calendar); + yearoffset = dInfoCalc_YearOffset(year); if (yearoffset == INT_ERR_CODE) goto onError; /* Backward correction: absdate must be greater than the @@ -208,7 +190,7 @@ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, } dayoffset = absdate - yearoffset; - leap = dInfoCalc_Leapyear(year, calendar); + leap = dInfoCalc_Leapyear(year); /* Forward correction: non leap years only have 365 days */ if (dayoffset > 365 && !leap) { @@ -219,7 +201,6 @@ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, } dinfo->year = year; - dinfo->calendar = calendar; /* Now iterate to find the month */ monthoffset = month_offset[leap]; @@ -410,8 +391,7 @@ static npy_int64 DtoB_WeekendToFriday(npy_int64 absdate, int day_of_week) { static npy_int64 absdate_from_ymd(int y, int m, int d) { struct date_info tempDate; - if (dInfoCalc_SetFromDateAndTime(&tempDate, y, m, d, 0, 0, 0, - GREGORIAN_CALENDAR)) { + if (dInfoCalc_SetFromDateAndTime(&tempDate, y, m, d, 0, 0, 0)) { return INT_ERR_CODE; } return tempDate.absdate; @@ -423,8 +403,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, - GREGORIAN_CALENDAR)) + if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) return INT_ERR_CODE; if (dinfo.month > af_info->to_a_year_end) { return (npy_int64)(dinfo.year + 1 - BASE_YEAR); @@ -436,8 +415,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, static npy_int64 
DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, int *quarter) { struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, - GREGORIAN_CALENDAR)) + if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) return INT_ERR_CODE; if (af_info->to_q_year_end != 12) { dinfo.month -= af_info->to_q_year_end; @@ -474,8 +452,7 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, - GREGORIAN_CALENDAR)) + if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) return INT_ERR_CODE; return (npy_int64)((dinfo.year - BASE_YEAR) * 12 + dinfo.month - 1); } @@ -493,8 +470,7 @@ static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, ordinal = downsample_daytime(ordinal, af_info, 0); - if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, - GREGORIAN_CALENDAR)) + if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET)) return INT_ERR_CODE; if (relation == 'S') { @@ -595,8 +571,7 @@ static npy_int64 asfreq_WtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET, - GREGORIAN_CALENDAR)) + &dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET)) return INT_ERR_CODE; if (relation == 'S') { @@ -655,8 +630,7 @@ static npy_int64 asfreq_MtoB(npy_int64 ordinal, char relation, struct date_info dinfo; if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET, - GREGORIAN_CALENDAR)) + &dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET)) return INT_ERR_CODE; if (relation == 'S') { @@ -731,8 +705,7 @@ static npy_int64 asfreq_QtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET, - GREGORIAN_CALENDAR)) + &dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET)) return INT_ERR_CODE; if (relation == 'S') { @@ -803,8 +776,7 @@ static npy_int64 asfreq_AtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; if (dInfoCalc_SetFromAbsDate( - &dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET, - GREGORIAN_CALENDAR)) + &dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET)) return INT_ERR_CODE; if (relation == 'S') { @@ -1096,19 +1068,17 @@ static int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, double abstime) { return 0; } -/* Set the instance's value using the given date and time. calendar - may be set to the flags: GREGORIAN_CALENDAR, JULIAN_CALENDAR to - indicate the calendar to be used. */ +/* Set the instance's value using the given date and time. + Assumes GREGORIAN_CALENDAR. 
*/ static int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo, - npy_int64 absdate, double abstime, - int calendar) { + npy_int64 absdate, double abstime) { /* Bounds check */ Py_AssertWithArg(abstime >= 0.0 && abstime <= SECONDS_PER_DAY, PyExc_ValueError, "abstime out of range (0.0 - 86400.0): %f", abstime); /* Calculate the date */ - if (dInfoCalc_SetFromAbsDate(dinfo, absdate, calendar)) goto onError; + if (dInfoCalc_SetFromAbsDate(dinfo, absdate)) goto onError; /* Calculate the time */ if (dInfoCalc_SetFromAbsTime(dinfo, abstime)) goto onError; @@ -1356,8 +1326,7 @@ static int _ISOWeek(struct date_info *dinfo) { /* Verify */ if (week < 0) { /* The day lies in last week of the previous year */ - if ((week > -2) || (week == -2 && dInfoCalc_Leapyear(dinfo->year - 1, - dinfo->calendar))) + if ((week > -2) || (week == -2 && dInfoCalc_Leapyear(dinfo->year - 1))) week = 53; else week = 52; @@ -1384,8 +1353,7 @@ int get_date_info(npy_int64 ordinal, int freq, struct date_info *dinfo) { absdate += 1; } - if (dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime, - GREGORIAN_CALENDAR)) + if (dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime)) return INT_ERR_CODE; return 0; @@ -1480,7 +1448,6 @@ int pdays_in_month(npy_int64 ordinal, int freq) { if (get_date_info(ordinal, freq, &dinfo) == INT_ERR_CODE) return INT_ERR_CODE; - days = days_in_month[dInfoCalc_Leapyear(dinfo.year, dinfo.calendar)] - [dinfo.month - 1]; + days = days_in_month[dInfoCalc_Leapyear(dinfo.year)][dinfo.month - 1]; return days; } diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h index 35dd20848a2ec..d3d32f81d1f66 100644 --- a/pandas/_libs/src/period_helper.h +++ b/pandas/_libs/src/period_helper.h @@ -24,9 +24,6 @@ frequency conversion routines. * declarations from period here */ -#define GREGORIAN_CALENDAR 0 -#define JULIAN_CALENDAR 1 - #define SECONDS_PER_DAY ((double)86400.0) #define Py_AssertWithArg(x, errortype, errorstr, a1) \ @@ -138,7 +135,6 @@ typedef struct date_info { int year; int day_of_week; int day_of_year; - int calendar; } date_info; typedef npy_int64 (*freq_conv_func)(npy_int64, char, asfreq_info *); diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e2caebe4c4afc..5098e5c9100ff 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -59,7 +59,6 @@ cdef extern from "period_helper.h": int year int day_of_week int day_of_year - int calendar ctypedef struct asfreq_info: int from_week_end
period_helper supports GREGORIAN_CALENDAR and JULIAN_CALENDAR, but only Gregorian is ever used. This removes that argument, which will allow for the following in follow-ups:

- Remove a bunch of error checking and propagation boilerplate.
- Get a bunch of functions from datetime/np_datetime.h instead of re-implementing them in period_helper.
- Do the same removals to simplify #19498

(The Gregorian leap-year rule that remains is sketched below.)
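For reference, a Python sketch of the Gregorian rule that `dInfoCalc_Leapyear` now assumes unconditionally:

```
def is_gregorian_leap_year(year):
    # Mirrors the simplified dInfoCalc_Leapyear in period_helper.c
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


assert is_gregorian_leap_year(1996)      # divisible by 4
assert not is_gregorian_leap_year(1900)  # century year, not divisible by 400
assert is_gregorian_leap_year(2000)      # divisible by 400
```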
https://api.github.com/repos/pandas-dev/pandas/pulls/19534
2018-02-04T20:04:50Z
2018-02-05T11:06:42Z
2018-02-05T11:06:42Z
2018-02-05T15:23:44Z
Fix parsing corner case; closes #19382
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 69965f44d87a8..85949f671be2d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -455,6 +455,7 @@ Datetimelike - Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) - Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`) - Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`) +- Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Timezones diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 81df7981096ba..877d7deff6ff4 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -609,20 +609,38 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', value = tz_convert_single(value, tz, 'UTC') iresult[i] = value check_dts_bounds(&dts) + except OutOfBoundsDatetime: + # GH#19382 for just-barely-OutOfBounds falling back to + # dateutil parser will return incorrect result because + # it will ignore nanoseconds + if require_iso8601: + if _parse_today_now(val, &iresult[i]): + continue + elif is_coerce: + iresult[i] = NPY_NAT + continue + elif is_raise: + raise ValueError("time data {val} doesn't match " + "format specified" + .format(val=val)) + return values + elif is_coerce: + iresult[i] = NPY_NAT + continue + raise except ValueError: # if requiring iso8601 strings, skip trying other formats if require_iso8601: if _parse_today_now(val, &iresult[i]): continue - if is_coerce: + elif is_coerce: iresult[i] = NPY_NAT continue elif is_raise: - raise ValueError( - "time data %r doesn't match format " - "specified" % (val,)) - else: - return values + raise ValueError("time data {val} doesn't match " + "format specified" + .format(val=val)) + return values try: py_dt = parse_datetime_string(val, dayfirst=dayfirst, diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index a32bfc1f6836c..4f1a053da6f1d 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -26,6 +26,7 @@ from np_datetime cimport (check_dts_bounds, dt64_to_dtstruct, dtstruct_to_dt64, get_datetime64_unit, get_datetime64_value, pydatetime_to_dt64) +from np_datetime import OutOfBoundsDatetime from util cimport (is_string_object, is_datetime64_object, @@ -472,6 +473,13 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, ts = tz_localize_to_utc(np.array([ts], dtype='i8'), tz, ambiguous='raise', errors='raise')[0] + + except OutOfBoundsDatetime: + # GH#19382 for just-barely-OutOfBounds falling back to dateutil + # parser will return incorrect result because it will ignore + # nanoseconds + raise + except ValueError: try: ts = parse_datetime_string(ts, dayfirst=dayfirst, diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 44f3c21d23e62..f8b1f68ba33ce 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -17,6 +17,7 @@ from pandas._libs.tslibs import parsing from pandas.core.tools import datetimes as tools +from pandas.errors import OutOfBoundsDatetime from 
pandas.compat import lmap from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import is_datetime64_ns_dtype @@ -783,7 +784,6 @@ def test_dataframe_dtypes(self, cache): class TestToDatetimeMisc(object): - @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_iso8601(self, cache): result = to_datetime(["2012-01-01 00:00:00"], cache=cache) @@ -1596,6 +1596,20 @@ def test_coerce_of_invalid_datetimes(self): ) ) + def test_to_datetime_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object) + + with pytest.raises(OutOfBoundsDatetime): + to_datetime(arr) + + with pytest.raises(OutOfBoundsDatetime): + # Essentially the same as above, but more directly calling + # the relevant function + tslib.array_to_datetime(arr) + def test_normalize_date(): value = date(2012, 9, 7) diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 301f6da140866..7695c94409232 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -18,6 +18,7 @@ from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz +from pandas.errors import OutOfBoundsDatetime from pandas.compat import long, PY3 from pandas.compat.numpy import np_datetime64_compat from pandas import Timestamp, Period, Timedelta @@ -410,6 +411,13 @@ def test_out_of_bounds_string(self): with pytest.raises(ValueError): Timestamp('2263-01-01') + def test_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + with pytest.raises(OutOfBoundsDatetime): + Timestamp('2262-04-11 23:47:16.854775808') + def test_bounds_with_different_units(self): out_of_bounds_dates = ('1677-09-21', '2262-04-12')
- [x] closes #19382
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
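Condensed from the new tests, the behavior this fixes (`Timestamp.max` is 2262-04-11 23:47:16.854775807, so the string below is one nanosecond out of bounds):

```
import pytest

from pandas import Timestamp
from pandas.errors import OutOfBoundsDatetime

# Previously the dateutil fallback dropped the nanoseconds and silently
# rounded this down to an in-bounds datetime; now it raises.
with pytest.raises(OutOfBoundsDatetime):
    Timestamp('2262-04-11 23:47:16.854775808')
```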
https://api.github.com/repos/pandas-dev/pandas/pulls/19529
2018-02-04T06:17:20Z
2018-02-06T11:27:17Z
2018-02-06T11:27:16Z
2018-02-11T21:39:31Z
centralize and split frame division tests
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 9b99a7b73b82b..1bb8e8edffc6e 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- - import pytest import numpy as np +from pandas.compat import range + import pandas as pd import pandas.util.testing as tm @@ -58,10 +59,129 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname): result = getattr(empty, opname)(const).get_dtype_counts() tm.assert_series_equal(result, pd.Series([2], ['bool'])) + @pytest.mark.parametrize('timestamps', [ + [pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2, + [pd.Timestamp('2012-01-01 13:00:00')] * 2]) + def test_tz_aware_scalar_comparison(self, timestamps): + # Test for issue #15966 + df = pd.DataFrame({'test': timestamps}) + expected = pd.DataFrame({'test': [False, False]}) + tm.assert_frame_equal(df == -1, expected) + # ------------------------------------------------------------------- # Arithmetic +class TestFrameMulDiv(object): + """Tests for DataFrame multiplication and division""" + # ------------------------------------------------------------------ + # Mod By Zero + + def test_df_mod_zero_df(self): + # GH#3590, modulo as ints + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + + # this is technically wrong, as the integer portion is coerced to float + # ### + first = pd.Series([0, 0, 0, 0], dtype='float64') + second = pd.Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({'first': first, 'second': second}) + result = df % df + tm.assert_frame_equal(result, expected) + + def test_df_mod_zero_array(self): + # GH#3590, modulo as ints + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + + # this is technically wrong, as the integer portion is coerced to float + # ### + first = pd.Series([0, 0, 0, 0], dtype='float64') + second = pd.Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({'first': first, 'second': second}) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all='ignore'): + arr = df.values % df.values + result2 = pd.DataFrame(arr, index=df.index, + columns=df.columns, dtype='float64') + result2.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_int(self): + # GH#3590, modulo as ints + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + + result = df % 0 + expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all='ignore'): + arr = df.values.astype('float64') % 0 + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_series_does_not_commute(self): + # GH#3590, modulo as ints + # not commutative with series + df = pd.DataFrame(np.random.randn(10, 5)) + ser = df[0] + res = ser % df + res2 = df % ser + assert not res.fillna(0).equals(res2.fillna(0)) + + # ------------------------------------------------------------------ + # Division By Zero + + def test_df_div_zero_df(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + result = df / df + + first = pd.Series([1.0, 1.0, 1.0, 1.0]) + second = pd.Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({'first': first, 'second': second}) + tm.assert_frame_equal(result, expected) + + def 
test_df_div_zero_array(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + + first = pd.Series([1.0, 1.0, 1.0, 1.0]) + second = pd.Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({'first': first, 'second': second}) + + with np.errstate(all='ignore'): + arr = df.values.astype('float') / df.values + result = pd.DataFrame(arr, index=df.index, + columns=df.columns) + tm.assert_frame_equal(result, expected) + + def test_df_div_zero_int(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + + result = df / 0 + expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns) + expected.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all='ignore'): + arr = df.values.astype('float64') / 0 + result2 = pd.DataFrame(arr, index=df.index, + columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_div_zero_series_does_not_commute(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame(np.random.randn(10, 5)) + ser = df[0] + res = ser / df + res2 = df / ser + assert not res.fillna(0).equals(res2.fillna(0)) + + class TestFrameArithmetic(object): @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano') diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index bdccbec6111d3..bf895be8bc813 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -203,76 +203,6 @@ def test_timestamp_compare(self): result = right_f(Timestamp('nat'), df) assert_frame_equal(result, expected) - def test_modulo(self): - # GH3590, modulo as ints - p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) - - # this is technically wrong as the integer portion is coerced to float - # ### - expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'), - 'second': Series([np.nan, np.nan, np.nan, 0])}) - result = p % p - assert_frame_equal(result, expected) - - # numpy has a slightly different (wrong) treatement - with np.errstate(all='ignore'): - arr = p.values % p.values - result2 = DataFrame(arr, index=p.index, - columns=p.columns, dtype='float64') - result2.iloc[0:3, 1] = np.nan - assert_frame_equal(result2, expected) - - result = p % 0 - expected = DataFrame(np.nan, index=p.index, columns=p.columns) - assert_frame_equal(result, expected) - - # numpy has a slightly different (wrong) treatement - with np.errstate(all='ignore'): - arr = p.values.astype('float64') % 0 - result2 = DataFrame(arr, index=p.index, columns=p.columns) - assert_frame_equal(result2, expected) - - # not commutative with series - p = DataFrame(np.random.randn(10, 5)) - s = p[0] - res = s % p - res2 = p % s - assert not res.fillna(0).equals(res2.fillna(0)) - - def test_div(self): - - # integer div, but deal with the 0's (GH 9144) - p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) - result = p / p - - expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]), - 'second': Series([nan, nan, nan, 1])}) - assert_frame_equal(result, expected) - - with np.errstate(all='ignore'): - arr = p.values.astype('float') / p.values - result2 = DataFrame(arr, index=p.index, - columns=p.columns) - assert_frame_equal(result2, expected) - - result = p / 0 - expected = DataFrame(np.inf, index=p.index, columns=p.columns) - expected.iloc[0:3, 1] = nan - 
assert_frame_equal(result, expected) - - # numpy has a slightly different (wrong) treatement - with np.errstate(all='ignore'): - arr = p.values.astype('float64') / 0 - result2 = DataFrame(arr, index=p.index, - columns=p.columns) - assert_frame_equal(result2, expected) - - p = DataFrame(np.random.randn(10, 5)) - s = p[0] - res = s / p - res2 = p / s - assert not res.fillna(0).equals(res2.fillna(0)) - def test_logical_operators(self): def _check_bin_op(op): diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index e6b47fd69cb05..25dd285e883a0 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -738,12 +738,3 @@ def test_tz_convert_and_localize(self, fn): with assert_raises_regex(ValueError, 'not valid'): df = DataFrame(index=l0) df = getattr(df, fn)('US/Pacific', level=1) - - @pytest.mark.parametrize('timestamps', [ - [Timestamp('2012-01-01 13:00:00+00:00')] * 2, - [Timestamp('2012-01-01 13:00:00')] * 2]) - def test_tz_aware_scalar_comparison(self, timestamps): - # Test for issue #15966 - df = DataFrame({'test': timestamps}) - expected = DataFrame({'test': [False, False]}) - assert_frame_equal(df == -1, expected)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
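The behaviors these centralized tests pin down, condensed from the assertions in the diff:

```
import numpy as np
import pandas as pd

df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

# modulo by zero is all-NaN (GH 3590)
assert (df % 0).isna().all().all()

# integer division by zero gives inf, except NaN where the
# numerator is also 0 (GH 9144)
result = df / 0
assert np.isinf(result['first']).all()
assert result['second'].iloc[:3].isna().all()
```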
https://api.github.com/repos/pandas-dev/pandas/pulls/19527
2018-02-03T18:56:19Z
2018-02-06T11:20:36Z
2018-02-06T11:20:36Z
2018-02-11T21:39:39Z
ERR: Better error msg when merging on tz-aware and tz-naive columns
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 26a7a78bb5c55..69965f44d87a8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -342,6 +342,7 @@ Other API Changes - Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`) - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) - :class:`DateOffset` objects render more simply, e.g. "<DateOffset: days=1>" instead of "<DateOffset: kwds={'days': 1}>" (:issue:`19403`) +- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 99ea2c4fe4688..3ec78ce52c6e5 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -940,6 +940,11 @@ def _maybe_coerce_merge_keys(self): elif is_dtype_equal(lk.dtype, rk.dtype): continue + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. If you wish to proceed " + "you should use pd.concat".format(lk_dtype=lk.dtype, + rk_dtype=rk.dtype)) + # if we are numeric, then allow differing # kinds to proceed, eg. int64 and int8, int and float # further if we are object, but we infer to @@ -968,30 +973,18 @@ def _maybe_coerce_merge_keys(self): pass # Check if we are trying to merge on obviously - # incompatible dtypes GH 9780 + # incompatible dtypes GH 9780, GH 15800 elif is_numeric_dtype(lk) and not is_numeric_dtype(rk): - msg = ("You are trying to merge on {lk_dtype} and " - "{rk_dtype} columns. If you wish to proceed " - "you should use pd.concat".format(lk_dtype=lk.dtype, - rk_dtype=rk.dtype)) raise ValueError(msg) elif not is_numeric_dtype(lk) and is_numeric_dtype(rk): - msg = ("You are trying to merge on {lk_dtype} and " - "{rk_dtype} columns. If you wish to proceed " - "you should use pd.concat".format(lk_dtype=lk.dtype, - rk_dtype=rk.dtype)) raise ValueError(msg) elif is_datetimelike(lk) and not is_datetimelike(rk): - msg = ("You are trying to merge on {lk_dtype} and " - "{rk_dtype} columns. If you wish to proceed " - "you should use pd.concat".format(lk_dtype=lk.dtype, - rk_dtype=rk.dtype)) raise ValueError(msg) elif not is_datetimelike(lk) and is_datetimelike(rk): - msg = ("You are trying to merge on {lk_dtype} and " - "{rk_dtype} columns. If you wish to proceed " - "you should use pd.concat".format(lk_dtype=lk.dtype, - rk_dtype=rk.dtype)) + raise ValueError(msg) + elif is_datetime64tz_dtype(lk) and not is_datetime64tz_dtype(rk): + raise ValueError(msg) + elif not is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk): raise ValueError(msg) # Houston, we have a problem! 
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index a8319339c6435..f63c206c0c407 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1512,11 +1512,13 @@ def test_merge_on_ints_floats_warning(self): '2011-01-02']), (pd.date_range('1/1/2011', periods=2, freq='D'), [0, 1]), (pd.date_range('1/1/2011', periods=2, freq='D'), [0.0, 1.0]), + (pd.date_range('20130101', periods=3), + pd.date_range('20130101', periods=3, tz='US/Eastern')), ([0, 1, 2], Series(['a', 'b', 'a']).astype('category')), ([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')), ]) def test_merge_incompat_dtypes(self, df1_vals, df2_vals): - # GH 9780 + # GH 9780, GH 15800 # Raise a ValueError when a user tries to merge on # dtypes that are incompatible (e.g., obj and int/float)
- [ ] closes #15800
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
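Condensed from the new parametrized test case, the merge that now raises with the clearer message:

```
import pytest

import pandas as pd

left = pd.DataFrame({'key': pd.date_range('20130101', periods=3)})
right = pd.DataFrame(
    {'key': pd.date_range('20130101', periods=3, tz='US/Eastern')})

# merging a tz-naive key with a tz-aware key now raises a ValueError
# that points the user at pd.concat instead of failing obscurely
with pytest.raises(ValueError):
    left.merge(right, on='key')
```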
https://api.github.com/repos/pandas-dev/pandas/pulls/19525
2018-02-03T17:19:23Z
2018-02-03T20:30:30Z
2018-02-03T20:30:30Z
2018-02-03T21:52:08Z
Frame ops prelims - de-duplicate, remove unused kwargs
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ea56ebad7d782..1df8ea324eb79 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -581,6 +581,7 @@ Numeric - Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) - Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) - Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) +- Bug in :class:`DataFrame` flex arithmetic (e.g. `df.add(other, fill_value=foo)`) with a `fill_value` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`) Indexing diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 201d8ba427c8a..882ea982afe29 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3915,8 +3915,7 @@ def reorder_levels(self, order, axis=0): # ---------------------------------------------------------------------- # Arithmetic / combination related - def _combine_frame(self, other, func, fill_value=None, level=None, - try_cast=True): + def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns @@ -3968,52 +3967,40 @@ def f(i): def _combine_series(self, other, func, fill_value=None, axis=None, level=None, try_cast=True): + if fill_value is not None: + raise NotImplementedError("fill_value {fill} not supported." + .format(fill=fill_value)) + if axis is not None: axis = self._get_axis_name(axis) if axis == 'index': - return self._combine_match_index(other, func, level=level, - fill_value=fill_value, - try_cast=try_cast) + return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level, - fill_value=fill_value, try_cast=try_cast) - return self._combine_series_infer(other, func, level=level, - fill_value=fill_value, - try_cast=try_cast) - - def _combine_series_infer(self, other, func, level=None, - fill_value=None, try_cast=True): - if len(other) == 0: - return self * np.nan + else: + if not len(other): + return self * np.nan - if len(self) == 0: - # Ambiguous case, use _series so works with DataFrame - return self._constructor(data=self._series, index=self.index, - columns=self.columns) + if not len(self): + # Ambiguous case, use _series so works with DataFrame + return self._constructor(data=self._series, index=self.index, + columns=self.columns) - return self._combine_match_columns(other, func, level=level, - fill_value=fill_value, - try_cast=try_cast) + # default axis is columns + return self._combine_match_columns(other, func, level=level, + try_cast=try_cast) - def _combine_match_index(self, other, func, level=None, - fill_value=None, try_cast=True): + def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) - if fill_value is not None: - raise NotImplementedError("fill_value %r not supported." 
% - fill_value) return self._constructor(func(left.values.T, right.values).T, index=left.index, columns=self.columns, copy=False) - def _combine_match_columns(self, other, func, level=None, - fill_value=None, try_cast=True): + def _combine_match_columns(self, other, func, level=None, try_cast=True): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) - if fill_value is not None: - raise NotImplementedError("fill_value %r not supported" % - fill_value) new_data = left._data.eval(func=func, other=right, axes=[left.columns, self.index], diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1e1bb0d49b3df..15df77bf772dc 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -80,6 +80,26 @@ def _try_get_item(x): return x +def _make_invalid_op(name): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + def invalid_op(self, other=None): + raise TypeError("cannot perform {name} with this index type: " + "{typ}".format(name=name, typ=type(self))) + + invalid_op.__name__ = name + return invalid_op + + class InvalidIndexError(Exception): pass @@ -3916,30 +3936,12 @@ def _evaluate_compare(self, other): @classmethod def _add_numeric_methods_add_sub_disabled(cls): """ add in the numeric add/sub methods to disable """ - - def _make_invalid_op(name): - def invalid_op(self, other=None): - raise TypeError("cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self))) - - invalid_op.__name__ = name - return invalid_op - cls.__add__ = cls.__radd__ = __iadd__ = _make_invalid_op('__add__') # noqa cls.__sub__ = __isub__ = _make_invalid_op('__sub__') # noqa @classmethod def _add_numeric_methods_disabled(cls): """ add in numeric methods to disable other than add/sub """ - - def _make_invalid_op(name): - def invalid_op(self, other=None): - raise TypeError("cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self))) - - invalid_op.__name__ = name - return invalid_op - cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__') cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__') cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__') @@ -4147,15 +4149,6 @@ def logical_func(self, *args, **kwargs): @classmethod def _add_logical_methods_disabled(cls): """ add in logical methods to disable """ - - def _make_invalid_op(name): - def invalid_op(self, other=None): - raise TypeError("cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self))) - - invalid_op.__name__ = name - return invalid_op - cls.all = _make_invalid_op('all') cls.any = _make_invalid_op('any') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 6db84aedce7e7..effa35695fcd1 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1106,12 +1106,13 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): if isinstance(other, ABCDataFrame): # Another DataFrame return self._combine_frame(other, na_op, fill_value, level) elif isinstance(other, ABCSeries): - return self._combine_series(other, na_op, fill_value, axis, level) + return self._combine_series(other, na_op, fill_value, axis, level, + try_cast=True) else: if fill_value is not None: self = self.fillna(fill_value) - return self._combine_const(other, na_op) + return self._combine_const(other, na_op, try_cast=True) f.__name__ = name @@ -1172,7 +1173,8 @@ def f(self, other): if isinstance(other, 
ABCDataFrame): # Another DataFrame return self._compare_frame(other, func, str_rep) elif isinstance(other, ABCSeries): - return self._combine_series_infer(other, func, try_cast=False) + return self._combine_series(other, func, + axis=None, try_cast=False) else: # straight boolean comparisons we want to allow all columns diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 91dc44e3f185e..122c2b11f25f9 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -540,8 +540,7 @@ def xs(self, key, axis=0, copy=False): # ---------------------------------------------------------------------- # Arithmetic-related methods - def _combine_frame(self, other, func, fill_value=None, level=None, - try_cast=True): + def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns @@ -584,12 +583,9 @@ def _combine_frame(self, other, func, fill_value=None, level=None, default_fill_value=new_fill_value ).__finalize__(self) - def _combine_match_index(self, other, func, level=None, fill_value=None, - try_cast=True): + def _combine_match_index(self, other, func, level=None): new_data = {} - if fill_value is not None: - raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") @@ -605,6 +601,7 @@ def _combine_match_index(self, other, func, level=None, fill_value=None, new_data[col] = func(series.values, other.values) # fill_value is a function of our operator + fill_value = None if isna(other.fill_value) or isna(self.default_fill_value): fill_value = np.nan else: @@ -615,15 +612,12 @@ def _combine_match_index(self, other, func, level=None, fill_value=None, new_data, index=new_index, columns=self.columns, default_fill_value=fill_value).__finalize__(self) - def _combine_match_columns(self, other, func, level=None, fill_value=None, - try_cast=True): + def _combine_match_columns(self, other, func, level=None, try_cast=True): # patched version of DataFrame._combine_match_columns to account for # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series, # where 3.0 is numpy.float64 and series is a SparseSeries. Still # possible for this to happen, which is bothersome - if fill_value is not None: - raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index bdccbec6111d3..f679a9820784e 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -451,6 +451,19 @@ def test_arith_flex_frame(self): with tm.assert_raises_regex(NotImplementedError, 'fill_value'): self.frame.add(self.frame.iloc[0], axis='index', fill_value=3) + def test_arith_flex_zero_len_raises(self): + # GH#19522 passing fill_value to frame flex arith methods should + # raise even in the zero-length special cases + ser_len0 = pd.Series([]) + df_len0 = pd.DataFrame([], columns=['A', 'B']) + df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + + with tm.assert_raises_regex(NotImplementedError, 'fill_value'): + df.add(ser_len0, fill_value='E') + + with tm.assert_raises_regex(NotImplementedError, 'fill_value'): + df_len0.sub(df['A'], axis=None, fill_value=3) + def test_binary_ops_align(self): # test aligning binary ops
Starting in on making DataFrame ops consistent with Series and Index ops. As a first pass, this goes through and removes some duplicate code and unused arguments. Changes logic in exactly two corner cases: arithmetic operations `frame.op(series, axis=None, fill_value=not_none)` where one of `series` or `frame` has length zero. `axis` needs to be specifically passed as `None` to hit the changed case.

```
ser = pd.Series([])
df = pd.DataFrame([[1, 2], [3, 4]])

>>> df.add(ser, axis=None, fill_value='E')
>>> ser.to_frame().sub(df[0], axis=None, fill_value=3)
```

will now raise instead of returning

```
>>> df.add(ser, axis=None, fill_value='E')
    0   1
0 NaN NaN
1 NaN NaN

>>> ser.to_frame().sub(df[0], axis=None, fill_value=3)
Empty DataFrame
Columns: [0]
Index: []
```
https://api.github.com/repos/pandas-dev/pandas/pulls/19522
2018-02-03T03:26:21Z
2018-02-07T12:32:37Z
2018-02-07T12:32:37Z
2018-02-11T21:38:50Z
ENH: Allow storing ExtensionArrays in containers
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 624045a3d64bc..d616e3f92aa4d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -15,11 +15,12 @@ is_unsigned_integer_dtype, is_signed_integer_dtype, is_integer_dtype, is_complex_dtype, is_object_dtype, + is_extension_array_dtype, is_categorical_dtype, is_sparse, is_period_dtype, is_numeric_dtype, is_float_dtype, is_bool_dtype, needs_i8_conversion, - is_categorical, is_datetimetz, + is_datetimetz, is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_interval_dtype, is_scalar, is_list_like, @@ -547,7 +548,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, if is_categorical_dtype(values) or is_sparse(values): # handle Categorical and sparse, - result = Series(values).values.value_counts(dropna=dropna) + result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values @@ -1292,10 +1293,13 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, """ Specialized Cython take which sets NaN values in one pass + This dispatches to ``take`` defined on ExtensionArrays. It does not + currently dispatch to ``SparseArray.take`` for sparse ``arr``. + Parameters ---------- - arr : ndarray - Input array + arr : array-like + Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indicies are filed with fill_value @@ -1315,17 +1319,25 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. + + Returns + ------- + subarray : array-like + May be the same type as the input, or cast to an ndarray. """ + # TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs # dispatch to internal type takes - if is_categorical(arr): - return arr.take_nd(indexer, fill_value=fill_value, - allow_fill=allow_fill) + if is_extension_array_dtype(arr): + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_datetimetz(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_interval_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + if is_sparse(arr): + arr = arr.get_values() + if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) dtype, fill_value = arr.dtype, arr.dtype.type() diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index e618dc6b69b2d..cec881394a021 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -25,14 +25,13 @@ class ExtensionArray(object): * isna * take * copy - * _formatting_values * _concat_same_type - Some additional methods are required to satisfy pandas' internal, private + Some additional methods are available to satisfy pandas' internal, private block API. - * _concat_same_type * _can_hold_na + * _formatting_values This class does not inherit from 'abc.ABCMeta' for performance reasons. Methods and properties required by the interface raise @@ -53,13 +52,14 @@ class ExtensionArray(object): Extension arrays should be able to be constructed with instances of the class, i.e. ``ExtensionArray(extension_array)`` should return an instance, not error. - - Additionally, certain methods and interfaces are required for proper - this array to be properly stored inside a ``DataFrame`` or ``Series``. 
""" + # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray. + # Don't override this. + _typ = 'extension' # ------------------------------------------------------------------------ # Must be a Sequence # ------------------------------------------------------------------------ + def __getitem__(self, item): # type (Any) -> Any """Select a subset of self. @@ -92,7 +92,46 @@ def __getitem__(self, item): raise AbstractMethodError(self) def __setitem__(self, key, value): - # type: (Any, Any) -> None + # type: (Union[int, np.ndarray], Any) -> None + """Set one or more values inplace. + + This method is not required to satisfy the pandas extension array + interface. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. ``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + # Some notes to the ExtensionArray implementor who may have ended up + # here. While this method is not required for the interface, if you + # *do* choose to implement __setitem__, then some semantics should be + # observed: + # + # * Setting multiple values : ExtensionArrays should support setting + # multiple values at once, 'key' will be a sequence of integers and + # 'value' will be a same-length sequence. + # + # * Broadcasting : For a sequence 'key' and a scalar 'value', + # each position in 'key' should be set to 'value'. + # + # * Coercion : Most users will expect basic coercion to work. For + # example, a string like '2018-01-01' is coerced to a datetime + # when setting on a datetime64ns array. In general, if the + # __init__ method coerces that value, then so should __setitem__ raise NotImplementedError(_not_implemented_message.format( type(self), '__setitem__') ) @@ -107,6 +146,16 @@ def __len__(self): # type: () -> int raise AbstractMethodError(self) + def __iter__(self): + """Iterate over elements of the array. + + """ + # This needs to be implemented so that pandas recognizes extension + # arrays as list-like. The default implementation makes successive + # calls to ``__getitem__``, which may be slower than necessary. + for i in range(len(self)): + yield self[i] + # ------------------------------------------------------------------------ # Required attributes # ------------------------------------------------------------------------ @@ -132,9 +181,9 @@ def nbytes(self): # type: () -> int """The number of bytes needed to store this object in memory. - If this is expensive to compute, return an approximate lower bound - on the number of bytes needed. """ + # If this is expensive to compute, return an approximate lower bound + # on the number of bytes needed. raise AbstractMethodError(self) # ------------------------------------------------------------------------ @@ -184,8 +233,8 @@ def take(self, indexer, allow_fill=True, fill_value=None): will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. fill_value : any, default None - Fill value to replace -1 values with. By default, this uses - the missing value sentinel for this type, ``self._fill_value``. + Fill value to replace -1 values with. If applicable, this should + use the sentinel missing value for this type. 
Notes ----- @@ -198,17 +247,20 @@ def take(self, indexer, allow_fill=True, fill_value=None): Examples -------- - Suppose the extension array somehow backed by a NumPy structured array - and that the underlying structured array is stored as ``self.data``. - Then ``take`` may be written as + Suppose the extension array is backed by a NumPy array stored as + ``self.data``. Then ``take`` may be written as .. code-block:: python def take(self, indexer, allow_fill=True, fill_value=None): mask = indexer == -1 result = self.data.take(indexer) - result[mask] = self._fill_value + result[mask] = np.nan # NA for this type return type(self)(result) + + See Also + -------- + numpy.take """ raise AbstractMethodError(self) @@ -230,17 +282,12 @@ def copy(self, deep=False): # ------------------------------------------------------------------------ # Block-related methods # ------------------------------------------------------------------------ - @property - def _fill_value(self): - # type: () -> Any - """The missing value for this type, e.g. np.nan""" - return None def _formatting_values(self): # type: () -> np.ndarray # At the moment, this has to be an array since we use result.dtype """An array of values to be printed in, e.g. the Series repr""" - raise AbstractMethodError(self) + return np.array(self) @classmethod def _concat_same_type(cls, to_concat): @@ -257,6 +304,7 @@ def _concat_same_type(cls, to_concat): """ raise AbstractMethodError(cls) + @property def _can_hold_na(self): # type: () -> bool """Whether your array can hold missing values. True by default. diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index c7c5378801f02..d54d980d02ffa 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -1,4 +1,7 @@ """Extend pandas with custom array types""" +import numpy as np + +from pandas import compat from pandas.errors import AbstractMethodError @@ -23,6 +26,32 @@ class ExtensionDtype(object): def __str__(self): return self.name + def __eq__(self, other): + """Check whether 'other' is equal to self. + + By default, 'other' is considered equal if + + * it's a string matching 'self.name'. + * it's an instance of this type. + + Parameters + ---------- + other : Any + + Returns + ------- + bool + """ + if isinstance(other, compat.string_types): + return other == self.name + elif isinstance(other, type(self)): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + @property def type(self): # type: () -> type @@ -102,11 +131,12 @@ def construct_from_string(cls, string): @classmethod def is_dtype(cls, dtype): - """Check if we match 'dtype' + """Check if we match 'dtype'. Parameters ---------- - dtype : str or dtype + dtype : object + The object to check. Returns ------- @@ -118,12 +148,19 @@ def is_dtype(cls, dtype): 1. ``cls.construct_from_string(dtype)`` is an instance of ``cls``. - 2. 'dtype' is ``cls`` or a subclass of ``cls``. + 2. ``dtype`` is an object and is an instance of ``cls`` + 3. ``dtype`` has a ``dtype`` attribute, and any of the above + conditions is true for ``dtype.dtype``. 
""" - if isinstance(dtype, str): - try: - return isinstance(cls.construct_from_string(dtype), cls) - except TypeError: - return False - else: - return issubclass(dtype, cls) + dtype = getattr(dtype, 'dtype', dtype) + + if isinstance(dtype, np.dtype): + return False + elif dtype is None: + return False + elif isinstance(dtype, cls): + return True + try: + return cls.construct_from_string(dtype) is not None + except TypeError: + return False diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c2b71bc316fe8..197b35de88896 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1708,9 +1708,9 @@ def is_extension_array_dtype(arr_or_dtype): """ from pandas.core.arrays import ExtensionArray - # we want to unpack series, anything else? if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): arr_or_dtype = arr_or_dtype._values + return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 99e4033f104db..d262a71933915 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -66,13 +66,6 @@ def __hash__(self): raise NotImplementedError("sub-classes should implement an __hash__ " "method") - def __eq__(self, other): - raise NotImplementedError("sub-classes should implement an __eq__ " - "method") - - def __ne__(self, other): - return not self.__eq__(other) - def __getstate__(self): # pickle support; we don't want to pickle the cache return {k: getattr(self, k, None) for k in self._metadata} @@ -82,24 +75,6 @@ def reset_cache(cls): """ clear the cache """ cls._cache = {} - @classmethod - def is_dtype(cls, dtype): - """ Return a boolean if the passed type is an actual dtype that - we can match (via string or type) - """ - if hasattr(dtype, 'dtype'): - dtype = dtype.dtype - if isinstance(dtype, np.dtype): - return False - elif dtype is None: - return False - elif isinstance(dtype, cls): - return True - try: - return cls.construct_from_string(dtype) is not None - except: - return False - class CategoricalDtypeType(type): """ diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index b032cb6f14d4c..cb54c94d29205 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -57,6 +57,8 @@ def _check(cls, inst): ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ", ("dateoffset",)) ABCInterval = create_pandas_abc_type("ABCInterval", "_typ", ("interval", )) +ABCExtensionArray = create_pandas_abc_type("ABCExtensionArray", "_typ", + ("extension", "categorical",)) class _ABCGeneric(type): diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index ffac702476af1..01c88c269e7e0 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -5,14 +5,16 @@ from pandas._libs import lib, missing as libmissing from pandas._libs.tslib import NaT, iNaT from .generic import (ABCMultiIndex, ABCSeries, - ABCIndexClass, ABCGeneric) + ABCIndexClass, ABCGeneric, + ABCExtensionArray) from .common import (is_string_dtype, is_datetimelike, is_datetimelike_v_numeric, is_float_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_interval_dtype, - is_complex_dtype, is_categorical_dtype, + is_complex_dtype, is_string_like_dtype, is_bool_dtype, is_integer_dtype, is_dtype_equal, + is_extension_array_dtype, needs_i8_conversion, _ensure_object, pandas_dtype, is_scalar, @@ -57,7 +59,8 @@ def _isna_new(obj): # hack (for now) because MI registers as ndarray elif 
isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") - elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): + elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, + ABCExtensionArray)): return _isna_ndarraylike(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=isna)) @@ -124,30 +127,31 @@ def _use_inf_as_na(key): def _isna_ndarraylike(obj): - values = getattr(obj, 'values', obj) dtype = values.dtype - if is_string_dtype(dtype): - if is_categorical_dtype(values): - from pandas import Categorical - if not isinstance(values, Categorical): - values = values.values - result = values.isna() - elif is_interval_dtype(values): - from pandas import IntervalIndex - result = IntervalIndex(obj).isna() + if is_extension_array_dtype(obj): + if isinstance(obj, (ABCIndexClass, ABCSeries)): + values = obj._values else: + values = obj + result = values.isna() + elif is_interval_dtype(values): + # TODO(IntervalArray): remove this if block + from pandas import IntervalIndex + result = IntervalIndex(obj).isna() + elif is_string_dtype(dtype): + # Working around NumPy ticket 1542 + shape = values.shape - # Working around NumPy ticket 1542 - shape = values.shape - - if is_string_like_dtype(dtype): - result = np.zeros(values.shape, dtype=bool) - else: - result = np.empty(shape, dtype=bool) - vec = libmissing.isnaobj(values.ravel()) - result[...] = vec.reshape(shape) + if is_string_like_dtype(dtype): + # object array of strings + result = np.zeros(values.shape, dtype=bool) + else: + # object array of non-strings + result = np.empty(shape, dtype=bool) + vec = libmissing.isnaobj(values.ravel()) + result[...] = vec.reshape(shape) elif needs_i8_conversion(obj): # this is the NaT pattern @@ -406,4 +410,7 @@ def remove_na_arraylike(arr): """ Return array-like containing only true/non-NaN values, possibly empty. 
""" - return arr[notna(lib.values_from_object(arr))] + if is_extension_array_dtype(arr): + return arr[notna(arr)] + else: + return arr[notna(lib.values_from_object(arr))] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d81d22173bfbd..1b781be8fa2b3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -39,6 +39,7 @@ is_categorical_dtype, is_object_dtype, is_extension_type, + is_extension_array_dtype, is_datetimetz, is_datetime64_any_dtype, is_datetime64tz_dtype, @@ -71,7 +72,7 @@ create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series -from pandas.core.arrays import Categorical +from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.algorithms as algorithms from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) @@ -511,7 +512,7 @@ def _get_axes(N, K, index=index, columns=columns): index, columns = _get_axes(len(values), 1) return _arrays_to_mgr([values], columns, index, columns, dtype=dtype) - elif is_datetimetz(values): + elif (is_datetimetz(values) or is_extension_array_dtype(values)): # GH19157 if columns is None: columns = [0] @@ -2820,7 +2821,7 @@ def reindexer(value): # now align rows value = reindexer(value).T - elif isinstance(value, Categorical): + elif isinstance(value, ExtensionArray): value = value.copy() elif isinstance(value, Index) or is_sequence(value): @@ -2850,7 +2851,7 @@ def reindexer(value): value = maybe_cast_to_datetime(value, value.dtype) # return internal types directly - if is_extension_type(value): + if is_extension_type(value) or is_extension_array_dtype(value): return value # broadcast across multiple columns if necessary @@ -3387,12 +3388,8 @@ class max type new_obj = self.copy() def _maybe_casted_values(index, labels=None): - if isinstance(index, PeriodIndex): - values = index.astype(object).values - elif isinstance(index, DatetimeIndex) and index.tz is not None: - values = index - else: - values = index.values + values = index._values + if not isinstance(index, (PeriodIndex, DatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) @@ -5604,7 +5601,9 @@ def count(self, axis=0, level=None, numeric_only=False): if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: - if frame._is_mixed_type: + if frame._is_mixed_type or frame._data.any_extension_types: + # the or any_extension_types is really only hit for single- + # column frames with an extension array result = notna(frame).sum(axis=axis) else: counts = notna(frame.values).sum(axis=axis) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7dfa34bd634ad..ef9facbacf490 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -13,6 +13,7 @@ from pandas import compat from pandas.core.accessor import CachedAccessor +from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, ABCMultiIndex, @@ -1982,6 +1983,7 @@ def _format_with_header(self, header, na_rep='NaN', **kwargs): if is_categorical_dtype(values.dtype): values = np.array(values) + elif is_object_dtype(values.dtype): values = lib.maybe_convert_objects(values, safe=1) @@ -2581,7 +2583,7 @@ def get_value(self, series, key): # if we have something that is Index-like, then # use this, e.g. 
DatetimeIndex s = getattr(series, '_values', None) - if isinstance(s, Index) and is_scalar(key): + if isinstance(s, (ExtensionArray, Index)) and is_scalar(key): try: return s[key] except (IndexError, ValueError): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 352ce921d1d44..50f3c7a6b3d3d 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -618,6 +618,9 @@ def can_do_equal_len(): return if isinstance(value, (ABCSeries, dict)): + # TODO(EA): ExtensionBlock.setitem this causes issues with + # setting for extensionarrays that store dicts. Need to decide + # if it's worth supporting that. value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame): diff --git a/pandas/core/internals.py b/pandas/core/internals.py index dd5feefc49fe3..bad0626206e80 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -56,7 +56,11 @@ is_null_datelike_scalar) import pandas.core.dtypes.concat as _concat -from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCDatetimeIndex, + ABCExtensionArray, + ABCIndexClass) import pandas.core.common as com import pandas.core.algorithms as algos @@ -99,6 +103,7 @@ class Block(PandasObject): is_object = False is_categorical = False is_sparse = False + is_extension = False _box_to_block_values = True _can_hold_na = False _can_consolidate = True @@ -1854,11 +1859,40 @@ class ExtensionBlock(NonConsolidatableMixIn, Block): ExtensionArrays are limited to 1-D. """ + is_extension = True + + def __init__(self, values, placement, ndim=None): + values = self._maybe_coerce_values(values) + super(ExtensionBlock, self).__init__(values, placement, ndim) + + def _maybe_coerce_values(self, values): + """Unbox to an extension array. + + This will unbox an ExtensionArray stored in an Index or Series. + ExtensionArrays pass through. No dtype coercion is done. + + Parameters + ---------- + values : Index, Series, ExtensionArray + + Returns + ------- + ExtensionArray + """ + if isinstance(values, (ABCIndexClass, ABCSeries)): + values = values._values + return values + @property def _holder(self): # For extension blocks, the holder is values-dependent. return type(self.values) + @property + def _can_hold_na(self): + # The default ExtensionArray._can_hold_na is True + return self._holder._can_hold_na + @property def is_view(self): """Extension arrays are never treated as views.""" @@ -3451,6 +3485,8 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, else: align_keys = [] + # TODO(EA): may interfere with ExtensionBlock.setitem for blocks + # with a .values attribute. 
aligned_args = dict((k, kwargs[k]) for k in align_keys if hasattr(kwargs[k], 'values')) @@ -3696,6 +3732,11 @@ def is_datelike_mixed_type(self): self._consolidate_inplace() return any(block.is_datelike for block in self.blocks) + @property + def any_extension_types(self): + """Whether any of the blocks in this manager are extension blocks""" + return any(block.is_extension for block in self.blocks) + @property def is_view(self): """ return a boolean if we are a single block and are a view """ @@ -4101,7 +4142,10 @@ def set(self, item, value, check=False): # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical - value_is_extension_type = is_extension_type(value) + # TODO(EA): Remove an is_extension_ when all extension types satisfy + # the interface + value_is_extension_type = (is_extension_type(value) or + is_extension_array_dtype(value)) # categorical/spares/datetimetz if value_is_extension_type: @@ -4833,15 +4877,11 @@ def form_blocks(arrays, names, axes): if len(items_dict['ExtensionBlock']): - external_blocks = [] - for i, _, array in items_dict['ExtensionBlock']: - if isinstance(array, ABCSeries): - array = array.values - # Allow our internal arrays to chose their block type. - block_type = getattr(array, '_block_type', ExtensionBlock) - external_blocks.append( - make_block(array, klass=block_type, - fastpath=True, placement=[i])) + external_blocks = [ + make_block(array, klass=ExtensionBlock, placement=[i]) + for i, _, array in items_dict['ExtensionBlock'] + ] + blocks.extend(external_blocks) if len(extra_locs): @@ -5162,7 +5202,7 @@ def _safe_reshape(arr, new_shape): """ if isinstance(arr, ABCSeries): arr = arr._values - if not isinstance(arr, Categorical): + if not isinstance(arr, ABCExtensionArray): arr = arr.reshape(new_shape) return arr @@ -5673,6 +5713,8 @@ def is_na(self): if not values._null_fill_value and values.sp_index.ngaps > 0: return False values_flat = values.ravel(order='K') + elif isinstance(self.block, ExtensionBlock): + values_flat = values else: values_flat = values.ravel(order='K') total_len = values_flat.shape[0] diff --git a/pandas/core/series.py b/pandas/core/series.py index 90dc14836ab55..12865bfe44a3b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -14,12 +14,14 @@ import numpy.ma as ma from pandas.core.accessor import CachedAccessor +from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.common import ( is_categorical_dtype, is_bool, is_integer, is_integer_dtype, is_float_dtype, is_extension_type, + is_extension_array_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_list_like, @@ -173,12 +175,17 @@ def __init__(self, data=None, index=None, dtype=None, name=None, raise NotImplementedError("initializing a Series from a " "MultiIndex is not supported") elif isinstance(data, Index): - # need to copy to avoid aliasing issues if name is None: name = data.name - data = data._to_embed(keep_tz=True, dtype=dtype) + if dtype is not None: + # astype copies + data = data.astype(dtype) + else: + # need to copy to avoid aliasing issues + data = data._values.copy() copy = False + elif isinstance(data, np.ndarray): pass elif isinstance(data, Series): @@ -203,13 +210,15 @@ def __init__(self, data=None, index=None, dtype=None, name=None, '`data` argument and a different ' '`index` argument. 
`copy` must ' 'be False.') - elif isinstance(data, Categorical): + + elif is_extension_array_dtype(data) and dtype is not None: # GH12574: Allow dtype=category only, otherwise error - if ((dtype is not None) and - not is_categorical_dtype(dtype)): - raise ValueError("cannot specify a dtype with a " - "Categorical unless " - "dtype='category'") + if not data.dtype.is_dtype(dtype): + raise ValueError("Cannot specify a dtype '{}' with an " + "extension array of a different " + "dtype ('{}').".format(dtype, + data.dtype)) + elif (isinstance(data, types.GeneratorType) or (compat.PY3 and isinstance(data, map))): data = list(data) @@ -2556,8 +2565,7 @@ def _reindex_indexer(self, new_index, indexer, copy): return self.copy() return self - # be subclass-friendly - new_values = algorithms.take_1d(self.get_values(), indexer) + new_values = algorithms.take_1d(self._values, indexer) return self._constructor(new_values, index=new_index) def _needs_reindex_multi(self, axes, method, level): @@ -3113,10 +3121,11 @@ def _sanitize_index(data, index, copy=False): if isinstance(data, ABCIndexClass) and not copy: pass - elif isinstance(data, PeriodIndex): - data = data.astype(object).values - elif isinstance(data, DatetimeIndex): - data = data._to_embed(keep_tz=True) + elif isinstance(data, (PeriodIndex, DatetimeIndex)): + data = data._values + if copy: + data = data.copy() + elif isinstance(data, np.ndarray): # coerce datetimelike types @@ -3156,8 +3165,17 @@ def _try_cast(arr, take_fast_path): subarr = np.array(subarr, dtype=dtype, copy=copy) except (ValueError, TypeError): if is_categorical_dtype(dtype): + # We *do* allow casting to categorical, since we know + # that Categorical is the only array type for 'category'. subarr = Categorical(arr, dtype.categories, ordered=dtype.ordered) + elif is_extension_array_dtype(dtype): + # We don't allow casting to third party dtypes, since we don't + # know what array belongs to which type. + msg = ("Cannot cast data to extension dtype '{}'. " + "Pass the extension array directly.".format(dtype)) + raise ValueError(msg) + elif dtype is not None and raise_cast_failure: raise else: @@ -3189,9 +3207,15 @@ def _try_cast(arr, take_fast_path): # we will try to copy be-definition here subarr = _try_cast(data, True) - elif isinstance(data, Categorical): + elif isinstance(data, ExtensionArray): subarr = data + if dtype is not None and not data.dtype.is_dtype(dtype): + msg = ("Cannot coerce extension array to dtype '{typ}'. " + "Do the coercion before passing to the constructor " + "instead.".format(typ=dtype)) + raise ValueError(msg) + if copy: subarr = data.copy() return subarr diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py index 79758dee5cfda..c8ac6a6ef14f8 100644 --- a/pandas/tests/categorical/test_missing.py +++ b/pandas/tests/categorical/test_missing.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- - import numpy as np import pandas.util.testing as tm -from pandas import (Categorical, Index, isna) +from pandas import Categorical, Index, isna from pandas.compat import lrange from pandas.core.dtypes.dtypes import CategoricalDtype diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py new file mode 100644 index 0000000000000..2273ef1f3e110 --- /dev/null +++ b/pandas/tests/extension/base/__init__.py @@ -0,0 +1,42 @@ +"""Base test suite for extension arrays. + +These tests are intended for third-party libraries to subclass to validate +that their extension arrays and dtypes satisfy the interface. 
Moving or +renaming the tests should not be done lightly. + +Libraries are expected to implement a few pytest fixtures to provide data +for the tests. The fixtures may be located in either + +* The same module as your test class. +* A ``conftest.py`` in the same directory as your test class. + +The full list of fixtures may be found in the ``conftest.py`` next to this +file. + +.. code-block:: python + + import pytest + from pandas.tests.extension.base import BaseDtypeTests + + + @pytest.fixture + def dtype(): + return MyDtype() + + + class TestMyDtype(BaseDtypeTests): + pass + + +Your class ``TestDtype`` will inherit all the tests defined on +``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype`` +wherever the test requires it. You're free to implement additional tests. +""" +from .casting import BaseCastingTests # noqa +from .constructors import BaseConstructorsTests # noqa +from .dtype import BaseDtypeTests # noqa +from .getitem import BaseGetitemTests # noqa +from .interface import BaseInterfaceTests # noqa +from .methods import BaseMethodsTests # noqa +from .missing import BaseMissingTests # noqa +from .reshaping import BaseReshapingTests # noqa diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py new file mode 100644 index 0000000000000..bcfbf0a247269 --- /dev/null +++ b/pandas/tests/extension/base/casting.py @@ -0,0 +1,11 @@ +import pandas as pd +from pandas.core.internals import ObjectBlock + + +class BaseCastingTests(object): + """Casting to and from ExtensionDtypes""" + + def test_astype_object_series(self, all_data): + ser = pd.Series({"A": all_data}) + result = ser.astype(object) + assert isinstance(result._data.blocks[0], ObjectBlock) diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py new file mode 100644 index 0000000000000..7ad100e6289e9 --- /dev/null +++ b/pandas/tests/extension/base/constructors.py @@ -0,0 +1,43 @@ +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas.core.internals import ExtensionBlock + + +class BaseConstructorsTests(object): + + def test_series_constructor(self, data): + result = pd.Series(data) + assert result.dtype == data.dtype + assert len(result) == len(data) + assert isinstance(result._data.blocks[0], ExtensionBlock) + assert result._data.blocks[0].values is data + + # Series[EA] is unboxed / boxed correctly + result2 = pd.Series(result) + assert result2.dtype == data.dtype + assert isinstance(result2._data.blocks[0], ExtensionBlock) + + @pytest.mark.parametrize("from_series", [True, False]) + def test_dataframe_constructor_from_dict(self, data, from_series): + if from_series: + data = pd.Series(data) + result = pd.DataFrame({"A": data}) + assert result.dtypes['A'] == data.dtype + assert result.shape == (len(data), 1) + assert isinstance(result._data.blocks[0], ExtensionBlock) + + def test_dataframe_from_series(self, data): + result = pd.DataFrame(pd.Series(data)) + assert result.dtypes[0] == data.dtype + assert result.shape == (len(data), 1) + assert isinstance(result._data.blocks[0], ExtensionBlock) + + @pytest.mark.xfail(reason="GH-19342") + def test_series_given_mismatched_index_raises(self, data): + msg = 'Wrong number of items passed 3, placement implies 4' + with tm.assert_raises_regex(ValueError, None) as m: + pd.Series(data[:3], index=[0, 1, 2, 3, 4]) + + assert m.match(msg) diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py new file mode 100644 index 
0000000000000..f5015bd469f13 --- /dev/null +++ b/pandas/tests/extension/base/dtype.py @@ -0,0 +1,46 @@ +import numpy as np +import pandas as pd + + +class BaseDtypeTests(object): + """Base class for ExtensionDtype classes""" + + def test_name(self, dtype): + assert isinstance(dtype.name, str) + + def test_kind(self, dtype): + valid = set('biufcmMOSUV') + if dtype.kind is not None: + assert dtype.kind in valid + + def test_construct_from_string_own_name(self, dtype): + result = dtype.construct_from_string(dtype.name) + assert type(result) is type(dtype) + + # check OK as classmethod + result = type(dtype).construct_from_string(dtype.name) + assert type(result) is type(dtype) + + def test_is_dtype_from_name(self, dtype): + result = type(dtype).is_dtype(dtype.name) + assert result is True + + def test_is_dtype_unboxes_dtype(self, data, dtype): + assert dtype.is_dtype(data) is True + + def test_is_dtype_from_self(self, dtype): + result = type(dtype).is_dtype(dtype) + assert result is True + + def test_is_not_string_type(self, dtype): + return not pd.api.types.is_string_dtype(dtype) + + def test_is_not_object_type(self, dtype): + return not pd.api.types.is_object_dtype(dtype) + + def test_eq_with_str(self, dtype): + assert dtype == dtype.name + assert dtype != dtype.name + '-suffix' + + def test_eq_with_numpy_object(self, dtype): + assert dtype != np.dtype('object') diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py new file mode 100644 index 0000000000000..f43971e928cac --- /dev/null +++ b/pandas/tests/extension/base/getitem.py @@ -0,0 +1,119 @@ +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + + +class BaseGetitemTests(object): + """Tests for ExtensionArray.__getitem__.""" + + def test_iloc_series(self, data): + ser = pd.Series(data) + result = ser.iloc[:4] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.iloc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_iloc_frame(self, data): + df = pd.DataFrame({"A": data, 'B': np.arange(len(data))}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.iloc[:4, [0]] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.iloc[[0, 1, 2, 3], [0]] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name='A') + + # slice -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + def test_loc_series(self, data): + ser = pd.Series(data) + result = ser.loc[:3] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.loc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_loc_frame(self, data): + df = pd.DataFrame({"A": data, 'B': np.arange(len(data))}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.loc[:3, ['A']] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.loc[[0, 1, 2, 3], ['A']] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name='A') + + # slice -> series + result = df.loc[:3, 'A'] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.loc[:3, 'A'] + tm.assert_series_equal(result, expected) + + def test_getitem_scalar(self, data): + result = data[0] + assert isinstance(result, data.dtype.type) + + result = pd.Series(data)[0] + assert isinstance(result, 
data.dtype.type) + + def test_getitem_scalar_na(self, data_missing, na_cmp, na_value): + result = data_missing[0] + assert na_cmp(result, na_value) + + def test_getitem_mask(self, data): + # Empty mask, raw array + mask = np.zeros(len(data), dtype=bool) + result = data[mask] + assert len(result) == 0 + assert isinstance(result, type(data)) + + # Empty mask, in series + mask = np.zeros(len(data), dtype=bool) + result = pd.Series(data)[mask] + assert len(result) == 0 + assert result.dtype == data.dtype + + # non-empty mask, raw array + mask[0] = True + result = data[mask] + assert len(result) == 1 + assert isinstance(result, type(data)) + + # non-empty mask, in series + result = pd.Series(data)[mask] + assert len(result) == 1 + assert result.dtype == data.dtype + + def test_getitem_slice(self, data): + # getitem[slice] should return an array + result = data[slice(0)] # empty + assert isinstance(result, type(data)) + + result = data[slice(1)] # scalar + assert isinstance(result, type(data)) + + def test_take_sequence(self, data): + result = pd.Series(data)[[0, 1, 3]] + assert result.iloc[0] == data[0] + assert result.iloc[1] == data[1] + assert result.iloc[2] == data[3] diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py new file mode 100644 index 0000000000000..8f17131a9482b --- /dev/null +++ b/pandas/tests/extension/base/interface.py @@ -0,0 +1,53 @@ +import numpy as np + +import pandas as pd +from pandas.compat import StringIO +from pandas.core.dtypes.common import is_extension_array_dtype +from pandas.core.dtypes.dtypes import ExtensionDtype + + +class BaseInterfaceTests(object): + """Tests that the basic interface is satisfied.""" + # ------------------------------------------------------------------------ + # Interface + # ------------------------------------------------------------------------ + + def test_len(self, data): + assert len(data) == 100 + + def test_ndim(self, data): + assert data.ndim == 1 + + def test_can_hold_na_valid(self, data): + assert data._can_hold_na in {True, False} + + def test_memory_usage(self, data): + s = pd.Series(data) + result = s.memory_usage(index=False) + assert result == s.nbytes + + def test_array_interface(self, data): + result = np.array(data) + assert result[0] == data[0] + + def test_as_ndarray_with_dtype_kind(self, data): + np.array(data, dtype=data.dtype.kind) + + def test_repr(self, data): + ser = pd.Series(data) + assert data.dtype.name in repr(ser) + + df = pd.DataFrame({"A": data}) + repr(df) + + def test_dtype_name_in_info(self, data): + buf = StringIO() + pd.DataFrame({"A": data}).info(buf=buf) + result = buf.getvalue() + assert data.dtype.name in result + + def test_is_extension_array_dtype(self, data): + assert is_extension_array_dtype(data) + assert is_extension_array_dtype(data.dtype) + assert is_extension_array_dtype(pd.Series(data)) + assert isinstance(data.dtype, ExtensionDtype) diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py new file mode 100644 index 0000000000000..c77811ca63926 --- /dev/null +++ b/pandas/tests/extension/base/methods.py @@ -0,0 +1,32 @@ +import pytest +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + + +class BaseMethodsTests(object): + """Various Series and DataFrame methods.""" + + @pytest.mark.parametrize('dropna', [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + 
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts(dropna=dropna).sort_index() + + tm.assert_series_equal(result, expected) + + def test_count(self, data_missing): + df = pd.DataFrame({"A": data_missing}) + result = df.count(axis='columns') + expected = pd.Series([0, 1]) + tm.assert_series_equal(result, expected) + + def test_apply_simple_series(self, data): + result = pd.Series(data).apply(id) + assert isinstance(result, pd.Series) diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py new file mode 100644 index 0000000000000..1d6f2eea1f1f9 --- /dev/null +++ b/pandas/tests/extension/base/missing.py @@ -0,0 +1,45 @@ +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + + +class BaseMissingTests(object): + def test_isna(self, data_missing): + if data_missing._can_hold_na: + expected = np.array([True, False]) + else: + expected = np.array([False, False]) + + result = pd.isna(data_missing) + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(data_missing).isna() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + def test_dropna_series(self, data_missing): + ser = pd.Series(data_missing) + result = ser.dropna() + expected = ser.iloc[[1]] + tm.assert_series_equal(result, expected) + + def test_dropna_frame(self, data_missing): + df = pd.DataFrame({"A": data_missing}) + + # defaults + result = df.dropna() + expected = df.iloc[[1]] + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.dropna(axis='columns') + expected = pd.DataFrame(index=[0, 1]) + tm.assert_frame_equal(result, expected) + + # multiple + df = pd.DataFrame({"A": data_missing, + "B": [1, np.nan]}) + result = df.dropna() + expected = df.iloc[:0] + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py new file mode 100644 index 0000000000000..d8f577c6fa50d --- /dev/null +++ b/pandas/tests/extension/base/reshaping.py @@ -0,0 +1,61 @@ +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas.core.internals import ExtensionBlock + + +class BaseReshapingTests(object): + """Tests for reshaping and concatenation.""" + @pytest.mark.parametrize('in_frame', [True, False]) + def test_concat(self, data, in_frame): + wrapped = pd.Series(data) + if in_frame: + wrapped = pd.DataFrame(wrapped) + result = pd.concat([wrapped, wrapped], ignore_index=True) + + assert len(result) == len(data) * 2 + + if in_frame: + dtype = result.dtypes[0] + else: + dtype = result.dtype + + assert dtype == data.dtype + assert isinstance(result._data.blocks[0], ExtensionBlock) + + def test_align(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.Series(type(data)(list(a) + [na_value])) + e2 = pd.Series(type(data)([na_value] + list(b))) + tm.assert_series_equal(r1, e1) + tm.assert_series_equal(r2, e2) + + def test_align_frame(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.DataFrame({'A': a}).align( + pd.DataFrame({'A': b}, index=[1, 2, 3]) + ) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.DataFrame({'A': type(data)(list(a) + [na_value])}) + e2 = pd.DataFrame({'A': type(data)([na_value] + list(b))}) + tm.assert_frame_equal(r1, e1) + tm.assert_frame_equal(r2, e2) + + def 
test_set_frame_expand_regular_with_extension(self, data): + df = pd.DataFrame({"A": [1] * len(data)}) + df['B'] = data + expected = pd.DataFrame({"A": [1] * len(data), "B": data}) + tm.assert_frame_equal(df, expected) + + def test_set_frame_expand_extension_with_regular(self, data): + df = pd.DataFrame({'A': data}) + df['B'] = [1] * len(data) + expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) + tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/extension/category/__init__.py b/pandas/tests/extension/category/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py new file mode 100644 index 0000000000000..ec548fca6d901 --- /dev/null +++ b/pandas/tests/extension/category/test_categorical.py @@ -0,0 +1,84 @@ +import string + +import pytest +import numpy as np + +from pandas.api.types import CategoricalDtype +from pandas import Categorical +from pandas.tests.extension import base + + +def make_data(): + return np.random.choice(list(string.ascii_letters), size=100) + + +@pytest.fixture +def dtype(): + return CategoricalDtype() + + +@pytest.fixture +def data(): + """Length-100 PeriodArray for semantics test.""" + return Categorical(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return Categorical([np.nan, 'A']) + + +@pytest.fixture +def na_value(): + return np.nan + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + @pytest.mark.skip(reason="Memory usage doesn't match") + def test_memory_usage(self): + # Is this deliberate? + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + @pytest.mark.skip(reason="Unobserved categories preseved in concat.") + def test_align(self, data, na_value): + pass + + @pytest.mark.skip(reason="Unobserved categories preseved in concat.") + def test_align_frame(self, data, na_value): + pass + + +class TestGetitem(base.BaseGetitemTests): + @pytest.mark.skip(reason="Backwards compatability") + def test_getitem_scalar(self): + # CategoricalDtype.type isn't "correct" since it should + # be a parent of the elements (object). But don't want + # to break things by changing. 
+ pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + pass + + @pytest.mark.skip(reason="Unobserved categories included") + def test_value_counts(self, all_data, dropna): + pass + + +class TestCasting(base.BaseCastingTests): + pass diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py new file mode 100644 index 0000000000000..f86849b9cbd61 --- /dev/null +++ b/pandas/tests/extension/conftest.py @@ -0,0 +1,48 @@ +import operator + +import pytest + + +@pytest.fixture +def dtype(): + """A fixture providing the ExtensionDtype to validate.""" + raise NotImplementedError + + +@pytest.fixture +def data(): + """Length-100 array for this type.""" + raise NotImplementedError + + +@pytest.fixture +def data_missing(): + """Length-2 array with [NA, Valid]""" + raise NotImplementedError + + +@pytest.fixture(params=['data', 'data_missing']) +def all_data(request, data, data_missing): + """Parametrized fixture giving 'data' and 'data_missing'""" + if request.param == 'data': + return data + elif request.param == 'data_missing': + return data_missing + + +@pytest.fixture +def na_cmp(): + """Binary operator for comparing NA values. + + Should return a function of two arguments that returns + True if both arguments are (scalar) NA for your type. + + By defult, uses ``operator.or`` + """ + return operator.is_ + + +@pytest.fixture +def na_value(): + """The scalar missing value for this type. Default 'None'""" + return None diff --git a/pandas/tests/extension/decimal/__init__.py b/pandas/tests/extension/decimal/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py new file mode 100644 index 0000000000000..f526ac5996a10 --- /dev/null +++ b/pandas/tests/extension/decimal/array.py @@ -0,0 +1,86 @@ +import decimal +import numbers +import random +import sys + +import numpy as np + +import pandas as pd +from pandas.core.arrays import ExtensionArray +from pandas.core.dtypes.base import ExtensionDtype + + +class DecimalDtype(ExtensionDtype): + type = decimal.Decimal + name = 'decimal' + + @classmethod + def construct_from_string(cls, string): + if string == cls.name: + return cls() + else: + raise TypeError("Cannot construct a '{}' from " + "'{}'".format(cls, string)) + + +class DecimalArray(ExtensionArray): + dtype = DecimalDtype() + + def __init__(self, values): + values = np.asarray(values, dtype=object) + + self.values = values + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.values[item] + else: + return type(self)(self.values[item]) + + def copy(self, deep=False): + if deep: + return type(self)(self.values.copy()) + return type(self)(self) + + def __setitem__(self, key, value): + if pd.api.types.is_list_like(value): + value = [decimal.Decimal(v) for v in value] + else: + value = decimal.Decimal(value) + self.values[key] = value + + def __len__(self): + return len(self.values) + + def __repr__(self): + return repr(self.values) + + @property + def nbytes(self): + n = len(self) + if n: + return n * sys.getsizeof(self[0]) + return 0 + + def isna(self): + return np.array([x.is_nan() for x in self.values]) + + def take(self, indexer, allow_fill=True, fill_value=None): + mask = indexer == -1 + + out = self.values.take(indexer) + out[mask] = self._na_value + + return type(self)(out) + + @property + def _na_value(self): + return decimal.Decimal('NaN') + + @classmethod + def 
_concat_same_type(cls, to_concat): + return cls(np.concatenate([x.values for x in to_concat])) + + +def make_data(): + return [decimal.Decimal(random.random()) for _ in range(100)] diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py new file mode 100644 index 0000000000000..7b4d079ecad87 --- /dev/null +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -0,0 +1,154 @@ +import decimal + +import numpy as np +import pandas as pd +import pandas.util.testing as tm +import pytest + +from pandas.tests.extension import base + +from .array import DecimalDtype, DecimalArray, make_data + + +@pytest.fixture +def dtype(): + return DecimalDtype() + + +@pytest.fixture +def data(): + return DecimalArray(make_data()) + + +@pytest.fixture +def data_missing(): + return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)]) + + +@pytest.fixture +def na_cmp(): + return lambda x, y: x.is_nan() and y.is_nan() + + +@pytest.fixture +def na_value(): + return decimal.Decimal("NaN") + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + + def test_align(self, data, na_value): + # Have to override since assert_series_equal doesn't + # compare Decimal(NaN) properly. + a = data[:3] + b = data[2:5] + r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) + + # NaN handling + e1 = pd.Series(type(data)(list(a) + [na_value])) + e2 = pd.Series(type(data)([na_value] + list(b))) + tm.assert_series_equal(r1.iloc[:3], e1.iloc[:3]) + assert r1[3].is_nan() + assert e1[3].is_nan() + + tm.assert_series_equal(r2.iloc[1:], e2.iloc[1:]) + assert r2[0].is_nan() + assert e2[0].is_nan() + + def test_align_frame(self, data, na_value): + # Override for Decimal(NaN) comparison + a = data[:3] + b = data[2:5] + r1, r2 = pd.DataFrame({'A': a}).align( + pd.DataFrame({'A': b}, index=[1, 2, 3]) + ) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.DataFrame({'A': type(data)(list(a) + [na_value])}) + e2 = pd.DataFrame({'A': type(data)([na_value] + list(b))}) + + tm.assert_frame_equal(r1.iloc[:3], e1.iloc[:3]) + assert r1.loc[3, 'A'].is_nan() + assert e1.loc[3, 'A'].is_nan() + + tm.assert_frame_equal(r2.iloc[1:], e2.iloc[1:]) + assert r2.loc[0, 'A'].is_nan() + assert e2.loc[0, 'A'].is_nan() + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + @pytest.mark.parametrize('dropna', [True, False]) + @pytest.mark.xfail(reason="value_counts not implemented yet.") + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts(dropna=dropna).sort_index() + + tm.assert_series_equal(result, expected) + + +class TestCasting(base.BaseCastingTests): + pass + + +def test_series_constructor_coerce_data_to_extension_dtype_raises(): + xpr = ("Cannot cast data to extension dtype 'decimal'. 
Pass the " + "extension array directly.") + with tm.assert_raises_regex(ValueError, xpr): + pd.Series([0, 1, 2], dtype=DecimalDtype()) + + +def test_series_constructor_with_same_dtype_ok(): + arr = DecimalArray([decimal.Decimal('10.0')]) + result = pd.Series(arr, dtype=DecimalDtype()) + expected = pd.Series(arr) + tm.assert_series_equal(result, expected) + + +def test_series_constructor_coerce_extension_array_to_dtype_raises(): + arr = DecimalArray([decimal.Decimal('10.0')]) + xpr = "Cannot specify a dtype 'int64' .* \('decimal'\)." + + with tm.assert_raises_regex(ValueError, xpr): + pd.Series(arr, dtype='int64') + + +def test_dataframe_constructor_with_same_dtype_ok(): + arr = DecimalArray([decimal.Decimal('10.0')]) + + result = pd.DataFrame({"A": arr}, dtype=DecimalDtype()) + expected = pd.DataFrame({"A": arr}) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_constructor_with_different_dtype_raises(): + arr = DecimalArray([decimal.Decimal('10.0')]) + + xpr = "Cannot coerce extension array to dtype 'int64'. " + with tm.assert_raises_regex(ValueError, xpr): + pd.DataFrame({"A": arr}, dtype='int64') diff --git a/pandas/tests/extension/json/__init__.py b/pandas/tests/extension/json/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py new file mode 100644 index 0000000000000..90aac93c68f64 --- /dev/null +++ b/pandas/tests/extension/json/array.py @@ -0,0 +1,99 @@ +import collections +import itertools +import numbers +import random +import string +import sys + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.arrays import ExtensionArray + + +class JSONDtype(ExtensionDtype): + type = collections.Mapping + name = 'json' + + @classmethod + def construct_from_string(cls, string): + if string == cls.name: + return cls() + else: + raise TypeError("Cannot construct a '{}' from " + "'{}'".format(cls, string)) + + +class JSONArray(ExtensionArray): + dtype = JSONDtype() + + def __init__(self, values): + for val in values: + if not isinstance(val, self.dtype.type): + raise TypeError + self.data = values + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.data[item] + elif isinstance(item, np.ndarray) and item.dtype == 'bool': + return type(self)([x for x, m in zip(self, item) if m]) + else: + return type(self)(self.data[item]) + + def __setitem__(self, key, value): + if isinstance(key, numbers.Integral): + self.data[key] = value + else: + if not isinstance(value, (type(self), + collections.Sequence)): + # broadcast value + value = itertools.cycle([value]) + + if isinstance(key, np.ndarray) and key.dtype == 'bool': + # masking + for i, (k, v) in enumerate(zip(key, value)): + if k: + assert isinstance(v, self.dtype.type) + self.data[i] = v + else: + for k, v in zip(key, value): + assert isinstance(v, self.dtype.type) + self.data[k] = v + + def __len__(self): + return len(self.data) + + def __repr__(self): + return 'JSONArary({!r})'.format(self.data) + + @property + def nbytes(self): + return sys.getsizeof(self.data) + + def isna(self): + return np.array([x == self._na_value for x in self.data]) + + def take(self, indexer, allow_fill=True, fill_value=None): + output = [self.data[loc] if loc != -1 else self._na_value + for loc in indexer] + return type(self)(output) + + def copy(self, deep=False): + return type(self)(self.data[:]) + + @property + def _na_value(self): + return {} + + @classmethod + def 
_concat_same_type(cls, to_concat): + data = list(itertools.chain.from_iterable([x.data for x in to_concat])) + return cls(data) + + +def make_data(): + # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer + return [collections.UserDict([ + (random.choice(string.ascii_letters), random.randint(0, 100)) + for _ in range(random.randint(0, 10))]) for _ in range(100)] diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py new file mode 100644 index 0000000000000..e0721bb1d8d1a --- /dev/null +++ b/pandas/tests/extension/json/test_json.py @@ -0,0 +1,73 @@ +import operator +import sys + +import pytest + + +from pandas.tests.extension import base + +from .array import JSONArray, JSONDtype, make_data + +pytestmark = pytest.mark.skipif(sys.version_info[0] == 2, + reason="Py2 doesn't have a UserDict") + + +@pytest.fixture +def dtype(): + return JSONDtype() + + +@pytest.fixture +def data(): + """Length-100 PeriodArray for semantics test.""" + return JSONArray(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return JSONArray([{}, {'a': 10}]) + + +@pytest.fixture +def na_value(): + return {} + + +@pytest.fixture +def na_cmp(): + return operator.eq + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + @pytest.mark.skip(reason="Unhashable") + def test_value_counts(self, all_data, dropna): + pass + + +class TestCasting(base.BaseCastingTests): + pass diff --git a/pandas/tests/internals/test_external_block.py b/pandas/tests/extension/test_external_block.py similarity index 94% rename from pandas/tests/internals/test_external_block.py rename to pandas/tests/extension/test_external_block.py index 2487363df8f99..991da41168aa0 100644 --- a/pandas/tests/internals/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -5,12 +5,12 @@ import pandas as pd from pandas.core.internals import ( - BlockManager, SingleBlockManager, ExtensionBlock) + BlockManager, SingleBlockManager, NonConsolidatableMixIn, Block) import pytest -class CustomBlock(ExtensionBlock): +class CustomBlock(NonConsolidatableMixIn, Block): _holder = np.ndarray
Follow-up number 1 to #19268. This adds changes to frame, series, dtypes, etc. (all the places other than `core/internals.py`) to support 3rd-party extension arrays. These changes were developed to support IntervalArrays (https://github.com/TomAugspurger/pandas/compare/pandas-array-upstream+fu1...TomAugspurger:pandas-array-upstream+fu1+interval, a separate PR) and https://github.com/continuumio/pandas-ip (external). Also closes https://github.com/pandas-dev/pandas/issues/19585
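As a hedged illustration of what this enables, here is a minimal usage sketch built on the `DecimalArray` test fixture added in this diff. It assumes you are running against this branch, so that `pandas/tests/extension/decimal/array.py` is importable; nothing below is new API beyond what the diff itself defines.

```python
import pandas as pd

# DecimalArray and make_data are the test fixtures defined in this diff
# (pandas/tests/extension/decimal/array.py); importing them assumes this
# branch is installed.
from pandas.tests.extension.decimal.array import DecimalArray, make_data

arr = DecimalArray(make_data())   # 100 random decimal.Decimal values

# Series/DataFrame now box third-party extension arrays directly:
ser = pd.Series(arr)
df = pd.DataFrame({"A": arr})
assert ser.dtype.name == "decimal"
assert df.shape == (len(arr), 1)

# isna/count dispatch to the array's own isna():
assert not ser.isna().any()
assert df.count(axis="columns").eq(1).all()

# Coercing to a mismatched dtype raises, per the new constructor checks:
try:
    pd.Series(arr, dtype="int64")
except ValueError as exc:
    print(exc)  # mentions the conflicting 'int64' and 'decimal' dtypes
```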
https://api.github.com/repos/pandas-dev/pandas/pulls/19520
2018-02-02T21:43:26Z
2018-02-23T11:47:24Z
2018-02-23T11:47:24Z
2018-02-23T12:26:36Z
Spellcheck of enhancingperf.rst
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 7afa852262a38..b786b1d0c134a 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -19,6 +19,13 @@ Enhancing Performance ********************* +In this part of the tutorial, we will investigate how to speed up certain +functions operating on pandas ``DataFrames`` using three different techniques: +Cython, Numba and :func:`pandas.eval`. We will see a speed improvement of ~200 +when we use Cython and Numba on a test function operating row-wise on the +``DataFrame``. Using :func:`pandas.eval` we will speed up a sum by an order of +~2. + .. _enhancingperf.cython: Cython (Writing C extensions for pandas) @@ -29,20 +36,20 @@ computationally heavy applications however, it can be possible to achieve sizeab speed-ups by offloading work to `cython <http://cython.org/>`__. This tutorial assumes you have refactored as much as possible in Python, for example -trying to remove for loops and making use of NumPy vectorization, it's always worth +by trying to remove for-loops and making use of NumPy vectorization. It's always worth optimising in Python first. This tutorial walks through a "typical" process of cythonizing a slow computation. -We use an `example from the cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`__ +We use an `example from the Cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`__ but in the context of pandas. Our final cythonized solution is around 100 times -faster than the pure Python. +faster than the pure Python solution. .. _enhancingperf.pure: Pure python ~~~~~~~~~~~ -We have a DataFrame to which we want to apply a function row-wise. +We have a ``DataFrame`` to which we want to apply a function row-wise. .. ipython:: python @@ -91,10 +98,10 @@ hence we'll concentrate our efforts cythonizing these two functions. .. _enhancingperf.plain: -Plain cython +Plain Cython ~~~~~~~~~~~~ -First we're going to need to import the cython magic function to ipython: +First we're going to need to import the Cython magic function to ipython: .. ipython:: python :okwarning: @@ -102,7 +109,7 @@ First we're going to need to import the cython magic function to ipython: %load_ext Cython -Now, let's simply copy our functions over to cython as is (the suffix +Now, let's simply copy our functions over to Cython as is (the suffix is here to distinguish between function versions): .. ipython:: @@ -177,8 +184,8 @@ in Python, so maybe we could minimize these by cythonizing the apply part. .. note:: - We are now passing ndarrays into the cython function, fortunately cython plays - very nicely with numpy. + We are now passing ndarrays into the Cython function, fortunately Cython plays + very nicely with NumPy. .. ipython:: @@ -213,9 +220,9 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra .. warning:: You can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter - to a cython function. Instead pass the actual ``ndarray`` using the - ``.values`` attribute of the Series. The reason is that the cython - definition is specific to an ndarray and not the passed Series. + to a Cython function. Instead pass the actual ``ndarray`` using the + ``.values`` attribute of the ``Series``. The reason is that the Cython + definition is specific to an ndarray and not the passed ``Series``. 
So, do not do this: @@ -223,7 +230,7 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra apply_integrate_f(df['a'], df['b'], df['N']) - But rather, use ``.values`` to get the underlying ``ndarray`` + But rather, use ``.values`` to get the underlying ``ndarray``: .. code-block:: python @@ -255,7 +262,7 @@ More advanced techniques ~~~~~~~~~~~~~~~~~~~~~~~~ There is still hope for improvement. Here's an example of using some more -advanced cython techniques: +advanced Cython techniques: .. ipython:: @@ -289,16 +296,17 @@ advanced cython techniques: In [4]: %timeit apply_integrate_f_wrap(df['a'].values, df['b'].values, df['N'].values) 1000 loops, best of 3: 987 us per loop -Even faster, with the caveat that a bug in our cython code (an off-by-one error, +Even faster, with the caveat that a bug in our Cython code (an off-by-one error, for example) might cause a segfault because memory access isn't checked. - +For more about ``boundscheck`` and ``wraparound``, see the Cython docs on +`compiler directives <http://cython.readthedocs.io/en/latest/src/reference/compilation.html?highlight=wraparound#compiler-directives>`__. .. _enhancingperf.numba: -Using numba +Using Numba ----------- -A recent alternative to statically compiling cython code, is to use a *dynamic jit-compiler*, ``numba``. +A recent alternative to statically compiling Cython code, is to use a *dynamic jit-compiler*, Numba. Numba gives you the power to speed up your applications with high performance functions written directly in Python. With a few annotations, array-oriented and math-heavy Python code can be just-in-time compiled to native machine instructions, similar in performance to C, C++ and Fortran, without having to switch languages or Python interpreters. @@ -306,16 +314,17 @@ Numba works by generating optimized machine code using the LLVM compiler infrast .. note:: - You will need to install ``numba``. This is easy with ``conda``, by using: ``conda install numba``, see :ref:`installing using miniconda<install.miniconda>`. + You will need to install Numba. This is easy with ``conda``, by using: ``conda install numba``, see :ref:`installing using miniconda<install.miniconda>`. .. note:: - As of ``numba`` version 0.20, pandas objects cannot be passed directly to numba-compiled functions. Instead, one must pass the ``numpy`` array underlying the ``pandas`` object to the numba-compiled function as demonstrated below. + As of Numba version 0.20, pandas objects cannot be passed directly to Numba-compiled functions. Instead, one must pass the NumPy array underlying the pandas object to the Numba-compiled function as demonstrated below. Jit ~~~ -Using ``numba`` to just-in-time compile your code. We simply take the plain Python code from above and annotate with the ``@jit`` decorator. +We demonstrate how to use Numba to just-in-time compile our code. We simply +take the plain Python code from above and annotate with the ``@jit`` decorator. .. code-block:: python @@ -346,17 +355,19 @@ Using ``numba`` to just-in-time compile your code. We simply take the plain Pyth result = apply_integrate_f_numba(df['a'].values, df['b'].values, df['N'].values) return pd.Series(result, index=df.index, name='result') -Note that we directly pass ``numpy`` arrays to the numba function. ``compute_numba`` is just a wrapper that provides a nicer interface by passing/returning pandas objects. +Note that we directly pass NumPy arrays to the Numba function. 
``compute_numba`` is just a wrapper that provides a nicer interface by passing/returning pandas objects. .. code-block:: ipython In [4]: %timeit compute_numba(df) 1000 loops, best of 3: 798 us per loop +In this example, using Numba was faster than Cython. + Vectorize ~~~~~~~~~ -``numba`` can also be used to write vectorized functions that do not require the user to explicitly +Numba can also be used to write vectorized functions that do not require the user to explicitly loop over the observations of a vector; a vectorized function will be applied to each row automatically. Consider the following toy example of doubling each observation: @@ -389,13 +400,23 @@ Caveats .. note:: - ``numba`` will execute on any function, but can only accelerate certain classes of functions. + Numba will execute on any function, but can only accelerate certain classes of functions. -``numba`` is best at accelerating functions that apply numerical functions to NumPy arrays. When passed a function that only uses operations it knows how to accelerate, it will execute in ``nopython`` mode. +Numba is best at accelerating functions that apply numerical functions to NumPy +arrays. When passed a function that only uses operations it knows how to +accelerate, it will execute in ``nopython`` mode. -If ``numba`` is passed a function that includes something it doesn't know how to work with -- a category that currently includes sets, lists, dictionaries, or string functions -- it will revert to ``object mode``. In ``object mode``, numba will execute but your code will not speed up significantly. If you would prefer that ``numba`` throw an error if it cannot compile a function in a way that speeds up your code, pass numba the argument ``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on troubleshooting ``numba`` modes, see the `numba troubleshooting page <http://numba.pydata.org/numba-doc/0.20.0/user/troubleshoot.html#the-compiled-code-is-too-slow>`__. +If Numba is passed a function that includes something it doesn't know how to +work with -- a category that currently includes sets, lists, dictionaries, or +string functions -- it will revert to ``object mode``. In ``object mode``, +Numba will execute but your code will not speed up significantly. If you would +prefer that Numba throw an error if it cannot compile a function in a way that +speeds up your code, pass Numba the argument +``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on +troubleshooting Numba modes, see the `Numba troubleshooting page +<http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#the-compiled-code-is-too-slow>`__. -Read more in the `numba docs <http://numba.pydata.org/>`__. +Read more in the `Numba docs <http://numba.pydata.org/>`__. .. _enhancingperf.eval: @@ -448,7 +469,7 @@ These operations are supported by :func:`pandas.eval`: - Attribute access, e.g., ``df.a`` - Subscript expressions, e.g., ``df[0]`` - Simple variable evaluation, e.g., ``pd.eval('df')`` (this is not very useful) -- Math functions, `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`, +- Math functions: `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`, `sqrt`, `sinh`, `cosh`, `tanh`, `arcsin`, `arccos`, `arctan`, `arccosh`, `arcsinh`, `arctanh`, `abs` and `arctan2`. @@ -581,7 +602,7 @@ on the original ``DataFrame`` or return a copy with the new column. For backwards compatibility, ``inplace`` defaults to ``True`` if not specified. 
This will change in a future version of pandas - if your code depends on an inplace assignment you should update to explicitly - set ``inplace=True`` + set ``inplace=True``. .. ipython:: python @@ -780,7 +801,7 @@ Technical Minutia Regarding Expression Evaluation Expressions that would result in an object dtype or involve datetime operations (because of ``NaT``) must be evaluated in Python space. The main reason for this behavior is to maintain backwards compatibility with versions of NumPy < -1.7. In those versions of ``numpy`` a call to ``ndarray.astype(str)`` will +1.7. In those versions of NumPy a call to ``ndarray.astype(str)`` will truncate any strings that are more than 60 characters in length. Second, we can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be evaluated in Python space. diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst index 2e224f103a95e..260d8aa32ef52 100644 --- a/doc/source/sparse.rst +++ b/doc/source/sparse.rst @@ -17,11 +17,11 @@ Sparse data structures .. note:: The ``SparsePanel`` class has been removed in 0.19.0 -We have implemented "sparse" versions of Series and DataFrame. These are not sparse +We have implemented "sparse" versions of ``Series`` and ``DataFrame``. These are not sparse in the typical "mostly 0". Rather, you can view these objects as being "compressed" where any data matching a specific value (``NaN`` / missing value, though any value can be chosen) is omitted. A special ``SparseIndex`` object tracks where data has been -"sparsified". This will make much more sense in an example. All of the standard pandas +"sparsified". This will make much more sense with an example. All of the standard pandas data structures have a ``to_sparse`` method: .. ipython:: python @@ -32,7 +32,7 @@ data structures have a ``to_sparse`` method: sts The ``to_sparse`` method takes a ``kind`` argument (for the sparse index, see -below) and a ``fill_value``. So if we had a mostly zero Series, we could +below) and a ``fill_value``. So if we had a mostly zero ``Series``, we could convert it to sparse with ``fill_value=0``: .. ipython:: python @@ -40,7 +40,7 @@ convert it to sparse with ``fill_value=0``: ts.fillna(0).to_sparse(fill_value=0) The sparse objects exist for memory efficiency reasons. Suppose you had a -large, mostly NA DataFrame: +large, mostly NA ``DataFrame``: .. ipython:: python
Spellcheck of the docs, mostly `enhancingperf.rst`, plus a sentence or two in `sparse.rst`. * Added a short introductory paragraph on enhancing performance. * Backticks ` `` ` around Series, DataFrame. * Typeset variants of ` ``numba`` `, `(n|N)umba`, `(c|C)ython`, etc. as **Numba** and **Cython**. * Minor rephrasing of sentences, spelling, periods, colons, etc. Cheers. Comments very welcome :)
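For readers cross-checking the Numba section this PR retouches, here is a minimal, self-contained sketch of the `@jit` pattern the doc describes. It paraphrases the doc's toy doubling example and assumes `numba` is installed; it is illustrative, not part of this diff.

```python
import numba
import numpy as np

@numba.jit(nopython=True)  # raise instead of silently falling back to object mode
def double_every_value(x):
    out = np.empty_like(x)
    for i in range(len(x)):  # the explicit loop is what Numba compiles away
        out[i] = x[i] * 2.0
    return out

arr = np.arange(10.0)
print(double_every_value(arr))  # [ 0.  2.  4. ... 18.]
```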
https://api.github.com/repos/pandas-dev/pandas/pulls/19516
2018-02-02T19:39:49Z
2018-02-03T20:32:57Z
2018-02-03T20:32:56Z
2018-02-03T20:33:09Z
ops cleanup, named functions instead of lambdas
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 6ea4a81cb52a1..6db84aedce7e7 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -42,6 +42,60 @@ ABCSparseSeries, ABCSparseArray) +# ----------------------------------------------------------------------------- +# Reversed Operations not available in the stdlib operator module. +# Defining these instead of using lambdas allows us to reference them by name. + +def radd(left, right): + return right + left + + +def rsub(left, right): + return right - left + + +def rmul(left, right): + return right * left + + +def rdiv(left, right): + return right / left + + +def rtruediv(left, right): + return right / left + + +def rfloordiv(left, right): + return right // left + + +def rmod(left, right): + return right % left + + +def rdivmod(left, right): + return divmod(right, left) + + +def rpow(left, right): + return right ** left + + +def rand_(left, right): + return operator.and_(right, left) + + +def ror_(left, right): + return operator.or_(right, left) + + +def rxor(left, right): + return operator.xor(right, left) + + +# ----------------------------------------------------------------------------- + def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. @@ -140,64 +194,51 @@ def _get_frame_op_default_axis(name): _op_descriptions = { 'add': {'op': '+', 'desc': 'Addition', - 'reversed': False, 'reverse': 'radd'}, 'sub': {'op': '-', 'desc': 'Subtraction', - 'reversed': False, 'reverse': 'rsub'}, 'mul': {'op': '*', 'desc': 'Multiplication', - 'reversed': False, 'reverse': 'rmul'}, 'mod': {'op': '%', 'desc': 'Modulo', - 'reversed': False, 'reverse': 'rmod'}, 'pow': {'op': '**', 'desc': 'Exponential power', - 'reversed': False, 'reverse': 'rpow'}, 'truediv': {'op': '/', 'desc': 'Floating division', - 'reversed': False, 'reverse': 'rtruediv'}, 'floordiv': {'op': '//', 'desc': 'Integer division', - 'reversed': False, 'reverse': 'rfloordiv'}, 'divmod': {'op': 'divmod', 'desc': 'Integer division and modulo', - 'reversed': False, 'reverse': None}, 'eq': {'op': '==', 'desc': 'Equal to', - 'reversed': False, 'reverse': None}, 'ne': {'op': '!=', 'desc': 'Not equal to', - 'reversed': False, 'reverse': None}, 'lt': {'op': '<', 'desc': 'Less than', - 'reversed': False, 'reverse': None}, 'le': {'op': '<=', 'desc': 'Less than or equal to', - 'reversed': False, 'reverse': None}, 'gt': {'op': '>', 'desc': 'Greater than', - 'reversed': False, 'reverse': None}, 'ge': {'op': '>=', 'desc': 'Greater than or equal to', - 'reversed': False, 'reverse': None}} _op_names = list(_op_descriptions.keys()) for key in _op_names: + _op_descriptions[key]['reversed'] = False reverse_op = _op_descriptions[key]['reverse'] if reverse_op is not None: _op_descriptions[reverse_op] = _op_descriptions[key].copy() @@ -392,7 +433,7 @@ def names(x): # yapf: disable new_methods = dict( add=arith_method(operator.add, names('add'), op('+')), - radd=arith_method(lambda x, y: y + x, names('radd'), op('+')), + radd=arith_method(radd, names('radd'), op('+')), sub=arith_method(operator.sub, names('sub'), op('-')), mul=arith_method(operator.mul, names('mul'), op('*')), truediv=arith_method(operator.truediv, names('truediv'), op('/')), @@ -404,13 +445,11 @@ def names(x): # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility rmul=arith_method(operator.mul, names('rmul'), op('*')), - rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-')), - rtruediv=arith_method(lambda x, y: 
operator.truediv(y, x), - names('rtruediv'), op('/')), - rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), - names('rfloordiv'), op('//')), - rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**')), - rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'))) + rsub=arith_method(rsub, names('rsub'), op('-')), + rtruediv=arith_method(rtruediv, names('rtruediv'), op('/')), + rfloordiv=arith_method(rfloordiv, names('rfloordiv'), op('//')), + rpow=arith_method(rpow, names('rpow'), op('**')), + rmod=arith_method(rmod, names('rmod'), op('%'))) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] @@ -430,12 +469,9 @@ def names(x): or_=bool_method(operator.or_, names('or_'), op('|')), # For some reason ``^`` wasn't used in original. xor=bool_method(operator.xor, names('xor'), op('^')), - rand_=bool_method(lambda x, y: operator.and_(y, x), - names('rand_'), op('&')), - ror_=bool_method(lambda x, y: operator.or_(y, x), - names('ror_'), op('|')), - rxor=bool_method(lambda x, y: operator.xor(y, x), - names('rxor'), op('^')))) + rand_=bool_method(rand_, names('rand_'), op('&')), + ror_=bool_method(ror_, names('ror_'), op('|')), + rxor=bool_method(rxor, names('rxor'), op('^')))) if have_divmod: # divmod doesn't have an op that is supported by numexpr new_methods['divmod'] = arith_method(divmod, names('divmod'), None)
Lots more ops work is in the pipeline; these are just the easy bits that don't need new tests. Having reversed funcs that can be referenced by name will make things easier in some of the Index classmethods.
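A minimal, self-contained sketch of the pattern this diff introduces — module-level reversed ops instead of inline lambdas (the assertions are illustrative, not from the PR):

```python
import operator

def radd(left, right):
    # Reversed addition: evaluates right + left.
    return right + left

def rsub(left, right):
    # Reversed subtraction: evaluates right - left.
    return right - left

# Unlike a lambda, a named function carries a meaningful __name__,
# so generated arithmetic methods are easier to identify when debugging.
assert radd.__name__ == "radd"
assert radd(1, 10) == operator.add(10, 1)
assert rsub(1, 10) == 9
```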
https://api.github.com/repos/pandas-dev/pandas/pulls/19515
2018-02-02T18:58:06Z
2018-02-04T16:18:11Z
2018-02-04T16:18:11Z
2018-02-04T16:39:24Z
DOC: minor groupby and resampler improvements
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 413138b1e52fc..407fad39ba232 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -1219,8 +1219,8 @@ see :ref:`here <basics.pipe>`. Combining ``.groupby`` and ``.pipe`` is often useful when you need to reuse GroupBy objects. -For an example, imagine having a DataFrame with columns for stores, products, -revenue and sold quantity. We'd like to do a groupwise calculation of *prices* +As an example, imagine having a DataFrame with columns for stores, products, +revenue and quantity sold. We'd like to do a groupwise calculation of *prices* (i.e. revenue/quantity) per store and per product. We could do this in a multi-step operation, but expressing it in terms of piping can make the code more readable. First we set the data: @@ -1230,7 +1230,8 @@ code more readable. First we set the data: import numpy as np n = 1000 df = pd.DataFrame({'Store': np.random.choice(['Store_1', 'Store_2'], n), - 'Product': np.random.choice(['Product_1', 'Product_2', 'Product_3'], n), + 'Product': np.random.choice(['Product_1', + 'Product_2'], n), 'Revenue': (np.random.random(n)*50+10).round(2), 'Quantity': np.random.randint(1, 10, size=n)}) df.head(2) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d34a85b5b4388..6356f397001ce 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5561,6 +5561,10 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, reduce the dimensionality of the return type if possible, otherwise return a consistent type + Returns + ------- + GroupBy object + Examples -------- DataFrame results @@ -5572,10 +5576,15 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, >>> data.groupby(['col1', 'col2']).mean() - Returns - ------- - GroupBy object + Notes + ----- + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. + See also + -------- + resample : Convenience method for frequency conversion and resampling + of time series. """ from pandas.core.groupby import groupby @@ -5774,8 +5783,16 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, .. versionadded:: 0.19.0 + Returns + ------- + Resampler object + Notes ----- + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ + for more. + To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. @@ -5941,6 +5958,10 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, a b c d 2000-01-01 00:00:00 0 6 12 18 2000-01-01 00:03:00 0 4 8 12 + + See also + -------- + groupby : Group by mapping, function, label, or list of labels. """ from pandas.core.resample import (resample, _maybe_process_deprecations) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 2c1deb9db7bba..88af80e295d74 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -230,7 +230,7 @@ Notes ----- See more `here -<http://pandas.pydata.org/pandas-docs/stable/groupby.html#pipe>`_ +<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_ Examples --------
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Some minor improvements to groupby and resampler. In particular: * Corrected a link under ``groupby.pipe`` * Added "Notes" and "See Also" sections for NDFrame.groupby/resample * Shortened an example in groupby.rst
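For context, the groupwise-price example that the shortened doc snippet refers to looks roughly like this (a sketch of the user-guide code; the random data itself is immaterial):

```python
import numpy as np
import pandas as pd

n = 1000
df = pd.DataFrame({
    'Store': np.random.choice(['Store_1', 'Store_2'], n),
    'Product': np.random.choice(['Product_1', 'Product_2'], n),
    'Revenue': (np.random.random(n) * 50 + 10).round(2),
    'Quantity': np.random.randint(1, 10, size=n),
})

# Groupwise price (revenue / quantity) per store and product,
# expressed with .pipe so the whole computation reads left to right.
prices = (df.groupby(['Store', 'Product'])
            .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum())
            .unstack().round(2))
```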
https://api.github.com/repos/pandas-dev/pandas/pulls/19514
2018-02-02T18:43:44Z
2018-02-04T21:44:40Z
2018-02-04T21:44:40Z
2018-05-18T18:09:30Z
Frame-specific parts of #19504
diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py new file mode 100644 index 0000000000000..fa589a0aa4817 --- /dev/null +++ b/pandas/tests/frame/test_timezones.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +""" +Tests for DataFrame timezone-related methods +""" +from datetime import datetime + +import pytest +import pytz +import numpy as np + +import pandas.util.testing as tm +from pandas.compat import lrange +from pandas.core.indexes.datetimes import date_range +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas import Series, DataFrame + + +class TestDataFrameTimezones(object): + def test_frame_from_records_utc(self): + rec = {'datum': 1.5, + 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)} + + # it works + DataFrame.from_records([rec], index='begin_time') + + def test_frame_tz_localize(self): + rng = date_range('1/1/2011', periods=100, freq='H') + + df = DataFrame({'a': 1}, index=rng) + result = df.tz_localize('utc') + expected = DataFrame({'a': 1}, rng.tz_localize('UTC')) + assert result.index.tz.zone == 'UTC' + tm.assert_frame_equal(result, expected) + + df = df.T + result = df.tz_localize('utc', axis=1) + assert result.columns.tz.zone == 'UTC' + tm.assert_frame_equal(result, expected.T) + + def test_frame_tz_convert(self): + rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') + + df = DataFrame({'a': 1}, index=rng) + result = df.tz_convert('Europe/Berlin') + expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin')) + assert result.index.tz.zone == 'Europe/Berlin' + tm.assert_frame_equal(result, expected) + + df = df.T + result = df.tz_convert('Europe/Berlin', axis=1) + assert result.columns.tz.zone == 'Europe/Berlin' + tm.assert_frame_equal(result, expected.T) + + def test_frame_join_tzaware(self): + test1 = DataFrame(np.zeros((6, 3)), + index=date_range("2012-11-15 00:00:00", periods=6, + freq="100L", tz="US/Central")) + test2 = DataFrame(np.zeros((3, 3)), + index=date_range("2012-11-15 00:00:00", periods=3, + freq="250L", tz="US/Central"), + columns=lrange(3, 6)) + + result = test1.join(test2, how='outer') + ex_index = test1.index.union(test2.index) + + tm.assert_index_equal(result.index, ex_index) + assert result.index.tz.zone == 'US/Central' + + def test_frame_add_tz_mismatch_converts_to_utc(self): + rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') + df = DataFrame(np.random.randn(len(rng)), index=rng, columns=['a']) + + df_moscow = df.tz_convert('Europe/Moscow') + result = df + df_moscow + assert result.index.tz is pytz.utc + + result = df_moscow + df + assert result.index.tz is pytz.utc + + def test_frame_align_aware(self): + idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') + idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern') + df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) + df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) + new1, new2 = df1.align(df2) + assert df1.index.tz == new1.index.tz + assert df2.index.tz == new2.index.tz + + # different timezones convert to UTC + + # frame with frame + df1_central = df1.tz_convert('US/Central') + new1, new2 = df1.align(df1_central) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + # frame with Series + new1, new2 = df1.align(df1_central[0], axis=0) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + df1[0].align(df1_central, axis=0) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + @pytest.mark.parametrize('tz', ['US/Eastern', 
'dateutil/US/Eastern']) + def test_frame_no_datetime64_dtype(self, tz): + # after GH#7822 + # these retain the timezones on dict construction + dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') + dr_tz = dr.tz_localize(tz) + df = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) + tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo) + assert df['B'].dtype == tz_expected + + # GH#2810 (with timezones) + datetimes_naive = [ts.to_pydatetime() for ts in dr] + datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz] + df = DataFrame({'dr': dr, + 'dr_tz': dr_tz, + 'datetimes_naive': datetimes_naive, + 'datetimes_with_tz': datetimes_with_tz}) + result = df.get_dtype_counts().sort_index() + expected = Series({'datetime64[ns]': 2, + str(tz_expected): 2}).sort_index() + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern']) + def test_frame_reset_index(self, tz): + dr = date_range('2012-06-02', periods=10, tz=tz) + df = DataFrame(np.random.randn(len(dr)), dr) + roundtripped = df.reset_index().set_index('index') + xp = df.index.tz + rs = roundtripped.index.tz + assert xp == rs diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index cc5f4d30f9aaf..e47be69b79feb 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -16,13 +16,11 @@ import pandas.tseries.offsets as offsets from pandas.compat import lrange, zip from pandas.core.indexes.datetimes import bdate_range, date_range -from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib from pandas._libs.tslibs import timezones, conversion -from pandas import (Index, Series, DataFrame, isna, Timestamp, NaT, +from pandas import (Index, Series, isna, Timestamp, NaT, DatetimeIndex, to_datetime) -from pandas.util.testing import (assert_frame_equal, assert_series_equal, - set_timezone) +from pandas.util.testing import assert_series_equal, set_timezone class FixedOffset(tzinfo): @@ -786,29 +784,6 @@ def test_to_datetime_tzlocal(self): result = to_datetime(arr, utc=True) assert result.tz is pytz.utc - def test_frame_no_datetime64_dtype(self): - - # after 7822 - # these retain the timezones on dict construction - - dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) - e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) - tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo) - assert e['B'].dtype == tz_expected - - # GH 2810 (with timezones) - datetimes_naive = [ts.to_pydatetime() for ts in dr] - datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz] - df = DataFrame({'dr': dr, - 'dr_tz': dr_tz, - 'datetimes_naive': datetimes_naive, - 'datetimes_with_tz': datetimes_with_tz}) - result = df.get_dtype_counts().sort_index() - expected = Series({'datetime64[ns]': 2, - str(tz_expected): 2}).sort_index() - assert_series_equal(result, expected) - def test_hongkong_tz_convert(self): # #1673 dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong') @@ -872,21 +847,6 @@ def test_convert_datetime_list(self): assert dr.tz == dr2.tz assert dr2.name == 'foo' - def test_frame_from_records_utc(self): - rec = {'datum': 1.5, - 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)} - - # it works - DataFrame.from_records([rec], index='begin_time') - - def test_frame_reset_index(self): - dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern')) - df = DataFrame(np.random.randn(len(dr)), dr) - roundtripped = df.reset_index().set_index('index') - 
xp = df.index.tz - rs = roundtripped.index.tz - assert xp == rs - def test_dateutil_tzoffset_support(self): values = [188.5, 328.25] tzinfo = tzoffset(None, 7200) @@ -1289,7 +1249,7 @@ def test_tz_localize_roundtrip(self): tm.assert_index_equal(reset, idx) assert reset.tzinfo is None - def test_series_frame_tz_localize(self): + def test_series_tz_localize(self): rng = date_range('1/1/2011', periods=100, freq='H') ts = Series(1, index=rng) @@ -1297,41 +1257,19 @@ def test_series_frame_tz_localize(self): result = ts.tz_localize('utc') assert result.index.tz.zone == 'UTC' - df = DataFrame({'a': 1}, index=rng) - result = df.tz_localize('utc') - expected = DataFrame({'a': 1}, rng.tz_localize('UTC')) - assert result.index.tz.zone == 'UTC' - assert_frame_equal(result, expected) - - df = df.T - result = df.tz_localize('utc', axis=1) - assert result.columns.tz.zone == 'UTC' - assert_frame_equal(result, expected.T) - # Can't localize if already tz-aware rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') ts = Series(1, index=rng) tm.assert_raises_regex(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern') - def test_series_frame_tz_convert(self): + def test_series_tz_convert(self): rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern') ts = Series(1, index=rng) result = ts.tz_convert('Europe/Berlin') assert result.index.tz.zone == 'Europe/Berlin' - df = DataFrame({'a': 1}, index=rng) - result = df.tz_convert('Europe/Berlin') - expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin')) - assert result.index.tz.zone == 'Europe/Berlin' - assert_frame_equal(result, expected) - - df = df.T - result = df.tz_convert('Europe/Berlin', axis=1) - assert result.columns.tz.zone == 'Europe/Berlin' - assert_frame_equal(result, expected.T) - # can't convert tz-naive rng = date_range('1/1/2011', periods=200, freq='D') ts = Series(1, index=rng) @@ -1389,20 +1327,6 @@ def test_join_aware(self): pytest.raises(Exception, ts.__add__, ts_utc) pytest.raises(Exception, ts_utc.__add__, ts) - test1 = DataFrame(np.zeros((6, 3)), - index=date_range("2012-11-15 00:00:00", periods=6, - freq="100L", tz="US/Central")) - test2 = DataFrame(np.zeros((3, 3)), - index=date_range("2012-11-15 00:00:00", periods=3, - freq="250L", tz="US/Central"), - columns=lrange(3, 6)) - - result = test1.join(test2, how='outer') - ex_index = test1.index.union(test2.index) - - tm.assert_index_equal(result.index, ex_index) - assert result.index.tz.zone == 'US/Central' - # non-overlapping rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central") @@ -1413,34 +1337,13 @@ def test_join_aware(self): result = rng.union(rng2) assert result.tz.zone == 'UTC' - def test_align_aware(self): + def test_series_align_aware(self): idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern') - idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern') - df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) - df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) - new1, new2 = df1.align(df2) - assert df1.index.tz == new1.index.tz - assert df2.index.tz == new2.index.tz - + ser = Series(np.random.randn(len(idx1)), index=idx1) + ser_central = ser.tz_convert('US/Central') # # different timezones convert to UTC - # frame - df1_central = df1.tz_convert('US/Central') - new1, new2 = df1.align(df1_central) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - # series - new1, new2 = df1[0].align(df1_central[0]) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - # combination - 
new1, new2 = df1.align(df1_central[0], axis=0) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC - - df1[0].align(df1_central, axis=0) + new1, new2 = ser.align(ser_central) assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC @@ -1523,7 +1426,7 @@ def test_append_aware_naive(self): assert ts_result.index.equals(ts1.index.astype(object).append( ts2.index)) - def test_equal_join_ensure_utc(self): + def test_series_add_tz_mismatch_converts_to_utc(self): rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern') ts = Series(np.random.randn(len(rng)), index=rng) @@ -1535,14 +1438,6 @@ def test_equal_join_ensure_utc(self): result = ts_moscow + ts assert result.index.tz is pytz.utc - df = DataFrame({'a': ts}) - df_moscow = df.tz_convert('Europe/Moscow') - result = df + df_moscow - assert result.index.tz is pytz.utc - - result = df_moscow + df - assert result.index.tz is pytz.utc - def test_arith_utc_convert(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
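For context, the core behaviour these frame tests lock down, in sketch form (the zone names are just the ones used in the tests):

```python
import pandas as pd

rng = pd.date_range('1/1/2011', periods=3, freq='H')
df = pd.DataFrame({'a': 1}, index=rng)

# Localize the naive index to UTC, then convert to another zone;
# tz_convert requires an already tz-aware index.
localized = df.tz_localize('utc')
converted = localized.tz_convert('US/Eastern')
assert str(localized.index.tz) == 'UTC'
```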
https://api.github.com/repos/pandas-dev/pandas/pulls/19512
2018-02-02T16:26:43Z
2018-02-04T16:05:30Z
2018-02-04T16:05:30Z
2018-02-04T16:40:22Z
split Timestamp tests off of #19504
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index eeec70cc234f5..7a5c6feb8b651 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -2,11 +2,18 @@ """ Tests for Timestamp timezone-related methods """ +from datetime import date, timedelta +from distutils.version import LooseVersion import pytest +import pytz from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError +import dateutil +from dateutil.tz import gettz, tzoffset import pandas.util.testing as tm +import pandas.util._test_decorators as td + from pandas import Timestamp, NaT @@ -14,6 +21,22 @@ class TestTimestampTZOperations(object): # -------------------------------------------------------------- # Timestamp.tz_localize + def test_tz_localize_ambiguous_bool(self): + # make sure that we are correctly accepting bool values as ambiguous + # GH#14402 + ts = Timestamp('2015-11-01 01:00:03') + expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') + expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') + + with pytest.raises(pytz.AmbiguousTimeError): + ts.tz_localize('US/Central') + + result = ts.tz_localize('US/Central', ambiguous=True) + assert result == expected0 + + result = ts.tz_localize('US/Central', ambiguous=False) + assert result == expected1 + def test_tz_localize_ambiguous(self): ts = Timestamp('2014-11-02 01:00') ts_dst = ts.tz_localize('US/Eastern', ambiguous=True) @@ -70,6 +93,55 @@ def test_tz_localize_roundtrip(self, stamp, tz): assert reset == ts assert reset.tzinfo is None + def test_tz_localize_ambiguous_compat(self): + # validate that pytz and dateutil are compat for dst + # when the transition happens + naive = Timestamp('2013-10-27 01:00:00') + + pytz_zone = 'Europe/London' + dateutil_zone = 'dateutil/Europe/London' + result_pytz = naive.tz_localize(pytz_zone, ambiguous=0) + result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0) + assert result_pytz.value == result_dateutil.value + assert result_pytz.value == 1382835600000000000 + + if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'): + # dateutil 2.6 buggy w.r.t. ambiguous=0 + # see gh-14621 + # see https://github.com/dateutil/dateutil/issues/321 + assert (result_pytz.to_pydatetime().tzname() == + result_dateutil.to_pydatetime().tzname()) + assert str(result_pytz) == str(result_dateutil) + elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'): + # fixed ambiguous behavior + assert result_pytz.to_pydatetime().tzname() == 'GMT' + assert result_dateutil.to_pydatetime().tzname() == 'BST' + assert str(result_pytz) != str(result_dateutil) + + # 1 hour difference + result_pytz = naive.tz_localize(pytz_zone, ambiguous=1) + result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1) + assert result_pytz.value == result_dateutil.value + assert result_pytz.value == 1382832000000000000 + + # dateutil < 2.6 is buggy w.r.t. 
ambiguous timezones + if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'): + # see gh-14621 + assert str(result_pytz) == str(result_dateutil) + assert (result_pytz.to_pydatetime().tzname() == + result_dateutil.to_pydatetime().tzname()) + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern'), + 'US/Eastern', 'dateutil/US/Eastern']) + def test_timestamp_tz_localize(self, tz): + stamp = Timestamp('3/11/2012 04:00') + + result = stamp.tz_localize(tz) + expected = Timestamp('3/11/2012 04:00', tz=tz) + assert result.hour == expected.hour + assert result == expected + # ------------------------------------------------------------------ # Timestamp.tz_convert @@ -85,3 +157,120 @@ def test_tz_convert_roundtrip(self, stamp, tz): assert reset == Timestamp(stamp) assert reset.tzinfo is None assert reset == converted.tz_convert('UTC').tz_localize(None) + + @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern']) + def test_astimezone(self, tzstr): + # astimezone is an alias for tz_convert, so keep it with + # the tz_convert tests + utcdate = Timestamp('3/11/2012 22:00', tz='UTC') + expected = utcdate.tz_convert(tzstr) + result = utcdate.astimezone(tzstr) + assert expected == result + assert isinstance(result, Timestamp) + + @td.skip_if_windows + def test_tz_convert_utc_with_system_utc(self): + from pandas._libs.tslibs.timezones import maybe_get_tz + + # from system utc to real utc + ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) + # check that the time hasn't changed. + assert ts == ts.tz_convert(dateutil.tz.tzutc()) + + # from system utc to real utc + ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) + # check that the time hasn't changed. + assert ts == ts.tz_convert(dateutil.tz.tzutc()) + + # ------------------------------------------------------------------ + # Timestamp.__init__ with tz str or tzinfo + + def test_timestamp_constructor_tz_utc(self): + utc_stamp = Timestamp('3/11/2012 05:00', tz='utc') + assert utc_stamp.tzinfo is pytz.utc + assert utc_stamp.hour == 5 + + utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc') + assert utc_stamp.hour == 5 + + def test_timestamp_to_datetime_tzoffset(self): + tzinfo = tzoffset(None, 7200) + expected = Timestamp('3/11/2012 04:00', tz=tzinfo) + result = Timestamp(expected.to_pydatetime()) + assert expected == result + + def test_timestamp_constructor_near_dst_boundary(self): + # GH#11481 & GH#15777 + # Naive string timestamps were being localized incorrectly + # with tz_convert_single instead of tz_localize_to_utc + + for tz in ['Europe/Brussels', 'Europe/Prague']: + result = Timestamp('2015-10-25 01:00', tz=tz) + expected = Timestamp('2015-10-25 01:00').tz_localize(tz) + assert result == expected + + with pytest.raises(pytz.AmbiguousTimeError): + Timestamp('2015-10-25 02:00', tz=tz) + + result = Timestamp('2017-03-26 01:00', tz='Europe/Paris') + expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris') + assert result == expected + + with pytest.raises(pytz.NonExistentTimeError): + Timestamp('2017-03-26 02:00', tz='Europe/Paris') + + # GH#11708 + naive = Timestamp('2015-11-18 10:00:00') + result = naive.tz_localize('UTC').tz_convert('Asia/Kolkata') + expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata') + assert result == expected + + # GH#15823 + result = Timestamp('2017-03-26 00:00', tz='Europe/Paris') + expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris') + assert result == expected + + result = 
Timestamp('2017-03-26 01:00', tz='Europe/Paris') + expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris') + assert result == expected + + with pytest.raises(pytz.NonExistentTimeError): + Timestamp('2017-03-26 02:00', tz='Europe/Paris') + + result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris') + naive = Timestamp(result.value) + expected = naive.tz_localize('UTC').tz_convert('Europe/Paris') + assert result == expected + + result = Timestamp('2017-03-26 03:00', tz='Europe/Paris') + expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris') + assert result == expected + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern'), + 'US/Eastern', 'dateutil/US/Eastern']) + def test_timestamp_constructed_by_date_and_tz(self, tz): + # GH#2993, Timestamp cannot be constructed by datetime.date + # and tz correctly + + result = Timestamp(date(2012, 3, 11), tz=tz) + + expected = Timestamp('3/11/2012', tz=tz) + assert result.hour == expected.hour + assert result == expected + + @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'), + gettz('US/Eastern'), + 'US/Eastern', 'dateutil/US/Eastern']) + def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz): + # GH#1389 + + # 4 hours before DST transition + stamp = Timestamp('3/10/2012 22:00', tz=tz) + + result = stamp + timedelta(hours=6) + + # spring forward, + "7" hours + expected = Timestamp('3/11/2012 05:00', tz=tz) + + assert result == expected diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index cc5f4d30f9aaf..37f91d5165fe4 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -9,7 +9,7 @@ from pytz import NonExistentTimeError from distutils.version import LooseVersion from dateutil.tz import tzlocal, tzoffset -from datetime import datetime, timedelta, tzinfo, date +from datetime import datetime, timedelta, tzinfo import pandas.util.testing as tm import pandas.util._test_decorators as td @@ -121,120 +121,6 @@ def test_localize_utc_conversion_explicit(self): pytest.raises(NonExistentTimeError, rng.tz_localize, self.tz('US/Eastern')) - def test_timestamp_tz_localize(self): - stamp = Timestamp('3/11/2012 04:00') - - result = stamp.tz_localize(self.tzstr('US/Eastern')) - expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern')) - assert result.hour == expected.hour - assert result == expected - - def test_timestamp_tz_localize_explicit(self): - stamp = Timestamp('3/11/2012 04:00') - - result = stamp.tz_localize(self.tz('US/Eastern')) - expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern')) - assert result.hour == expected.hour - assert result == expected - - def test_timestamp_constructed_by_date_and_tz(self): - # Fix Issue 2993, Timestamp cannot be constructed by datetime.date - # and tz correctly - - result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern')) - - expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern')) - assert result.hour == expected.hour - assert result == expected - - def test_timestamp_constructed_by_date_and_tz_explicit(self): - # Fix Issue 2993, Timestamp cannot be constructed by datetime.date - # and tz correctly - - result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern')) - - expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern')) - assert result.hour == expected.hour - assert result == expected - - def test_timestamp_constructor_near_dst_boundary(self): - # GH 11481 & 15777 - # Naive string timestamps were being 
localized incorrectly - # with tz_convert_single instead of tz_localize_to_utc - - for tz in ['Europe/Brussels', 'Europe/Prague']: - result = Timestamp('2015-10-25 01:00', tz=tz) - expected = Timestamp('2015-10-25 01:00').tz_localize(tz) - assert result == expected - - with pytest.raises(pytz.AmbiguousTimeError): - Timestamp('2015-10-25 02:00', tz=tz) - - result = Timestamp('2017-03-26 01:00', tz='Europe/Paris') - expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris') - assert result == expected - - with pytest.raises(pytz.NonExistentTimeError): - Timestamp('2017-03-26 02:00', tz='Europe/Paris') - - # GH 11708 - result = to_datetime("2015-11-18 15:30:00+05:30").tz_localize( - 'UTC').tz_convert('Asia/Kolkata') - expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata') - assert result == expected - - # GH 15823 - result = Timestamp('2017-03-26 00:00', tz='Europe/Paris') - expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris') - assert result == expected - - result = Timestamp('2017-03-26 01:00', tz='Europe/Paris') - expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris') - assert result == expected - - with pytest.raises(pytz.NonExistentTimeError): - Timestamp('2017-03-26 02:00', tz='Europe/Paris') - result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris') - expected = Timestamp(result.value).tz_localize( - 'UTC').tz_convert('Europe/Paris') - assert result == expected - - result = Timestamp('2017-03-26 03:00', tz='Europe/Paris') - expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris') - assert result == expected - - def test_timestamp_to_datetime_tzoffset(self): - tzinfo = tzoffset(None, 7200) - expected = Timestamp('3/11/2012 04:00', tz=tzinfo) - result = Timestamp(expected.to_pydatetime()) - assert expected == result - - def test_timedelta_push_over_dst_boundary(self): - # #1389 - - # 4 hours before DST transition - stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern')) - - result = stamp + timedelta(hours=6) - - # spring forward, + "7" hours - expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) - - assert result == expected - - def test_timedelta_push_over_dst_boundary_explicit(self): - # #1389 - - # 4 hours before DST transition - stamp = Timestamp('3/10/2012 22:00', tz=self.tz('US/Eastern')) - - result = stamp + timedelta(hours=6) - - # spring forward, + "7" hours - expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern')) - - assert result == expected - def test_tz_localize_dti(self): dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256', freq='L') @@ -269,13 +155,6 @@ def test_tz_localize_empty_series(self): ts2 = ts.tz_localize(self.tzstr('US/Eastern')) assert self.cmptz(ts2.index.tz, self.tz('US/Eastern')) - def test_astimezone(self): - utc = Timestamp('3/11/2012 22:00', tz='UTC') - expected = utc.tz_convert(self.tzstr('US/Eastern')) - result = utc.astimezone(self.tzstr('US/Eastern')) - assert expected == result - assert isinstance(result, Timestamp) - def test_create_with_tz(self): stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) assert stamp.hour == 5 @@ -285,13 +164,6 @@ def test_create_with_tz(self): assert stamp == rng[1] - utc_stamp = Timestamp('3/11/2012 05:00', tz='utc') - assert utc_stamp.tzinfo is pytz.utc - assert utc_stamp.hour == 5 - - utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc') - assert utc_stamp.hour == 5 - def test_create_with_fixed_tz(self): off = FixedOffset(420, '+07:00') start = datetime(2012, 3, 11, 5, 0, 0, 
tzinfo=off) @@ -593,16 +465,6 @@ def test_ambiguous_bool(self): expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central') expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central') - def f(): - t.tz_localize('US/Central') - pytest.raises(pytz.AmbiguousTimeError, f) - - result = t.tz_localize('US/Central', ambiguous=True) - assert result == expected0 - - result = t.tz_localize('US/Central', ambiguous=False) - assert result == expected1 - s = Series([t]) expected0 = Series([expected0]) expected1 = Series([expected1]) @@ -988,20 +850,6 @@ def normalize(self, ts): # no-op for dateutil return ts - @td.skip_if_windows - def test_utc_with_system_utc(self): - from pandas._libs.tslibs.timezones import maybe_get_tz - - # from system utc to real utc - ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) - # check that the time hasn't changed. - assert ts == ts.tz_convert(dateutil.tz.tzutc()) - - # from system utc to real utc - ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) - # check that the time hasn't changed. - assert ts == ts.tz_convert(dateutil.tz.tzutc()) - def test_tz_convert_hour_overflow_dst(self): # Regression test for: # https://github.com/pandas-dev/pandas/issues/13306 @@ -1215,47 +1063,6 @@ def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self, tz_name): class TestTimeZones(object): timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific'] - def test_ambiguous_compat(self): - # validate that pytz and dateutil are compat for dst - # when the transition happens - - pytz_zone = 'Europe/London' - dateutil_zone = 'dateutil/Europe/London' - result_pytz = (Timestamp('2013-10-27 01:00:00') - .tz_localize(pytz_zone, ambiguous=0)) - result_dateutil = (Timestamp('2013-10-27 01:00:00') - .tz_localize(dateutil_zone, ambiguous=0)) - assert result_pytz.value == result_dateutil.value - assert result_pytz.value == 1382835600000000000 - - if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'): - # dateutil 2.6 buggy w.r.t. ambiguous=0 - # see gh-14621 - # see https://github.com/dateutil/dateutil/issues/321 - assert (result_pytz.to_pydatetime().tzname() == - result_dateutil.to_pydatetime().tzname()) - assert str(result_pytz) == str(result_dateutil) - elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'): - # fixed ambiguous behavior - assert result_pytz.to_pydatetime().tzname() == 'GMT' - assert result_dateutil.to_pydatetime().tzname() == 'BST' - assert str(result_pytz) != str(result_dateutil) - - # 1 hour difference - result_pytz = (Timestamp('2013-10-27 01:00:00') - .tz_localize(pytz_zone, ambiguous=1)) - result_dateutil = (Timestamp('2013-10-27 01:00:00') - .tz_localize(dateutil_zone, ambiguous=1)) - assert result_pytz.value == result_dateutil.value - assert result_pytz.value == 1382832000000000000 - - # dateutil < 2.6 is buggy w.r.t. ambiguous timezones - if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'): - # see gh-14621 - assert str(result_pytz) == str(result_dateutil) - assert (result_pytz.to_pydatetime().tzname() == - result_dateutil.to_pydatetime().tzname()) - def test_index_equals_with_tz(self): left = date_range('1/1/2011', periods=100, freq='H', tz='utc') right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
There is de-duplication/parametrization, etc., still to be done, but that should wait until the tests are collected, since we'll have to re-do it then anyway.
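The ambiguous-localization behaviour under test, in sketch form (the timestamps are taken from `test_tz_localize_ambiguous_bool`; `pytest.raises` is used at module level purely for illustration):

```python
import pytest
import pytz
import pandas as pd

# 2015-11-01 01:00 occurs twice in US/Central (DST fall-back),
# so a bare tz_localize is ambiguous and must raise.
ts = pd.Timestamp('2015-11-01 01:00:03')
with pytest.raises(pytz.AmbiguousTimeError):
    ts.tz_localize('US/Central')

# ambiguous=True picks the first (DST) occurrence,
# ambiguous=False the second (standard-time) one.
first = ts.tz_localize('US/Central', ambiguous=True)
second = ts.tz_localize('US/Central', ambiguous=False)
assert first.value != second.value
```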
https://api.github.com/repos/pandas-dev/pandas/pulls/19511
2018-02-02T16:14:44Z
2018-02-04T16:06:51Z
2018-02-04T16:06:51Z
2018-02-04T17:26:22Z
BUG: AttributeError raised in StataReader.value_labels()
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ea56ebad7d782..3109798499135 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -626,6 +626,7 @@ I/O - Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`) - :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`) - :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for xls file type (:issue:`19242`, :issue:`9155`) +- Bug in :meth:`pandas.io.stata.StataReader.value_labels` raising an ``AttributeError`` when called on very old files. Now returns an empty dict (:issue:`19417`) Plotting ^^^^^^^^ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index adbff06364dbe..ee6975ea1d938 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1341,12 +1341,14 @@ def _null_terminate(self, s): return s def _read_value_labels(self): - if self.format_version <= 108: - # Value labels are not supported in version 108 and earlier. - return if self._value_labels_read: # Don't read twice return + if self.format_version <= 108: + # Value labels are not supported in version 108 and earlier. + self._value_labels_read = True + self.value_label_dict = dict() + return if self.format_version >= 117: self.path_or_buf.seek(self.seek_value_labels) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 89d76061329a3..4e259d0994bdb 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -589,6 +589,16 @@ def test_105(self): df0['psch_dis'] = df0["psch_dis"].astype(np.float32) tm.assert_frame_equal(df.head(3), df0) + def test_value_labels_old_format(self): + # GH 19417 + # + # Test that value_labels() returns an empty dict if the file format + # predates supporting value labels. + dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta') + reader = StataReader(dpath) + assert reader.value_labels() == {} + reader.close() + def test_date_export_formats(self): columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty'] conversions = {c: c for c in columns}
- [x] closes #19417 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Returns an empty dict instead of raising AttributeError for very old Stata files. Includes a test against a very old Stata file.
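Usage sketch of the fixed behaviour (the file path is hypothetical — any pre-109-format .dta file would do):

```python
from pandas.io.stata import StataReader

# On a format-108-or-earlier .dta file, value_labels() now returns
# an empty dict instead of raising AttributeError.
reader = StataReader('S4_EDUC1.dta')
try:
    assert reader.value_labels() == {}
finally:
    reader.close()
```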
https://api.github.com/repos/pandas-dev/pandas/pulls/19510
2018-02-02T16:12:22Z
2018-02-06T14:17:14Z
2018-02-06T14:17:14Z
2018-02-06T14:17:17Z
TST: Remove dupe TimedeltaIndex tests from indexes/timedeltas/test_astype.py
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index af16fe71edcf3..c3bd857036efc 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -2,36 +2,20 @@ import numpy as np -import pandas as pd import pandas.util.testing as tm from pandas import (TimedeltaIndex, timedelta_range, Int64Index, Float64Index, - Index, Timedelta) + Index, Timedelta, NaT) -from ..datetimelike import DatetimeLike - -class TestTimedeltaIndex(DatetimeLike): - _holder = TimedeltaIndex +class TestTimedeltaIndex(object): _multiprocess_can_split_ = True - def test_numeric_compat(self): - # Dummy method to override super's version; this test is now done - # in test_arithmetic.py - pass - - def setup_method(self, method): - self.indices = dict(index=tm.makeTimedeltaIndex(10)) - self.setup_indices() - - def create_index(self): - return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) - def test_astype(self): # GH 13149, GH 13209 - idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) + idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) result = idx.astype(object) - expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3, + expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3, dtype=object) tm.assert_index_equal(result, expected) @@ -51,7 +35,7 @@ def test_astype(self): def test_astype_timedelta64(self): # GH 13149, GH 13209 - idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) + idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) result = idx.astype('timedelta64') expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64') @@ -69,28 +53,7 @@ def test_astype_timedelta64(self): float, 'datetime64', 'datetime64[ns]']) def test_astype_raises(self, dtype): # GH 13149, GH 13209 - idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) + idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) msg = 'Cannot cast TimedeltaIndex to dtype' with tm.assert_raises_regex(TypeError, msg): idx.astype(dtype) - - def test_pickle_compat_construction(self): - pass - - def test_shift(self): - # test shift for TimedeltaIndex - # err8083 - - drange = self.create_index() - result = drange.shift(1) - expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00', - '3 days 01:00:00', - '4 days 01:00:00', '5 days 01:00:00'], - freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(3, freq='2D 1s') - expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03', - '8 days 01:00:03', '9 days 01:00:03', - '10 days 01:00:03'], freq='D') - tm.assert_index_equal(result, expected)
- [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Both classes in `test_astype.py` and [`test_timedelta.py`](https://github.com/pandas-dev/pandas/blob/master/pandas/tests/indexes/timedeltas/test_timedelta.py#L19) inherit from `DatetimeLike`, which in turn [inherits from `Base`](https://github.com/pandas-dev/pandas/blob/master/pandas/tests/indexes/datetimelike.py#L9), leading to 200+ duplicate tests being run. Modified `test_astype.py` to inherit from `object` instead, removed some unnecessary boilerplate, and removed some tests that existed only to override base class tests.
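A minimal sketch of the duplication mechanism (class names are illustrative, not the real test classes):

```python
# pytest does not collect Base itself (its name doesn't start with Test),
# but it collects the inherited test once per Test* subclass.
class Base(object):
    def test_something(self):
        assert True

class TestAstype(Base):      # inherits test_something -> runs here
    pass

class TestTimedelta(Base):   # ...and runs again here
    pass

# So every test defined on a shared base class runs once for each
# inheriting class; switching TestAstype to inherit from object
# removes its copy of the whole suite.
```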
https://api.github.com/repos/pandas-dev/pandas/pulls/19509
2018-02-02T15:37:15Z
2018-02-04T15:54:15Z
2018-02-04T15:54:14Z
2018-02-04T18:52:40Z
DOC: improve docs to clarify MultiIndex indexing
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index ca903dadc6eb1..c455fbb8d0687 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -113,7 +113,13 @@ of the index is up to you: pd.DataFrame(np.random.randn(6, 6), index=index[:6], columns=index[:6]) We've "sparsified" the higher levels of the indexes to make the console output a -bit easier on the eyes. +bit easier on the eyes. Note that how the index is displayed can be controlled using the +``multi_sparse`` option in ``pandas.set_options()``: + +.. ipython:: python + + with pd.option_context('display.multi_sparse', False): + df It's worth keeping in mind that there's nothing preventing you from using tuples as atomic labels on an axis: @@ -129,15 +135,6 @@ can find yourself working with hierarchically-indexed data without creating a ``MultiIndex`` explicitly yourself. However, when loading data from a file, you may wish to generate your own ``MultiIndex`` when preparing the data set. -Note that how the index is displayed by be controlled using the -``multi_sparse`` option in ``pandas.set_options()``: - -.. ipython:: python - - pd.set_option('display.multi_sparse', False) - df - pd.set_option('display.multi_sparse', True) - .. _advanced.get_level_values: Reconstructing the level labels @@ -180,14 +177,13 @@ For example: .. ipython:: python -   # original MultiIndex -   df.columns +   df.columns # original MultiIndex - # sliced - df[['foo','qux']].columns + df[['foo','qux']].columns # sliced This is done to avoid a recomputation of the levels in order to make slicing -highly performant. If you want to see the actual used levels. +highly performant. If you want to see only the used levels, you can use the +:func:`MultiIndex.get_level_values` method. .. ipython:: python @@ -196,7 +192,7 @@ highly performant. If you want to see the actual used levels. # for a specific level df[['foo','qux']].columns.get_level_values(0) -To reconstruct the ``MultiIndex`` with only the used levels, the +To reconstruct the ``MultiIndex`` with only the used levels, the ``remove_unused_levels`` method may be used. .. versionadded:: 0.20.0 @@ -231,15 +227,33 @@ Advanced indexing with hierarchical index ----------------------------------------- Syntactically integrating ``MultiIndex`` in advanced indexing with ``.loc`` is a -bit challenging, but we've made every effort to do so. For example the -following works as you would expect: +bit challenging, but we've made every effort to do so. In general, MultiIndex +keys take the form of tuples. For example, the following works as you would expect: .. ipython:: python df = df.T df - df.loc['bar'] - df.loc['bar', 'two'] + df.loc[('bar', 'two'),] + +Note that ``df.loc['bar', 'two']`` would also work in this example, but this shorthand +notation can lead to ambiguity in general. + +If you also want to index a specific column with ``.loc``, you must use a tuple +like this: + +.. ipython:: python + + df.loc[('bar', 'two'), 'A'] + +You don't have to specify all levels of the ``MultiIndex`` by passing only the +first elements of the tuple. For example, you can use "partial" indexing to +get all elements with ``bar`` in the first level as follows: + +df.loc['bar'] + +This is a shortcut for the slightly more verbose notation ``df.loc[('bar',),]`` (equivalent +to ``df.loc['bar',]`` in this example). "Partial" slicing also works quite nicely. @@ -260,6 +274,24 @@ Passing a list of labels or tuples works similar to reindexing: df.loc[[('bar', 'two'), ('qux', 'one')]] +.. 
info:: + + It is important to note that tuples and lists are not treated identically + in pandas when it comes to indexing. Whereas a tuple is interpreted as one + multi-level key, a list is used to specify several keys. Or in other words, + tuples go horizontally (traversing levels), lists go vertically (scanning levels). + +Importantly, a list of tuples indexes several complete ``MultiIndex`` keys, +whereas a tuple of lists refer to several values within a level: + +.. ipython:: python + + s = pd.Series([1, 2, 3, 4, 5, 6], + index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]])) + s.loc[[("A", "c"), ("B", "d")]] # list of tuples + s.loc[(["A", "B"], ["c", "d"])] # tuple of lists + + .. _advanced.mi_slicers: Using slicers @@ -317,7 +349,7 @@ Basic multi-index slicing using slices, lists, and labels. dfmi.loc[(slice('A1','A3'), slice(None), ['C1', 'C3']), :] -You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax +You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax using ``:``, rather than using ``slice(None)``. .. ipython:: python @@ -626,7 +658,7 @@ Index Types ----------- We have discussed ``MultiIndex`` in the previous sections pretty extensively. ``DatetimeIndex`` and ``PeriodIndex`` -are shown :ref:`here <timeseries.overview>`, and information about +are shown :ref:`here <timeseries.overview>`, and information about `TimedeltaIndex`` is found :ref:`here <timedeltas.timedeltas>`. In the following sub-sections we will highlight some other index types. @@ -671,9 +703,9 @@ The ``CategoricalIndex`` is **preserved** after indexing: df2.loc['a'].index -Sorting the index will sort by the order of the categories (Recall that we -created the index with ``CategoricalDtype(list('cab'))``, so the sorted -order is ``cab``.). +Sorting the index will sort by the order of the categories (recall that we +created the index with ``CategoricalDtype(list('cab'))``, so the sorted +order is ``cab``). .. ipython:: python @@ -726,7 +758,7 @@ Int64Index and RangeIndex Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`. -``Int64Index`` is a fundamental basic index in pandas. +``Int64Index`` is a fundamental basic index in pandas. This is an Immutable array implementing an ordered, sliceable set. Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects. @@ -765,7 +797,7 @@ The only positional indexing is via ``iloc``. sf.iloc[3] A scalar index that is not found will raise a ``KeyError``. -Slicing is primarily on the values of the index when using ``[],ix,loc``, and +Slicing is primarily on the values of the index when using ``[],ix,loc``, and **always** positional when using ``iloc``. The exception is when the slice is boolean, in which case it will always be positional.
As per our discussion in #16943. Let me know what you think. I'm not quite happy with the new warning box; ideas on how to improve the message are welcome.
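The tuple-vs-list distinction the new docs emphasize, as a runnable sketch (same data as the doc example):

```python
import pandas as pd

s = pd.Series([1, 2, 3, 4, 5, 6],
              index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]))

# A list of tuples names complete keys: 2 rows, ("A", "c") and ("B", "d").
by_keys = s.loc[[("A", "c"), ("B", "d")]]

# A tuple of lists picks values within each level: 4 rows, the cross
# product of {"A", "B"} x {"c", "d"}.
by_levels = s.loc[(["A", "B"], ["c", "d"])]
assert len(by_keys) == 2 and len(by_levels) == 4
```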
https://api.github.com/repos/pandas-dev/pandas/pulls/19507
2018-02-02T08:26:47Z
2018-02-15T09:00:32Z
2018-02-15T09:00:32Z
2018-08-03T07:56:31Z
implement timedeltas.test_scalar_compat
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index ef6523a9eb270..3dc60ed33b958 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -10,7 +10,7 @@ to_timedelta, timedelta_range, date_range, Series, Timestamp, Timedelta) -from pandas.errors import PerformanceWarning +from pandas.errors import PerformanceWarning, NullFrequencyError @pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2), @@ -138,6 +138,60 @@ def test_tdi_add_str_invalid(self): with pytest.raises(TypeError): 'a' + tdi + # ------------------------------------------------------------- + # TimedeltaIndex.shift is used by __add__/__sub__ + + def test_tdi_shift_empty(self): + # GH#9903 + idx = pd.TimedeltaIndex([], name='xxx') + tm.assert_index_equal(idx.shift(0, freq='H'), idx) + tm.assert_index_equal(idx.shift(3, freq='H'), idx) + + def test_tdi_shift_hours(self): + # GH#9903 + idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx') + tm.assert_index_equal(idx.shift(0, freq='H'), idx) + exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx') + tm.assert_index_equal(idx.shift(3, freq='H'), exp) + exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') + tm.assert_index_equal(idx.shift(-3, freq='H'), exp) + + def test_tdi_shift_minutes(self): + # GH#9903 + idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx') + tm.assert_index_equal(idx.shift(0, freq='T'), idx) + exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'], + name='xxx') + tm.assert_index_equal(idx.shift(3, freq='T'), exp) + exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'], + name='xxx') + tm.assert_index_equal(idx.shift(-3, freq='T'), exp) + + def test_tdi_shift_int(self): + # GH#8083 + trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) + result = trange.shift(1) + expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', + '4 days 01:00:00', '5 days 01:00:00'], + freq='D') + tm.assert_index_equal(result, expected) + + def test_tdi_shift_nonstandard_freq(self): + # GH#8083 + trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) + result = trange.shift(3, freq='2D 1s') + expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03', + '8 days 01:00:03', '9 days 01:00:03', + '10 days 01:00:03'], freq='D') + tm.assert_index_equal(result, expected) + + def test_shift_no_freq(self): + # GH#19147 + tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None) + with pytest.raises(NullFrequencyError): + tdi.shift(2) + # ------------------------------------------------------------- @pytest.mark.parametrize('box', [np.array, pd.Index]) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 112c62b7e2f8d..e944aad13f8d5 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -98,32 +98,6 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, td, out=0) - def test_round(self): - td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') - elt = td[1] - - expected_rng = TimedeltaIndex([ - Timedelta('16801 days 00:00:00'), - Timedelta('16801 days 00:00:00'), - Timedelta('16801 days 01:00:00'), - Timedelta('16801 days 02:00:00'), - Timedelta('16801 days 02:00:00'), - ]) - expected_elt = expected_rng[1] - - 
tm.assert_index_equal(td.round(freq='H'), expected_rng) - assert elt.round(freq='H') == expected_elt - - msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR - with tm.assert_raises_regex(ValueError, msg): - td.round(freq='foo') - with tm.assert_raises_regex(ValueError, msg): - elt.round(freq='foo') - - msg = "<MonthEnd> is a non-fixed frequency" - tm.assert_raises_regex(ValueError, msg, td.round, freq='M') - tm.assert_raises_regex(ValueError, msg, elt.round, freq='M') - def test_representation(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') @@ -387,25 +361,7 @@ def test_nat_new(self): tm.assert_numpy_array_equal(result, exp) def test_shift(self): - # GH 9903 - idx = pd.TimedeltaIndex([], name='xxx') - tm.assert_index_equal(idx.shift(0, freq='H'), idx) - tm.assert_index_equal(idx.shift(3, freq='H'), idx) - - idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx') - tm.assert_index_equal(idx.shift(0, freq='H'), idx) - exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx') - tm.assert_index_equal(idx.shift(3, freq='H'), exp) - exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') - tm.assert_index_equal(idx.shift(-3, freq='H'), exp) - - tm.assert_index_equal(idx.shift(0, freq='T'), idx) - exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'], - name='xxx') - tm.assert_index_equal(idx.shift(3, freq='T'), exp) - exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'], - name='xxx') - tm.assert_index_equal(idx.shift(-3, freq='T'), exp) + pass # handled in test_arithmetic.py def test_repeat(self): index = pd.timedelta_range('1 days', periods=2, freq='D') diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py new file mode 100644 index 0000000000000..7d97e1fadea30 --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +""" +Tests for TimedeltaIndex methods behaving like their Timedelta counterparts +""" + +import numpy as np + +import pandas as pd +import pandas.util.testing as tm +from pandas import timedelta_range, Timedelta, TimedeltaIndex, Index, Series + + +class TestVectorizedTimedelta(object): + def test_tdi_total_seconds(self): + # GH#10939 + # test index + rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, + freq='s') + expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9, + 1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9] + tm.assert_almost_equal(rng.total_seconds(), Index(expt)) + + # test Series + ser = Series(rng) + s_expt = Series(expt, index=[0, 1]) + tm.assert_series_equal(ser.dt.total_seconds(), s_expt) + + # with nat + ser[1] = np.nan + s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 + + 12 + 100123456. 
/ 1e9, np.nan], index=[0, 1]) + tm.assert_series_equal(ser.dt.total_seconds(), s_expt) + + # with both nat + ser = Series([np.nan, np.nan], dtype='timedelta64[ns]') + tm.assert_series_equal(ser.dt.total_seconds(), + Series([np.nan, np.nan], index=[0, 1])) + + def test_tdi_round(self): + td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') + elt = td[1] + + expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'), + Timedelta('16801 days 00:00:00'), + Timedelta('16801 days 01:00:00'), + Timedelta('16801 days 02:00:00'), + Timedelta('16801 days 02:00:00')]) + expected_elt = expected_rng[1] + + tm.assert_index_equal(td.round(freq='H'), expected_rng) + assert elt.round(freq='H') == expected_elt + + msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR + with tm.assert_raises_regex(ValueError, msg): + td.round(freq='foo') + with tm.assert_raises_regex(ValueError, msg): + elt.round(freq='foo') + + msg = "<MonthEnd> is a non-fixed frequency" + with tm.assert_raises_regex(ValueError, msg): + td.round(freq='M') + with tm.assert_raises_regex(ValueError, msg): + elt.round(freq='M') diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 1af971e8a4326..32157a9a44e04 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -4,7 +4,6 @@ from datetime import timedelta import pandas as pd -from pandas.errors import NullFrequencyError import pandas.util.testing as tm from pandas import (timedelta_range, date_range, Series, Timedelta, TimedeltaIndex, Index, DataFrame, @@ -34,28 +33,7 @@ def test_numeric_compat(self): pass def test_shift(self): - # test shift for TimedeltaIndex - # err8083 - - drange = self.create_index() - result = drange.shift(1) - expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00', - '3 days 01:00:00', - '4 days 01:00:00', '5 days 01:00:00'], - freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(3, freq='2D 1s') - expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03', - '8 days 01:00:03', '9 days 01:00:03', - '10 days 01:00:03'], freq='D') - tm.assert_index_equal(result, expected) - - def test_shift_no_freq(self): - # GH#19147 - tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None) - with pytest.raises(NullFrequencyError): - tdi.shift(2) + pass # this is handled in test_arithmetic.py def test_pickle_compat_construction(self): pass @@ -203,31 +181,6 @@ def test_map(self): exp = Int64Index([f(x) for x in rng]) tm.assert_index_equal(result, exp) - def test_total_seconds(self): - # GH 10939 - # test index - rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, - freq='s') - expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9, - 1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9] - tm.assert_almost_equal(rng.total_seconds(), Index(expt)) - - # test Series - s = Series(rng) - s_expt = Series(expt, index=[0, 1]) - tm.assert_series_equal(s.dt.total_seconds(), s_expt) - - # with nat - s[1] = np.nan - s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 + - 12 + 100123456. / 1e9, np.nan], index=[0, 1]) - tm.assert_series_equal(s.dt.total_seconds(), s_expt) - - # with both nat - s = Series([np.nan, np.nan], dtype='timedelta64[ns]') - tm.assert_series_equal(s.dt.total_seconds(), - Series([np.nan, np.nan], index=[0, 1])) - def test_pass_TimedeltaIndex_to_index(self): rng = timedelta_range('1 days', '10 days')
Centralize redundant TDI.shift tests in test_arithmetic.py and split the scalar-compat tests (round, total_seconds) into the new test_scalar_compat.py.
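The shift behaviour the moved tests cover, in sketch form (values taken from `test_tdi_shift_hours`):

```python
import pandas as pd

tdi = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'])

# With an explicit freq, shift moves each element by n * freq.
result = tdi.shift(3, freq='H')
expected = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'])
assert result.equals(expected)
```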
https://api.github.com/repos/pandas-dev/pandas/pulls/19503
2018-02-02T02:35:31Z
2018-02-02T11:05:30Z
2018-02-02T11:05:30Z
2018-06-22T03:38:10Z
Added E741 to flake8 config
diff --git a/setup.cfg b/setup.cfg index 828ef80971f7b..942b2b0a1a0bf 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,11 @@ tag_prefix = v parentdir_prefix = pandas- [flake8] -ignore = E731,E402,W503 +ignore = + E402, # module level import not at top of file + E731, # do not assign a lambda expression, use a def + E741, # do not use variables named 'l', 'O', or 'I' + W503 # line break before binary operator max-line-length = 79 [yapf]
Makes this easier to run locally. Do we want to ignore bare excepts (E722) too? Or is the hope that we'll fix all of those eventually?
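For reference, the kind of code E741 flags — single-character names that are easily confused with digits (the example is illustrative):

```python
# flake8 reports E741 ("ambiguous variable name") on assignments like:
l = [1, 2, 3]   # flagged: 'l' is easily read as '1'
I = 0           # flagged: 'I' is easily read as 'l' or '1'
O = 10          # flagged: 'O' is easily read as '0'

# Ignoring E741 (as the config above does) keeps flake8 quiet about
# the many existing uses of these names in the codebase.
```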
https://api.github.com/repos/pandas-dev/pandas/pulls/19496
2018-02-01T21:03:52Z
2018-02-02T11:03:30Z
2018-02-02T11:03:30Z
2018-09-19T19:58:16Z
TST: fix up pandas_datareader downstream tests
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 0f0abd8cd3400..b438d6a6137b0 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -89,7 +89,8 @@ def test_pandas_gbq(df): def test_pandas_datareader(): pandas_datareader = import_module('pandas_datareader') # noqa - pandas_datareader.get_data_google('AAPL') + pandas_datareader.DataReader( + 'F', 'quandl', '2017-01-01', '2017-02-01') def test_geopandas():
closes #18935
https://api.github.com/repos/pandas-dev/pandas/pulls/19490
2018-02-01T11:06:55Z
2018-02-01T12:54:57Z
2018-02-01T12:54:57Z
2018-02-01T12:55:16Z
PERF: Cythonize Groupby Rank
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 083242cd69b74..cf5a44442045b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -581,6 +581,7 @@ Performance Improvements
 - Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`)
 - Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`)
 - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`)
+- Improved performance of :func:`DataFrameGroupBy.rank` (:issue:`15779`)

 .. _whatsnew_0230.docs:

diff --git a/pandas/_libs/algos.pxd b/pandas/_libs/algos.pxd
index 6d80e6f0073eb..a535872ff7279 100644
--- a/pandas/_libs/algos.pxd
+++ b/pandas/_libs/algos.pxd
@@ -11,3 +11,11 @@ cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil:
     a[0] = b[0]
     b[0] = t
     return 0
+
+cdef enum TiebreakEnumType:
+    TIEBREAK_AVERAGE
+    TIEBREAK_MIN
+    TIEBREAK_MAX
+    TIEBREAK_FIRST
+    TIEBREAK_FIRST_DESCENDING
+    TIEBREAK_DENSE
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 5d17488963b1c..a418e54e4da9b 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -31,14 +31,6 @@ cdef double nan = NaN

 cdef int64_t iNaT = get_nat()

-cdef:
-    int TIEBREAK_AVERAGE = 0
-    int TIEBREAK_MIN = 1
-    int TIEBREAK_MAX = 2
-    int TIEBREAK_FIRST = 3
-    int TIEBREAK_FIRST_DESCENDING = 4
-    int TIEBREAK_DENSE = 5
-
 tiebreakers = {
     'average': TIEBREAK_AVERAGE,
     'min': TIEBREAK_MIN,
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 55de700c9af52..d75c3a71896e3 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -16,8 +16,9 @@ from numpy cimport (ndarray,
 from libc.stdlib cimport malloc, free

 from util cimport numeric, get_nat
-from algos cimport swap
-from algos import take_2d_axis1_float64_float64, groupsort_indexer
+from algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE, TIEBREAK_MIN,
+                    TIEBREAK_MAX, TIEBREAK_FIRST, TIEBREAK_DENSE)
+from algos import take_2d_axis1_float64_float64, groupsort_indexer, tiebreakers

 cdef int64_t iNaT = get_nat()

diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index a751fadaf48cf..b24444c422efa 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -444,8 +444,173 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
             else:
                 out[i, j] = resx[i, j]

+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
+                        ndarray[{{c_type}}, ndim=2] values,
+                        ndarray[int64_t] labels,
+                        bint is_datetimelike, object ties_method,
+                        bint ascending, bint pct, object na_option):
+    """Provides the rank of values within each group
+
+    Parameters
+    ----------
+    out : array of float64_t values which this method will write its results to
+    values : array of {{c_type}} values to be ranked
+    labels : array containing unique label for each group, with its ordering
+        matching up to the corresponding record in `values`
+    is_datetimelike : bool
+        unused in this method but provided for call compatibility with other
+        Cython transformations
+    ties_method : {'average', 'min', 'max', 'first', 'dense'}
+        * average: average rank of group
+        * min: lowest rank in group
+        * max: highest rank in group
+        * first: ranks assigned in order they appear in the array
+        * dense: like 'min', but rank always increases by 1 between groups
+    na_option : {'keep', 'top', 'bottom'}
+        * keep: leave NA values where they are
+        * top: smallest rank if ascending
+        * bottom: smallest rank if descending
+    ascending : boolean
+        False for ranks by high (1) to low (N)
+    pct : boolean
+        Compute percentage rank of data
within each group + + Notes + ----- + This method modifies the `out` parameter rather than returning an object + """ + cdef: + TiebreakEnumType tiebreak + Py_ssize_t i, j, N, K, val_start=0, grp_start=0, dups=0, sum_ranks=0 + Py_ssize_t grp_vals_seen=1, grp_na_count=0 + ndarray[int64_t] _as + ndarray[float64_t, ndim=2] grp_sizes + ndarray[{{c_type}}] masked_vals + ndarray[uint8_t] mask + bint keep_na + {{c_type}} nan_fill_val + + tiebreak = tiebreakers[ties_method] + keep_na = na_option == 'keep' + N, K = (<object> values).shape + grp_sizes = np.ones_like(out) + + # Copy values into new array in order to fill missing data + # with mask, without obfuscating location of missing data + # in values array + masked_vals = np.array(values[:, 0], copy=True) + {{if name=='int64'}} + mask = (masked_vals == {{nan_val}}).astype(np.uint8) + {{else}} + mask = np.isnan(masked_vals).astype(np.uint8) + {{endif}} + + if ascending ^ (na_option == 'top'): + {{if name == 'int64'}} + nan_fill_val = np.iinfo(np.int64).max + {{else}} + nan_fill_val = np.inf + {{endif}} + order = (masked_vals, mask, labels) + else: + {{if name == 'int64'}} + nan_fill_val = np.iinfo(np.int64).min + {{else}} + nan_fill_val = -np.inf + {{endif}} + order = (masked_vals, ~mask, labels) + np.putmask(masked_vals, mask, nan_fill_val) + + # lexsort using labels, then mask, then actual values + # each label corresponds to a different group value, + # the mask helps you differentiate missing values before + # performing sort on the actual values + _as = np.lexsort(order) + + if not ascending: + _as = _as[::-1] + + with nogil: + # Loop over the length of the value array + # each incremental i value can be looked up in the _as array + # that we sorted previously, which gives us the location of + # that sorted value for retrieval back from the original + # values / masked_vals arrays + for i in range(N): + # dups and sum_ranks will be incremented each loop where + # the value / group remains the same, and should be reset + # when either of those change + # Used to calculate tiebreakers + dups += 1 + sum_ranks += i - grp_start + 1 + + # if keep_na, check for missing values and assign back + # to the result where appropriate + if keep_na and masked_vals[_as[i]] == nan_fill_val: + grp_na_count += 1 + out[_as[i], 0] = nan + else: + # this implementation is inefficient because it will + # continue overwriting previously encountered dups + # i.e. 
if 5 duplicated values are encountered it will + # write to the result as follows (assumes avg tiebreaker): + # 1 + # .5 .5 + # .33 .33 .33 + # .25 .25 .25 .25 + # .2 .2 .2 .2 .2 + # + # could potentially be optimized to only write to the + # result once the last duplicate value is encountered + if tiebreak == TIEBREAK_AVERAGE: + for j in range(i - dups + 1, i + 1): + out[_as[j], 0] = sum_ranks / <float64_t>dups + elif tiebreak == TIEBREAK_MIN: + for j in range(i - dups + 1, i + 1): + out[_as[j], 0] = i - grp_start - dups + 2 + elif tiebreak == TIEBREAK_MAX: + for j in range(i - dups + 1, i + 1): + out[_as[j], 0] = i - grp_start + 1 + elif tiebreak == TIEBREAK_FIRST: + for j in range(i - dups + 1, i + 1): + if ascending: + out[_as[j], 0] = j + 1 - grp_start + else: + out[_as[j], 0] = 2 * i - j - dups + 2 - grp_start + elif tiebreak == TIEBREAK_DENSE: + for j in range(i - dups + 1, i + 1): + out[_as[j], 0] = grp_vals_seen + + # look forward to the next value (using the sorting in _as) + # if the value does not equal the current value then we need to + # reset the dups and sum_ranks, knowing that a new value is coming + # up. the conditional also needs to handle nan equality and the + # end of iteration + if (i == N - 1 or ( + (masked_vals[_as[i]] != masked_vals[_as[i+1]]) and not + (mask[_as[i]] and mask[_as[i+1]]))): + dups = sum_ranks = 0 + val_start = i + grp_vals_seen += 1 + + # Similar to the previous conditional, check now if we are moving + # to a new group. If so, keep track of the index where the new + # group occurs, so the tiebreaker calculations can decrement that + # from their position. fill in the size of each group encountered + # (used by pct calculations later). also be sure to reset any of + # the items helping to calculate dups + if i == N - 1 or labels[_as[i]] != labels[_as[i+1]]: + for j in range(grp_start, i + 1): + grp_sizes[_as[j], 0] = i - grp_start + 1 - grp_na_count + dups = sum_ranks = 0 + grp_na_count = 0 + val_start = i + 1 + grp_start = i + 1 + grp_vals_seen = 1 + + if pct: + for i in range(N): + out[i, 0] = out[i, 0] / grp_sizes[i, 0] {{endfor}} + #---------------------------------------------------------------------- # group_min, group_max #---------------------------------------------------------------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 01241db7c0c42..0363bcd02aa16 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -994,7 +994,7 @@ def _transform_should_cast(self, func_nm): return (self.size().fillna(0) > 0).any() and (func_nm not in _cython_cast_blacklist) - def _cython_transform(self, how, numeric_only=True): + def _cython_transform(self, how, numeric_only=True, **kwargs): output = collections.OrderedDict() for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -1002,12 +1002,16 @@ def _cython_transform(self, how, numeric_only=True): continue try: - result, names = self.grouper.transform(obj.values, how) + result, names = self.grouper.transform(obj.values, how, + **kwargs) except NotImplementedError: continue except AssertionError as e: raise GroupByError(str(e)) - output[name] = self._try_cast(result, obj) + if self._transform_should_cast(how): + output[name] = self._try_cast(result, obj) + else: + output[name] = result if len(output) == 0: raise DataError('No numeric types to aggregate') @@ -1768,6 +1772,37 @@ def cumcount(self, ascending=True): cumcounts = self._cumcount_array(ascending=ascending) return Series(cumcounts, index) + @Substitution(name='groupby') + 
@Appender(_doc_template)
+    def rank(self, method='average', ascending=True, na_option='keep',
+             pct=False, axis=0):
+        """Provides the rank of values within each group
+
+        Parameters
+        ----------
+        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
+            * average: average rank of group
+            * min: lowest rank in group
+            * max: highest rank in group
+            * first: ranks assigned in order they appear in the array
+            * dense: like 'min', but rank always increases by 1 between groups
+        na_option : {'keep', 'top', 'bottom'}, default 'keep'
+            * keep: leave NA values where they are
+            * top: smallest rank if ascending
+            * bottom: smallest rank if descending
+        ascending : boolean, default True
+            False for ranks by high (1) to low (N)
+        pct : boolean, default False
+            Compute percentage rank of data within each group
+
+        Returns
+        -------
+        DataFrame with ranking of values within each group
+        """
+        return self._cython_transform('rank', numeric_only=False,
+                                      ties_method=method, ascending=ascending,
+                                      na_option=na_option, pct=pct, axis=axis)
+
     @Substitution(name='groupby')
     @Appender(_doc_template)
     def cumprod(self, axis=0, *args, **kwargs):
@@ -2183,6 +2218,16 @@ def get_group_levels(self):
         'cumsum': 'group_cumsum',
         'cummin': 'group_cummin',
         'cummax': 'group_cummax',
+        'rank': {
+            'name': 'group_rank',
+            'f': lambda func, a, b, c, d, **kwargs: func(
+                a, b, c, d,
+                kwargs.get('ties_method', 'average'),
+                kwargs.get('ascending', True),
+                kwargs.get('pct', False),
+                kwargs.get('na_option', 'keep')
+            )
+        }
     }
 }

@@ -2242,7 +2287,8 @@ def wrapper(*args, **kwargs):
                                  (how, dtype_str))
             return func

-    def _cython_operation(self, kind, values, how, axis, min_count=-1):
+    def _cython_operation(self, kind, values, how, axis, min_count=-1,
+                          **kwargs):
         assert kind in ['transform', 'aggregate']

         # can we do this operation with our cython functions
@@ -2314,10 +2360,13 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1):
             else:
                 raise

-        if is_numeric:
-            out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
+        if how == 'rank':
+            out_dtype = 'float'
         else:
-            out_dtype = 'object'
+            if is_numeric:
+                out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
+            else:
+                out_dtype = 'object'

         labels, _, _ = self.group_info

@@ -2334,7 +2383,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1):
             # TODO: min_count
             result = self._transform(
-                result, values, labels, func, is_numeric, is_datetimelike)
+                result, values, labels, func, is_numeric, is_datetimelike,
+                **kwargs)

         if is_integer_dtype(result) and not is_datetimelike:
             mask = result == iNaT
@@ -2373,8 +2423,8 @@ def aggregate(self, values, how, axis=0, min_count=-1):
         return self._cython_operation('aggregate', values, how, axis,
                                       min_count=min_count)

-    def transform(self, values, how, axis=0):
-        return self._cython_operation('transform', values, how, axis)
+    def transform(self, values, how, axis=0, **kwargs):
+        return self._cython_operation('transform', values, how, axis, **kwargs)

     def _aggregate(self, result, counts, values, comp_ids, agg_func,
                    is_numeric, is_datetimelike, min_count=-1):
@@ -2394,7 +2444,7 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func,
         return result

     def _transform(self, result, values, comp_ids, transform_func,
-                   is_numeric, is_datetimelike):
+                   is_numeric, is_datetimelike, **kwargs):
         comp_ids, _, ngroups = self.group_info

         if values.ndim > 3:
@@ -2406,9 +2456,9 @@ def _transform(self, result, values, comp_ids, transform_func,
                 chunk = chunk.squeeze()

                 transform_func(result[:, :, i], values,
-
comp_ids, is_datetimelike) + comp_ids, is_datetimelike, **kwargs) else: - transform_func(result, values, comp_ids, is_datetimelike) + transform_func(result, values, comp_ids, is_datetimelike, **kwargs) return result diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 5172efe25d697..2db772ac54369 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1895,6 +1895,172 @@ def test_rank_apply(self): expected = expected.reindex(result.index) assert_series_equal(result, expected) + @pytest.mark.parametrize("grps", [ + ['qux'], ['qux', 'quux']]) + @pytest.mark.parametrize("vals", [ + [2, 2, 8, 2, 6], + [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), + pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'), + pd.Timestamp('2018-01-06')]]) + @pytest.mark.parametrize("ties_method,ascending,pct,exp", [ + ('average', True, False, [2., 2., 5., 2., 4.]), + ('average', True, True, [0.4, 0.4, 1.0, 0.4, 0.8]), + ('average', False, False, [4., 4., 1., 4., 2.]), + ('average', False, True, [.8, .8, .2, .8, .4]), + ('min', True, False, [1., 1., 5., 1., 4.]), + ('min', True, True, [0.2, 0.2, 1.0, 0.2, 0.8]), + ('min', False, False, [3., 3., 1., 3., 2.]), + ('min', False, True, [.6, .6, .2, .6, .4]), + ('max', True, False, [3., 3., 5., 3., 4.]), + ('max', True, True, [0.6, 0.6, 1.0, 0.6, 0.8]), + ('max', False, False, [5., 5., 1., 5., 2.]), + ('max', False, True, [1., 1., .2, 1., .4]), + ('first', True, False, [1., 2., 5., 3., 4.]), + ('first', True, True, [0.2, 0.4, 1.0, 0.6, 0.8]), + ('first', False, False, [3., 4., 1., 5., 2.]), + ('first', False, True, [.6, .8, .2, 1., .4]), + ('dense', True, False, [1., 1., 3., 1., 2.]), + ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]), + ('dense', False, False, [3., 3., 1., 3., 2.]), + ('dense', False, True, [.6, .6, .2, .6, .4]), + ]) + def test_rank_args(self, grps, vals, ties_method, ascending, pct, exp): + key = np.repeat(grps, len(vals)) + vals = vals * len(grps) + df = DataFrame({'key': key, 'val': vals}) + result = df.groupby('key').rank(method=ties_method, + ascending=ascending, pct=pct) + + exp_df = DataFrame(exp * len(grps), columns=['val']) + assert_frame_equal(result, exp_df) + + @pytest.mark.parametrize("grps", [ + ['qux'], ['qux', 'quux']]) + @pytest.mark.parametrize("vals", [ + [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats + [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan, + pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'), + pd.Timestamp('2018-01-06'), np.nan, np.nan] + ]) + @pytest.mark.parametrize("ties_method,ascending,na_option,pct,exp", [ + ('average', True, 'keep', False, + [2., 2., np.nan, 5., 2., 4., np.nan, np.nan]), + ('average', True, 'keep', True, + [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan]), + ('average', False, 'keep', False, + [4., 4., np.nan, 1., 4., 2., np.nan, np.nan]), + ('average', False, 'keep', True, + [.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan]), + ('min', True, 'keep', False, + [1., 1., np.nan, 5., 1., 4., np.nan, np.nan]), + ('min', True, 'keep', True, + [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]), + ('min', False, 'keep', False, + [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]), + ('min', False, 'keep', True, + [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]), + ('max', True, 'keep', False, + [3., 3., np.nan, 5., 3., 4., np.nan, np.nan]), + ('max', True, 'keep', True, + [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]), + ('max', False, 'keep', False, + [5., 5., np.nan, 1., 5., 2., np.nan, 
np.nan]), + ('max', False, 'keep', True, + [1., 1., np.nan, 0.2, 1., 0.4, np.nan, np.nan]), + ('first', True, 'keep', False, + [1., 2., np.nan, 5., 3., 4., np.nan, np.nan]), + ('first', True, 'keep', True, + [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]), + ('first', False, 'keep', False, + [3., 4., np.nan, 1., 5., 2., np.nan, np.nan]), + ('first', False, 'keep', True, + [.6, 0.8, np.nan, 0.2, 1., 0.4, np.nan, np.nan]), + ('dense', True, 'keep', False, + [1., 1., np.nan, 3., 1., 2., np.nan, np.nan]), + ('dense', True, 'keep', True, + [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]), + ('dense', False, 'keep', False, + [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]), + ('dense', False, 'keep', True, + [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]), + ('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]), + ('average', True, 'no_na', True, + [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]), + ('average', False, 'no_na', False, [4., 4., 7., 1., 4., 2., 7., 7.]), + ('average', False, 'no_na', True, + [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875]), + ('min', True, 'no_na', False, [1., 1., 6., 5., 1., 4., 6., 6.]), + ('min', True, 'no_na', True, + [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75]), + ('min', False, 'no_na', False, [3., 3., 6., 1., 3., 2., 6., 6.]), + ('min', False, 'no_na', True, + [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75]), + ('max', True, 'no_na', False, [3., 3., 8., 5., 3., 4., 8., 8.]), + ('max', True, 'no_na', True, + [0.375, 0.375, 1., 0.625, 0.375, 0.5, 1., 1.]), + ('max', False, 'no_na', False, [5., 5., 8., 1., 5., 2., 8., 8.]), + ('max', False, 'no_na', True, + [0.625, 0.625, 1., 0.125, 0.625, 0.25, 1., 1.]), + ('first', True, 'no_na', False, [1., 2., 6., 5., 3., 4., 7., 8.]), + ('first', True, 'no_na', True, + [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.]), + ('first', False, 'no_na', False, [3., 4., 6., 1., 5., 2., 7., 8.]), + ('first', False, 'no_na', True, + [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]), + ('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]), + ('dense', True, 'no_na', True, + [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]), + ('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]), + ('dense', False, 'no_na', True, + [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5]) + ]) + def test_rank_args_missing(self, grps, vals, ties_method, ascending, + na_option, pct, exp): + key = np.repeat(grps, len(vals)) + vals = vals * len(grps) + df = DataFrame({'key': key, 'val': vals}) + result = df.groupby('key').rank(method=ties_method, + ascending=ascending, + na_option=na_option, pct=pct) + + exp_df = DataFrame(exp * len(grps), columns=['val']) + assert_frame_equal(result, exp_df) + + @pytest.mark.parametrize("pct,exp", [ + (False, [3., 3., 3., 3., 3.]), + (True, [.6, .6, .6, .6, .6])]) + def test_rank_resets_each_group(self, pct, exp): + df = DataFrame( + {'key': ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'], + 'val': [1] * 10} + ) + result = df.groupby('key').rank(pct=pct) + exp_df = DataFrame(exp * 2, columns=['val']) + assert_frame_equal(result, exp_df) + + def test_rank_avg_even_vals(self): + df = DataFrame({'key': ['a'] * 4, 'val': [1] * 4}) + result = df.groupby('key').rank() + exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=['val']) + assert_frame_equal(result, exp_df) + + @pytest.mark.parametrize("ties_method", [ + 'average', 'min', 'max', 'first', 'dense']) + @pytest.mark.parametrize("ascending", [True, False]) + @pytest.mark.parametrize("na_option", ["keep", "top", 
"bottom"]) + @pytest.mark.parametrize("pct", [True, False]) + @pytest.mark.parametrize("vals", [ + ['bar', 'bar', 'foo', 'bar', 'baz'], + ['bar', np.nan, 'foo', np.nan, 'baz'] + ]) + def test_rank_object_raises(self, ties_method, ascending, na_option, + pct, vals): + df = DataFrame({'key': ['foo'] * 5, 'val': vals}) + with tm.assert_raises_regex(TypeError, "not callable"): + df.groupby('key').rank(method=ties_method, + ascending=ascending, + na_option=na_option, pct=pct) + def test_dont_clobber_name_column(self): df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'], 'name': ['foo', 'bar', 'baz'] * 2})
- [x] closes #15779
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

~~This is not complete~~ but I wanted to submit for review on the direction. In particular, I wanted to know whether my way of passing the named rank arguments back to the Cython layer makes sense, or if we'd rather bypass kwargs and call that function directly from the `GroupBy` instance method (similar to how `shift` does it). Right now this only ranks values ascending, doesn't handle tiebreakers, nor does it allow for the return of a percentage. I also plan on adding some test cases to cover the arguments, as I can't find them in the `pandas.tests.groupby` package. More to come, but again I wanted feedback before going too far in one direction.
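For reviewers, a minimal usage sketch (mine, not part of the diff) of the API this cythonizes, assuming the semantics documented in the docstring above:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [2, 2, 8, np.nan, 6]})

# ranks are computed within each group; ties share the average rank
print(df.groupby('key').rank(method='average'))

# na_option='keep' leaves NaN in place; pct=True rescales by group size
print(df.groupby('key').rank(method='min', na_option='keep', pct=True))
```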
https://api.github.com/repos/pandas-dev/pandas/pulls/19481
2018-01-31T19:21:31Z
2018-02-10T16:05:52Z
2018-02-10T16:05:52Z
2018-04-20T01:34:49Z
implement test_scalar_compat
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 480f025db17ca..671071b5e4945 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -10,7 +10,7 @@
 import pandas as pd
 from pandas.compat.numpy import np_datetime64_compat
 import pandas.util.testing as tm
-from pandas.errors import PerformanceWarning
+from pandas.errors import PerformanceWarning, NullFrequencyError

 from pandas import (Timestamp, Timedelta, Series,
                     DatetimeIndex, TimedeltaIndex, date_range)
@@ -274,6 +274,64 @@ def test_dti_isub_int(self, tz, one):
         rng -= one
         tm.assert_index_equal(rng, expected)

+    # -------------------------------------------------------------
+    # DatetimeIndex.shift is used in integer addition
+
+    def test_dti_shift_tzaware(self, tz):
+        # GH#9903
+        idx = pd.DatetimeIndex([], name='xxx', tz=tz)
+        tm.assert_index_equal(idx.shift(0, freq='H'), idx)
+        tm.assert_index_equal(idx.shift(3, freq='H'), idx)
+
+        idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
+                                '2011-01-01 12:00'], name='xxx', tz=tz)
+        tm.assert_index_equal(idx.shift(0, freq='H'), idx)
+        exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
+                                '2011-01-01 15:00'], name='xxx', tz=tz)
+        tm.assert_index_equal(idx.shift(3, freq='H'), exp)
+        exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
+                                '2011-01-01 09:00'], name='xxx', tz=tz)
+        tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
+
+    def test_dti_shift_freqs(self):
+        # test shift for DatetimeIndex and non DatetimeIndex
+        # GH#8083
+        drange = pd.date_range('20130101', periods=5)
+        result = drange.shift(1)
+        expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
+                                     '2013-01-05',
+                                     '2013-01-06'], freq='D')
+        tm.assert_index_equal(result, expected)
+
+        result = drange.shift(-1)
+        expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
+                                     '2013-01-03', '2013-01-04'],
+                                    freq='D')
+        tm.assert_index_equal(result, expected)
+
+        result = drange.shift(3, freq='2D')
+        expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
+                                     '2013-01-10',
+                                     '2013-01-11'], freq='D')
+        tm.assert_index_equal(result, expected)
+
+    def test_dti_shift_int(self):
+        rng = date_range('1/1/2000', periods=20)
+
+        result = rng + 5
+        expected = rng.shift(5)
+        tm.assert_index_equal(result, expected)
+
+        result = rng - 5
+        expected = rng.shift(-5)
+        tm.assert_index_equal(result, expected)
+
+    def test_dti_shift_no_freq(self):
+        # GH#19147
+        dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None)
+        with pytest.raises(NullFrequencyError):
+            dti.shift(2)
+
 # -------------------------------------------------------------
 # Binary operations DatetimeIndex and timedelta-like
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 49f94bfa65543..a75ace2933b71 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -2,7 +2,7 @@
 import pytest
 import numpy as np
-from datetime import date, timedelta, time, datetime
+from datetime import date, timedelta, time

 import dateutil
 import pandas as pd
@@ -16,31 +16,6 @@
 randn = np.random.randn

-class TestDatetimeIndexLikeTimestamp(object):
-    # Tests for DatetimeIndex behaving like a vectorized Timestamp
-
-    def test_dti_date_out_of_range(self):
-        # see gh-1475
-        pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
-        pytest.raises(ValueError, DatetimeIndex,
[datetime(1400, 1, 1)]) - - def test_timestamp_fields(self): - # extra fields from DatetimeIndex like quarter and week - idx = tm.makeDateIndex(100) - - fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', - 'days_in_month', 'is_month_start', 'is_month_end', - 'is_quarter_start', 'is_quarter_end', 'is_year_start', - 'is_year_end', 'weekday_name'] - for f in fields: - expected = getattr(idx, f)[-1] - result = getattr(Timestamp(idx[-1]), f) - assert result == expected - - assert idx.freq == Timestamp(idx[-1], idx.freq).freq - assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr - - class TestDatetimeIndex(object): def test_get_loc(self): @@ -371,18 +346,6 @@ def test_isin(self): assert_almost_equal(index.isin([index[2], 5]), np.array([False, False, True, False])) - def test_time(self): - rng = pd.date_range('1/1/2000', freq='12min', periods=10) - result = pd.Index(rng).time - expected = [t.time() for t in rng] - assert (result == expected).all() - - def test_date(self): - rng = pd.date_range('1/1/2000', freq='12H', periods=10) - result = pd.Index(rng).date - expected = [t.date() for t in rng] - assert (result == expected).all() - def test_does_not_convert_mixed_integer(self): df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs: randn(), diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 9d6d27ecb4b6f..c6b3a77773dc7 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -1,9 +1,7 @@ """ generic tests from the Datetimelike class """ -import numpy as np -import pandas as pd from pandas.util import testing as tm -from pandas import Series, Index, DatetimeIndex, date_range +from pandas import DatetimeIndex, date_range from ..datetimelike import DatetimeLike @@ -27,31 +25,7 @@ def test_pickle_compat_construction(self): pass def test_intersection(self): - first = self.index - second = self.index[5:] - intersect = first.intersection(second) - assert tm.equalContents(intersect, second) - - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - result = first.intersection(case) - assert tm.equalContents(result, second) - - third = Index(['a', 'b', 'c']) - result = first.intersection(third) - expected = pd.Index([], dtype=object) - tm.assert_index_equal(result, expected) + pass # handled in test_setops def test_union(self): - first = self.index[:5] - second = self.index[5:] - everything = self.index - union = first.union(second) - assert tm.equalContents(union, everything) - - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - result = first.union(case) - assert tm.equalContents(result, everything) + pass # handled in test_setops diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 951aa2c520d0f..4a46c3b04bbad 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -4,53 +4,7 @@ import pandas as pd import pandas.util.testing as tm from pandas import (Index, DatetimeIndex, datetime, offsets, - Float64Index, date_range, Timestamp) - - -class TestDateTimeIndexToJulianDate(object): - - def test_1700(self): - r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5, - 2345901.5]) - r2 = date_range(start=Timestamp('1710-10-01'), periods=5, - freq='D').to_julian_date() - assert isinstance(r2, Float64Index) - 
tm.assert_index_equal(r1, r2) - - def test_2000(self): - r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5, - 2451605.5]) - r2 = date_range(start=Timestamp('2000-02-27'), periods=5, - freq='D').to_julian_date() - assert isinstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_hour(self): - r1 = Float64Index( - [2451601.5, 2451601.5416666666666666, 2451601.5833333333333333, - 2451601.625, 2451601.6666666666666666]) - r2 = date_range(start=Timestamp('2000-02-27'), periods=5, - freq='H').to_julian_date() - assert isinstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_minute(self): - r1 = Float64Index( - [2451601.5, 2451601.5006944444444444, 2451601.5013888888888888, - 2451601.5020833333333333, 2451601.5027777777777777]) - r2 = date_range(start=Timestamp('2000-02-27'), periods=5, - freq='T').to_julian_date() - assert isinstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_second(self): - r1 = Float64Index( - [2451601.5, 2451601.500011574074074, 2451601.5000231481481481, - 2451601.5000347222222222, 2451601.5000462962962962]) - r2 = date_range(start=Timestamp('2000-02-27'), periods=5, - freq='S').to_julian_date() - assert isinstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) + date_range, Timestamp) class TestTimeSeries(object): @@ -129,17 +83,6 @@ def test_range_edges(self): '1970-01-03', '1970-01-04']) tm.assert_index_equal(idx, exp) - def test_datetimeindex_integers_shift(self): - rng = date_range('1/1/2000', periods=20) - - result = rng + 5 - expected = rng.shift(5) - tm.assert_index_equal(result, expected) - - result = rng - 5 - expected = rng.shift(-5) - tm.assert_index_equal(result, expected) - def test_datetimeindex_repr_short(self): dr = date_range(start='1/1/2012', periods=1) repr(dr) @@ -150,25 +93,6 @@ def test_datetimeindex_repr_short(self): dr = date_range(start='1/1/2012', periods=3) repr(dr) - def test_normalize(self): - rng = date_range('1/1/2000 9:30', periods=10, freq='D') - - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D') - tm.assert_index_equal(result, expected) - - rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, - 1380585612343234312]).astype( - "datetime64[ns]")) - rng_ns_normalized = rng_ns.normalize() - expected = pd.DatetimeIndex(np.array([1380585600000000000, - 1380585600000000000]).astype( - "datetime64[ns]")) - tm.assert_index_equal(rng_ns_normalized, expected) - - assert result.is_normalized - assert not rng.is_normalized - class TestDatetime64(object): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index fb8dd1a43aa7f..4f386eb28cc0f 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -7,7 +7,6 @@ from itertools import product import pandas as pd -from pandas.errors import NullFrequencyError import pandas._libs.tslib as tslib from pandas._libs.tslibs.offsets import shift_months import pandas.util.testing as tm @@ -144,76 +143,6 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, dr, out=0) - def test_round_daily(self): - dti = pd.date_range('20130101 09:10:11', periods=5) - result = dti.round('D') - expected = pd.date_range('20130101', periods=5) - tm.assert_index_equal(result, expected) - - dti = dti.tz_localize('UTC').tz_convert('US/Eastern') - result = dti.round('D') - expected = pd.date_range('20130101', - periods=5).tz_localize('US/Eastern') - tm.assert_index_equal(result, expected) - - result = 
dti.round('s') - tm.assert_index_equal(result, dti) - - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: dti.round(freq)) - - def test_round(self): - for tz in self.tz: - rng = pd.date_range(start='2016-01-01', periods=5, - freq='30Min', tz=tz) - elt = rng[1] - - expected_rng = DatetimeIndex([ - Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), - ]) - expected_elt = expected_rng[1] - - tm.assert_index_equal(rng.round(freq='H'), expected_rng) - assert elt.round(freq='H') == expected_elt - - msg = pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR - with tm.assert_raises_regex(ValueError, msg): - rng.round(freq='foo') - with tm.assert_raises_regex(ValueError, msg): - elt.round(freq='foo') - - msg = "<MonthEnd> is a non-fixed frequency" - tm.assert_raises_regex(ValueError, msg, rng.round, freq='M') - tm.assert_raises_regex(ValueError, msg, elt.round, freq='M') - - # GH 14440 & 15578 - index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz) - result = index.round('ms') - expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz) - tm.assert_index_equal(result, expected) - - for freq in ['us', 'ns']: - tm.assert_index_equal(index, index.round(freq)) - - index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz) - result = index.round('ms') - expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz) - tm.assert_index_equal(result, expected) - - index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031']) - result = index.round('10ns') - expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030']) - tm.assert_index_equal(result, expected) - - with tm.assert_produces_warning(): - ts = '2016-10-17 12:00:00.001501031' - pd.DatetimeIndex([ts]).round('1010ns') - def test_repeat_range(self): rng = date_range('1/1/2000', '1/1/2001') @@ -586,52 +515,6 @@ def test_nat_new(self): exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) - def test_shift_no_freq(self): - # GH#19147 - dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None) - with pytest.raises(NullFrequencyError): - dti.shift(2) - - def test_shift(self): - # GH 9903 - for tz in self.tz: - idx = pd.DatetimeIndex([], name='xxx', tz=tz) - tm.assert_index_equal(idx.shift(0, freq='H'), idx) - tm.assert_index_equal(idx.shift(3, freq='H'), idx) - - idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00' - '2011-01-01 12:00'], name='xxx', tz=tz) - tm.assert_index_equal(idx.shift(0, freq='H'), idx) - exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00' - '2011-01-01 15:00'], name='xxx', tz=tz) - tm.assert_index_equal(idx.shift(3, freq='H'), exp) - exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00' - '2011-01-01 09:00'], name='xxx', tz=tz) - tm.assert_index_equal(idx.shift(-3, freq='H'), exp) - - # TODO: moved from test_datetimelike; de-duplicate with test_shift above - def test_shift2(self): - # test shift for datetimeIndex and non datetimeIndex - # GH8083 - drange = pd.date_range('20130101', periods=5) - result = drange.shift(1) - expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04', - '2013-01-05', - '2013-01-06'], freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(-1) - expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02', - '2013-01-03', 
'2013-01-04'], - freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(3, freq='2D') - expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09', - '2013-01-10', - '2013-01-11'], freq='D') - tm.assert_index_equal(result, expected) - def test_nat(self): assert pd.DatetimeIndex._na_value is pd.NaT assert pd.DatetimeIndex([])._na_value is pd.NaT diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py new file mode 100644 index 0000000000000..111f68ba14775 --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +""" +Tests for DatetimeIndex methods behaving like their Timestamp counterparts +""" +from datetime import datetime + +import numpy as np +import pytest + +import pandas.util.testing as tm +import pandas as pd + +from pandas import date_range, Timestamp, DatetimeIndex + + +@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', + 'US/Eastern', 'dateutil/Asia/Singapore', + 'dateutil/US/Pacific']) +def tz(request): + return request.param + + +class TestDatetimeIndexOps(object): + def test_dti_time(self): + rng = date_range('1/1/2000', freq='12min', periods=10) + result = pd.Index(rng).time + expected = [t.time() for t in rng] + assert (result == expected).all() + + def test_dti_date(self): + rng = date_range('1/1/2000', freq='12H', periods=10) + result = pd.Index(rng).date + expected = [t.date() for t in rng] + assert (result == expected).all() + + def test_dti_date_out_of_range(self): + # GH#1475 + pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) + pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) + + def test_dti_timestamp_fields(self): + # extra fields from DatetimeIndex like quarter and week + idx = tm.makeDateIndex(100) + + fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', + 'days_in_month', 'is_month_start', 'is_month_end', + 'is_quarter_start', 'is_quarter_end', 'is_year_start', + 'is_year_end', 'weekday_name'] + for f in fields: + expected = getattr(idx, f)[-1] + result = getattr(Timestamp(idx[-1]), f) + assert result == expected + + assert idx.freq == Timestamp(idx[-1], idx.freq).freq + assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr + + # ---------------------------------------------------------------- + # DatetimeIndex.round + + def test_round_daily(self): + dti = date_range('20130101 09:10:11', periods=5) + result = dti.round('D') + expected = date_range('20130101', periods=5) + tm.assert_index_equal(result, expected) + + dti = dti.tz_localize('UTC').tz_convert('US/Eastern') + result = dti.round('D') + expected = date_range('20130101', + periods=5).tz_localize('US/Eastern') + tm.assert_index_equal(result, expected) + + result = dti.round('s') + tm.assert_index_equal(result, dti) + + # invalid + for freq in ['Y', 'M', 'foobar']: + pytest.raises(ValueError, lambda: dti.round(freq)) + + def test_round(self, tz): + rng = date_range(start='2016-01-01', periods=5, + freq='30Min', tz=tz) + elt = rng[1] + + expected_rng = DatetimeIndex([ + Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), + ]) + expected_elt = expected_rng[1] + + tm.assert_index_equal(rng.round(freq='H'), expected_rng) + assert elt.round(freq='H') == expected_elt + + msg = 
pd._libs.tslibs.frequencies._INVALID_FREQ_ERROR + with tm.assert_raises_regex(ValueError, msg): + rng.round(freq='foo') + with tm.assert_raises_regex(ValueError, msg): + elt.round(freq='foo') + + msg = "<MonthEnd> is a non-fixed frequency" + tm.assert_raises_regex(ValueError, msg, rng.round, freq='M') + tm.assert_raises_regex(ValueError, msg, elt.round, freq='M') + + # GH#14440 & GH#15578 + index = DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz) + result = index.round('ms') + expected = DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz) + tm.assert_index_equal(result, expected) + + for freq in ['us', 'ns']: + tm.assert_index_equal(index, index.round(freq)) + + index = DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz) + result = index.round('ms') + expected = DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz) + tm.assert_index_equal(result, expected) + + index = DatetimeIndex(['2016-10-17 12:00:00.001501031']) + result = index.round('10ns') + expected = DatetimeIndex(['2016-10-17 12:00:00.001501030']) + tm.assert_index_equal(result, expected) + + with tm.assert_produces_warning(): + ts = '2016-10-17 12:00:00.001501031' + DatetimeIndex([ts]).round('1010ns') + + # ---------------------------------------------------------------- + # DatetimeIndex.normalize + + def test_normalize(self): + rng = date_range('1/1/2000 9:30', periods=10, freq='D') + + result = rng.normalize() + expected = date_range('1/1/2000', periods=10, freq='D') + tm.assert_index_equal(result, expected) + + arr_ns = np.array([1380585623454345752, + 1380585612343234312]).astype("datetime64[ns]") + rng_ns = DatetimeIndex(arr_ns) + rng_ns_normalized = rng_ns.normalize() + + arr_ns = np.array([1380585600000000000, + 1380585600000000000]).astype("datetime64[ns]") + expected = DatetimeIndex(arr_ns) + tm.assert_index_equal(rng_ns_normalized, expected) + + assert result.is_normalized + assert not rng.is_normalized + + +class TestDateTimeIndexToJulianDate(object): + + def test_1700(self): + dr = date_range(start=Timestamp('1710-10-01'), periods=5, freq='D') + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Float64Index) + tm.assert_index_equal(r1, r2) + + def test_2000(self): + dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='D') + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Float64Index) + tm.assert_index_equal(r1, r2) + + def test_hour(self): + dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='H') + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Float64Index) + tm.assert_index_equal(r1, r2) + + def test_minute(self): + dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='T') + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Float64Index) + tm.assert_index_equal(r1, r2) + + def test_second(self): + dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='S') + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Float64Index) + tm.assert_index_equal(r1, r2) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index b74da4922429d..84632e59e2bfb 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -17,6 +17,20 @@ class TestDatetimeIndexSetOps(object): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 
'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] + # TODO: moved from test_datetimelike; dedup with version below + def test_union2(self): + everything = tm.makeDateIndex(10) + first = everything[:5] + second = everything[5:] + union = first.union(second) + assert tm.equalContents(union, everything) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.union(case) + assert tm.equalContents(result, everything) + @pytest.mark.parametrize("tz", tz) def test_union(self, tz): rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) @@ -101,6 +115,24 @@ def test_union_with_DatetimeIndex(self): i1.union(i2) # Works i2.union(i1) # Fails with "AttributeError: can't set attribute" + # TODO: moved from test_datetimelike; de-duplicate with version below + def test_intersection2(self): + first = tm.makeDateIndex(10) + second = first[5:] + intersect = first.intersection(second) + assert tm.equalContents(intersect, second) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.intersection(case) + assert tm.equalContents(result, second) + + third = Index(['a', 'b', 'c']) + result = first.intersection(third) + expected = pd.Index([], dtype=object) + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("tz", [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']) def test_intersection(self, tz):
More test organizing, this one focusing on tests.indexes.datetimes.

- continue collecting straggler arithmetic tests
- new test module for testing that DatetimeIndex.foo behaves like Timestamp.foo (a rough sketch of the idea follows below)
- de-duplicate some tests scattered across modules
- parametrize a handful of tests
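A rough sketch (mine, assuming only the APIs exercised in the diff, with the era-appropriate `'H'` alias) of the Timestamp-parity idea behind test_scalar_compat — index-level methods should agree elementwise with their Timestamp counterparts:

```python
import pandas as pd

rng = pd.date_range('2016-01-01', periods=5, freq='30min')

# DatetimeIndex.round should match Timestamp.round applied per element
assert rng.round(freq='H').equals(
    pd.DatetimeIndex([ts.round(freq='H') for ts in rng]))

# the same parity holds for fields such as quarter
assert list(rng.quarter) == [ts.quarter for ts in rng]
```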
https://api.github.com/repos/pandas-dev/pandas/pulls/19479
2018-01-31T18:24:48Z
2018-02-01T11:26:46Z
2018-02-01T11:26:46Z
2018-02-01T15:56:14Z