title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
TYP ensure bool_t is always used in pandas/core/generic.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d927be76843e1..7988012498db7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -206,3 +206,8 @@ repos: files: ^pandas/core/ exclude: ^pandas/core/api\.py$ types: [python] + - id: no-bool-in-core-generic + name: Use bool_t instead of bool in pandas/core/generic.py + entry: python scripts/no_bool_in_generic.py + language: python + files: ^pandas/core/generic\.py$ diff --git a/LICENSES/PYUPGRADE_LICENSE b/LICENSES/PYUPGRADE_LICENSE new file mode 100644 index 0000000000000..522fbe20b8991 --- /dev/null +++ b/LICENSES/PYUPGRADE_LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Anthony Sottile + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b3262c61a0597..8e20eeb16c7a8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -232,7 +232,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): def __init__( self, data: Manager, - copy: bool = False, + copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ): # copy kwarg is retained for mypy compat, is not used @@ -249,7 +249,7 @@ def __init__( @classmethod def _init_mgr( - cls, mgr, axes, dtype: Dtype | None = None, copy: bool = False + cls, mgr, axes, dtype: Dtype | None = None, copy: bool_t = False ) -> Manager: """ passed a manager and a axes dict """ for a, axe in axes.items(): @@ -377,8 +377,8 @@ def flags(self) -> Flags: def set_flags( self: FrameOrSeries, *, - copy: bool = False, - allows_duplicate_labels: bool | None = None, + copy: bool_t = False, + allows_duplicate_labels: bool_t | None = None, ) -> FrameOrSeries: """ Return a new object with updated flags. @@ -467,7 +467,7 @@ def _data(self): _stat_axis_name = "index" _AXIS_ORDERS: list[str] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0} - _AXIS_REVERSED: bool + _AXIS_REVERSED: bool_t _info_axis_number: int _info_axis_name: str _AXIS_LEN: int @@ -494,7 +494,7 @@ def _construct_axes_dict(self, axes=None, **kwargs): @final @classmethod def _construct_axes_from_arguments( - cls, args, kwargs, require_all: bool = False, sentinel=None + cls, args, kwargs, require_all: bool_t = False, sentinel=None ): """ Construct and returns axes if supplied in args/kwargs. @@ -714,11 +714,11 @@ def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None: @overload def set_axis( - self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool = ... + self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ... ) -> FrameOrSeries | None: ... 
- def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): + def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False): """ Assign desired index to given axis. @@ -749,7 +749,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): return self._set_axis_nocheck(labels, axis, inplace) @final - def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool): + def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t): # NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy. if inplace: setattr(self, self._get_axis_name(axis), labels) @@ -995,8 +995,8 @@ def rename( index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, - copy: bool = True, - inplace: bool = False, + copy: bool_t = True, + inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> FrameOrSeries | None: @@ -1402,13 +1402,13 @@ def _set_axis_name(self, name, axis=0, inplace=False): # Comparison Methods @final - def _indexed_same(self, other) -> bool: + def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) @final - def equals(self, other: object) -> bool: + def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. 
@@ -5071,7 +5071,7 @@ def filter( return self.reindex(**{name: [r for r in items if r in labels]}) elif like: - def f(x) -> bool: + def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) @@ -5079,7 +5079,7 @@ def f(x) -> bool: return self.loc(axis=axis)[values] elif regex: - def f(x) -> bool: + def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) diff --git a/scripts/no_bool_in_generic.py b/scripts/no_bool_in_generic.py new file mode 100644 index 0000000000000..f80eff56b2729 --- /dev/null +++ b/scripts/no_bool_in_generic.py @@ -0,0 +1,92 @@ +""" +Check that pandas/core/generic.py doesn't use bool as a type annotation. + +There is already the method `bool`, so the alias `bool_t` should be used instead. + +This is meant to be run as a pre-commit hook - to run it manually, you can do: + + pre-commit run no-bool-in-core-generic --all-files + +The function `visit` is adapted from a function by the same name in pyupgrade: +https://github.com/asottile/pyupgrade/blob/5495a248f2165941c5d3b82ac3226ba7ad1fa59d/pyupgrade/_data.py#L70-L113 +""" + +import argparse +import ast +import collections +from typing import ( + Dict, + List, + Optional, + Sequence, + Tuple, +) + + +def visit(tree: ast.Module) -> Dict[int, List[int]]: + "Step through tree, recording when nodes are in annotations." 
+ in_annotation = False + nodes: List[Tuple[bool, ast.AST]] = [(in_annotation, tree)] + to_replace = collections.defaultdict(list) + + while nodes: + in_annotation, node = nodes.pop() + + if isinstance(node, ast.Name) and in_annotation and node.id == "bool": + to_replace[node.lineno].append(node.col_offset) + + for name in reversed(node._fields): + value = getattr(node, name) + if name in {"annotation", "returns"}: + next_in_annotation = True + else: + next_in_annotation = in_annotation + if isinstance(value, ast.AST): + nodes.append((next_in_annotation, value)) + elif isinstance(value, list): + for value in reversed(value): + if isinstance(value, ast.AST): + nodes.append((next_in_annotation, value)) + + return to_replace + + +def replace_bool_with_bool_t(to_replace, content: str) -> str: + new_lines = [] + + for n, line in enumerate(content.splitlines(), start=1): + if n in to_replace: + for col_offset in reversed(to_replace[n]): + line = line[:col_offset] + "bool_t" + line[col_offset + 4 :] + new_lines.append(line) + return "\n".join(new_lines) + + +def check_for_bool_in_generic(content: str) -> Tuple[bool, str]: + tree = ast.parse(content) + to_replace = visit(tree) + + if not to_replace: + mutated = False + return mutated, content + + mutated = True + return mutated, replace_bool_with_bool_t(to_replace, content) + + +def main(argv: Optional[Sequence[str]] = None) -> None: + parser = argparse.ArgumentParser() + parser.add_argument("paths", nargs="*") + args = parser.parse_args(argv) + + for path in args.paths: + with open(path, encoding="utf-8") as fd: + content = fd.read() + mutated, new_content = check_for_bool_in_generic(content) + if mutated: + with open(path, "w", encoding="utf-8") as fd: + fd.write(new_content) + + +if __name__ == "__main__": + main() diff --git a/scripts/tests/test_no_bool_in_generic.py b/scripts/tests/test_no_bool_in_generic.py new file mode 100644 index 0000000000000..0bc91c5d1cf1e --- /dev/null +++ 
b/scripts/tests/test_no_bool_in_generic.py @@ -0,0 +1,20 @@ +from scripts.no_bool_in_generic import check_for_bool_in_generic + +BAD_FILE = "def foo(a: bool) -> bool:\n return bool(0)" +GOOD_FILE = "def foo(a: bool_t) -> bool_t:\n return bool(0)" + + +def test_bad_file_with_replace(): + content = BAD_FILE + mutated, result = check_for_bool_in_generic(content) + expected = GOOD_FILE + assert result == expected + assert mutated + + +def test_good_file_with_replace(): + content = GOOD_FILE + mutated, result = check_for_bool_in_generic(content) + expected = content + assert result == expected + assert not mutated
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Currently, lots of methods use the `bool` annotation in that file. I'm not sure it affects `mypy` (some experiments with `reveal_type` suggest it doesn't) - however, it does confuse VSCode, which takes you to the method `bool` if you hover over one of these annotations. The file already contains ```python bool_t = bool # Need alias because NDFrame has def bool: ``` (added in #26024), but it's not always used
https://api.github.com/repos/pandas-dev/pandas/pulls/40175
2021-03-02T18:43:20Z
2021-04-08T14:43:39Z
2021-04-08T14:43:39Z
2021-04-08T14:59:56Z
Backport PR #40170 on branch 1.2.x (DOC: Start v1.2.4 release notes)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index c9c31b408fb7e..8739694c20e33 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 1.2 .. toctree:: :maxdepth: 2 + v1.2.4 v1.2.3 v1.2.2 v1.2.1 diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index c94491df474ab..dec2d061504b4 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -29,4 +29,4 @@ Fixed regressions Contributors ~~~~~~~~~~~~ -.. contributors:: v1.2.2..v1.2.3|HEAD +.. contributors:: v1.2.2..v1.2.3 diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst new file mode 100644 index 0000000000000..790ff4c78cad6 --- /dev/null +++ b/doc/source/whatsnew/v1.2.4.rst @@ -0,0 +1,48 @@ +.. _whatsnew_124: + +What's new in 1.2.4 (April ??, 2021) +--------------------------------------- + +These are the changes in pandas 1.2.4. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.bug_fixes: + +Bug fixes +~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.other: + +Other +~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.2.3..v1.2.4|HEAD
Backport PR #40170: DOC: Start v1.2.4 release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/40172
2021-03-02T12:26:43Z
2021-03-02T13:26:42Z
2021-03-02T13:26:42Z
2021-03-02T13:26:42Z
DOC: Start v1.2.4 release notes
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 71c9b0613b3ce..8697182f5ca6f 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.2 .. toctree:: :maxdepth: 2 + v1.2.4 v1.2.3 v1.2.2 v1.2.1 diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index c94491df474ab..dec2d061504b4 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -29,4 +29,4 @@ Fixed regressions Contributors ~~~~~~~~~~~~ -.. contributors:: v1.2.2..v1.2.3|HEAD +.. contributors:: v1.2.2..v1.2.3 diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst new file mode 100644 index 0000000000000..790ff4c78cad6 --- /dev/null +++ b/doc/source/whatsnew/v1.2.4.rst @@ -0,0 +1,48 @@ +.. _whatsnew_124: + +What's new in 1.2.4 (April ??, 2021) +--------------------------------------- + +These are the changes in pandas 1.2.4. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.bug_fixes: + +Bug fixes +~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.other: + +Other +~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_124.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.2.3..v1.2.4|HEAD
do not merge yet. will merge to master after github release to trigger website update
https://api.github.com/repos/pandas-dev/pandas/pulls/40170
2021-03-02T11:25:41Z
2021-03-02T12:26:15Z
2021-03-02T12:26:15Z
2021-03-02T12:26:35Z
REF: remove internal Block usage from FrameColumnApply
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 15cee1419afb5..203a0c675282d 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -883,9 +883,8 @@ def series_generator(self): # of it. Kids: don't do this at home. ser = self.obj._ixs(0, axis=0) mgr = ser._mgr - blk = mgr.blocks[0] - if is_extension_array_dtype(blk.dtype): + if is_extension_array_dtype(ser.dtype): # values will be incorrect for this block # TODO(EA2D): special case would be unnecessary with 2D EAs obj = self.obj @@ -896,7 +895,7 @@ def series_generator(self): for (arr, name) in zip(values, self.index): # GH#35462 re-pin mgr in case setitem changed it ser._mgr = mgr - blk.values = arr + mgr.set_values(arr) ser.name = name yield ser diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e013a7f680d6f..2ad7471d6f086 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1646,6 +1646,15 @@ def fast_xs(self, loc): """ raise NotImplementedError("Use series._values[loc] instead") + def set_values(self, values: ArrayLike): + """ + Set the values of the single block in place. + + Use at your own risk! This does not check if the passed values are + valid for the current Block/SingleBlockManager (length, dtype, etc). + """ + self.blocks[0].values = values + # -------------------------------------------------------------------- # Constructor Helpers
In the `apply` code, there is still one case where we are directly using the internal blocks. This moves that to a method on the manager, so we don't have to interact directly with the blocks (which ideally we don't want outside of the internals, and let's ArrayManager cleanly support this as well). For the rest it still updates the Series' values in place, preserving the performance trick. cc @jbrockmendel @rhshadrach
https://api.github.com/repos/pandas-dev/pandas/pulls/40166
2021-03-02T08:17:06Z
2021-03-02T17:22:49Z
2021-03-02T17:22:49Z
2021-03-02T17:22:52Z
Backport PR #40137 on branch 1.2.x (DOC: 1.2.3 release date)
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index 99e997189d7b8..c94491df474ab 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -1,6 +1,6 @@ .. _whatsnew_123: -What's new in 1.2.3 (March ??, 2021) +What's new in 1.2.3 (March 02, 2021) ------------------------------------ These are the changes in pandas 1.2.3. See :ref:`release` for a full changelog @@ -19,33 +19,8 @@ Fixed regressions - Fixed regression in nullable integer unary ops propagating mask on assignment (:issue:`39943`) - Fixed regression in :meth:`DataFrame.__setitem__` not aligning :class:`DataFrame` on right-hand side for boolean indexer (:issue:`39931`) - Fixed regression in :meth:`~DataFrame.to_json` failing to use ``compression`` with URL-like paths that are internally opened in binary mode or with user-provided file objects that are opened in binary mode (:issue:`39985`) -- Fixed regression in :meth:`~Series.sort_index` and :meth:`~DataFrame.sort_index`, - which exited with an ungraceful error when having kwarg ``ascending=None`` passed (:issue:`39434`). - Passing ``ascending=None`` is still considered invalid, - and the new error message suggests a proper usage - (``ascending`` must be a boolean or a list-like boolean). +- Fixed regression in :meth:`Series.sort_index` and :meth:`DataFrame.sort_index`, which exited with an ungraceful error when having kwarg ``ascending=None`` passed. Passing ``ascending=None`` is still considered invalid, and the improved error message suggests a proper usage (``ascending`` must be a boolean or a list-like of boolean) (:issue:`39434`) - Fixed regression in :meth:`DataFrame.transform` and :meth:`Series.transform` giving incorrect column labels when passed a dictionary with a mix of list and non-list values (:issue:`40018`) -- - -.. --------------------------------------------------------------------------- - -.. _whatsnew_123.bug_fixes: - -Bug fixes -~~~~~~~~~ - -- -- - -.. 
--------------------------------------------------------------------------- - -.. _whatsnew_123.other: - -Other -~~~~~ - -- -- .. ---------------------------------------------------------------------------
Backport PR #40137: DOC: 1.2.3 release date
https://api.github.com/repos/pandas-dev/pandas/pulls/40165
2021-03-02T07:30:49Z
2021-03-02T08:39:24Z
2021-03-02T08:39:24Z
2021-03-02T08:39:24Z
BUG: EWM.__getitem__ raised error with times
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5c61f259a4202..9bb9f0c7a467a 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -520,7 +520,8 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.sample` where error was raised when ``weights`` was specified and the index was an :class:`Int64Index` (:issue:`39927`) - Bug in :meth:`DataFrameGroupBy.aggregate` and :meth:`.Resampler.aggregate` would sometimes raise ``SpecificationError`` when passed a dictionary and columns were missing; will now always raise a ``KeyError`` instead (:issue:`40004`) - Bug in :meth:`DataFrameGroupBy.sample` where column selection was not applied to sample result (:issue:`39928`) -- +- Bug in :class:`core.window.ewm.ExponentialMovingWindow` when calling ``__getitem__`` would incorrectly raise a ``ValueError`` when providing ``times`` (:issue:`40164`) +- Bug in :class:`core.window.ewm.ExponentialMovingWindow` when calling ``__getitem__`` would not retain ``com``, ``span``, ``alpha`` or ``halflife`` attributes (:issue:`40164`) Reshaping ^^^^^^^^^ diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index fcd2acc3e025a..efacfad40ef82 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -1476,7 +1476,7 @@ def roll_weighted_var(const float64_t[:] values, const float64_t[:] weights, def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end, int minp, float64_t com, bint adjust, bint ignore_na, - const float64_t[:] times, float64_t halflife): + const float64_t[:] deltas): """ Compute exponentially-weighted moving average using center-of-mass. 
@@ -1501,7 +1501,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end, Py_ssize_t i, j, s, e, nobs, win_size, N = len(vals), M = len(start) const float64_t[:] sub_vals ndarray[float64_t] sub_output, output = np.empty(N, dtype=float) - float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur, delta + float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur bint is_observation if N == 0: @@ -1532,8 +1532,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end, if weighted_avg == weighted_avg: if is_observation or not ignore_na: - delta = times[i] - times[i - 1] - old_wt *= old_wt_factor ** (delta / halflife) + old_wt *= old_wt_factor ** deltas[i - 1] if is_observation: # avoid numerical errors on constant series diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 696c4af27e3f2..5a71db82f26e4 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -215,11 +215,13 @@ class ExponentialMovingWindow(BaseWindow): _attributes = [ "com", + "span", + "halflife", + "alpha", "min_periods", "adjust", "ignore_na", "axis", - "halflife", "times", ] @@ -245,38 +247,48 @@ def __init__( method="single", axis=axis, ) + self.com = com + self.span = span + self.halflife = halflife + self.alpha = alpha self.adjust = adjust self.ignore_na = ignore_na - if times is not None: + self.times = times + if self.times is not None: if isinstance(times, str): - times = self._selected_obj[times] - if not is_datetime64_ns_dtype(times): + self.times = self._selected_obj[times] + if not is_datetime64_ns_dtype(self.times): raise ValueError("times must be datetime64[ns] dtype.") - if len(times) != len(obj): + if len(self.times) != len(obj): raise ValueError("times must be the same length as the object.") if not isinstance(halflife, (str, datetime.timedelta)): raise ValueError( "halflife must be a string or datetime.timedelta object" ) - if isna(times).any(): + if isna(self.times).any(): 
raise ValueError("Cannot convert NaT values to integer") - self.times = np.asarray(times.view(np.int64)) - self.halflife = Timedelta(halflife).value + _times = np.asarray(self.times.view(np.int64), dtype=np.float64) + _halflife = float(Timedelta(self.halflife).value) + self._deltas = np.diff(_times) / _halflife # Halflife is no longer applicable when calculating COM # But allow COM to still be calculated if the user passes other decay args - if common.count_not_none(com, span, alpha) > 0: - self.com = get_center_of_mass(com, span, None, alpha) + if common.count_not_none(self.com, self.span, self.alpha) > 0: + self._com = get_center_of_mass(self.com, self.span, None, self.alpha) else: - self.com = 0.0 + self._com = 1.0 else: - if halflife is not None and isinstance(halflife, (str, datetime.timedelta)): + if self.halflife is not None and isinstance( + self.halflife, (str, datetime.timedelta) + ): raise ValueError( "halflife can only be a timedelta convertible argument if " "times is not None." 
) - self.times = None - self.halflife = None - self.com = get_center_of_mass(com, span, halflife, alpha) + # Without times, points are equally spaced + self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64) + self._com = get_center_of_mass( + self.com, self.span, self.halflife, self.alpha + ) def _get_window_indexer(self) -> BaseIndexer: """ @@ -334,22 +346,13 @@ def aggregate(self, func, *args, **kwargs): ) def mean(self, *args, **kwargs): nv.validate_window_func("mean", args, kwargs) - if self.times is not None: - com = 1.0 - times = self.times.astype(np.float64) - halflife = float(self.halflife) - else: - com = self.com - times = np.arange(len(self.obj), dtype=np.float64) - halflife = 1.0 window_func = window_aggregations.ewma window_func = partial( window_func, - com=com, + com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, - times=times, - halflife=halflife, + deltas=self._deltas, ) return self._apply(window_func) @@ -411,7 +414,7 @@ def var(self, bias: bool = False, *args, **kwargs): window_func = window_aggregations.ewmcov window_func = partial( window_func, - com=self.com, + com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, bias=bias, @@ -480,7 +483,7 @@ def cov_func(x, y): end, self.min_periods, y_array, - self.com, + self._com, self.adjust, self.ignore_na, bias, @@ -546,7 +549,7 @@ def _cov(X, Y): end, self.min_periods, Y, - self.com, + self._com, self.adjust, self.ignore_na, 1, @@ -613,7 +616,7 @@ def mean(self, engine=None, engine_kwargs=None): if maybe_use_numba(engine): groupby_ewma_func = generate_numba_groupby_ewma_func( engine_kwargs, - self.com, + self._com, self.adjust, self.ignore_na, ) diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index fbd7a36a75bf0..3e823844c7f56 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -142,6 +142,29 @@ def test_ewm_with_nat_raises(halflife_with_times): ser.ewm(com=0.1, halflife=halflife_with_times, times=times) 
+def test_ewm_with_times_getitem(halflife_with_times): + # GH 40164 + halflife = halflife_with_times + data = np.arange(10.0) + data[::2] = np.nan + times = date_range("2000", freq="D", periods=10) + df = DataFrame({"A": data, "B": data}) + result = df.ewm(halflife=halflife, times=times)["A"].mean() + expected = df.ewm(halflife=1.0)["A"].mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"]) +def test_ewm_getitem_attributes_retained(arg, adjust, ignore_na): + # GH 40164 + kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na} + ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs) + expected = {attr: getattr(ewm, attr) for attr in ewm._attributes} + ewm_slice = ewm["A"] + result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes} + assert result == expected + + def test_ewm_vol_deprecated(): ser = Series(range(1)) with tm.assert_produces_warning(FutureWarning):
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Also somewhat ameliorates the performance hit in https://github.com/pandas-dev/pandas/pull/40072#issuecomment-787456038 ``` dtype="float" window=1000 N = 10 ** 5 arr = (100 * np.random.random(N)).astype(dtype) ewm = pd.Series(arr).ewm(halflife=window) %timeit ewm.mean() # Master 2.59 ms ± 25.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) # PR 2.5 ms ± 31.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40164
2021-03-02T05:30:25Z
2021-03-03T13:06:59Z
2021-03-03T13:06:59Z
2021-03-03T20:23:41Z
BUG: DataFrame(frozenset) should raise
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 41db72612a66b..ee4ca6a06a634 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -402,6 +402,7 @@ Conversion - Bug in :meth:`Series.to_dict` with ``orient='records'`` now returns python native types (:issue:`25969`) - Bug in :meth:`Series.view` and :meth:`Index.view` when converting between datetime-like (``datetime64[ns]``, ``datetime64[ns, tz]``, ``timedelta64``, ``period``) dtypes (:issue:`39788`) - Bug in creating a :class:`DataFrame` from an empty ``np.recarray`` not retaining the original dtypes (:issue:`40121`) +- Bug in :class:`DataFrame` failing to raise ``TypeError`` when constructing from a ``frozenset`` (:issue:`40163`) - Strings diff --git a/pandas/core/construction.py b/pandas/core/construction.py index db9239d03dd13..9ba9a5bd38164 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -518,9 +518,9 @@ def sanitize_array( elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0: # TODO: deque, array.array - if isinstance(data, set): + if isinstance(data, (set, frozenset)): # Raise only for unordered sets, e.g., not for dict_keys - raise TypeError("Set type is unordered") + raise TypeError(f"'{type(data).__name__}' type is unordered") data = list(data) if dtype is not None: diff --git a/pandas/core/series.py b/pandas/core/series.py index b2e620c9b8047..24c356e7a8269 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -375,10 +375,8 @@ def __init__( "`index` argument. `copy` must be False." 
) - elif is_extension_array_dtype(data): + elif isinstance(data, ExtensionArray): pass - elif isinstance(data, (set, frozenset)): - raise TypeError(f"'{type(data).__name__}' type is unordered") else: data = com.maybe_iterable_to_list(data) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 3bbe5f9e46efa..493763cf9c4e0 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -377,15 +377,18 @@ def test_constructor_dict(self): with pytest.raises(ValueError, match=msg): DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) + def test_constructor_dict_length1(self): # Length-one dict micro-optimization frame = DataFrame({"A": {"1": 1, "2": 2}}) tm.assert_index_equal(frame.index, Index(["1", "2"])) + def test_constructor_dict_with_index(self): # empty dict plus index idx = Index([0, 1, 2]) frame = DataFrame({}, index=idx) assert frame.index is idx + def test_constructor_dict_with_index_and_columns(self): # empty dict with index and columns idx = Index([0, 1, 2]) frame = DataFrame({}, index=idx, columns=idx) @@ -393,10 +396,12 @@ def test_constructor_dict(self): assert frame.columns is idx assert len(frame._series) == 3 + def test_constructor_dict_of_empty_lists(self): # with dict of empty list and Series frame = DataFrame({"A": [], "B": []}, columns=["A", "B"]) tm.assert_index_equal(frame.index, RangeIndex(0), exact=True) + def test_constructor_dict_with_none(self): # GH 14381 # Dict with None value frame_none = DataFrame({"a": None}, index=[0]) @@ -405,6 +410,7 @@ def test_constructor_dict(self): assert frame_none_list._get_value(0, "a") is None tm.assert_frame_equal(frame_none, frame_none_list) + def test_constructor_dict_errors(self): # GH10856 # dict with scalar values should raise error, even if columns passed msg = "If using all scalar values, you must pass an index" @@ -560,7 +566,7 @@ def test_constructor_error_msgs(self): with pytest.raises(ValueError, 
match=msg): DataFrame({"a": False, "b": True}) - def test_constructor_subclass_dict(self, float_frame, dict_subclass): + def test_constructor_subclass_dict(self, dict_subclass): # Test for passing dict subclass to constructor data = { "col1": dict_subclass((x, 10.0 * x) for x in range(10)), @@ -574,6 +580,7 @@ def test_constructor_subclass_dict(self, float_frame, dict_subclass): df = DataFrame(data) tm.assert_frame_equal(refdf, df) + def test_constructor_defaultdict(self, float_frame): # try with defaultdict from collections import defaultdict @@ -608,6 +615,7 @@ def test_constructor_dict_cast(self): assert frame["B"].dtype == np.object_ assert frame["A"].dtype == np.float64 + def test_constructor_dict_cast2(self): # can't cast to float test_data = { "A": dict(zip(range(20), tm.makeStringIndex(20))), @@ -623,6 +631,7 @@ def test_constructor_dict_dont_upcast(self): df = DataFrame(d) assert isinstance(df["Col1"]["Row2"], float) + def test_constructor_dict_dont_upcast2(self): dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2]) assert isinstance(dm[1][1], int) @@ -1195,6 +1204,7 @@ def __len__(self, n): expected = DataFrame([[1, "a"], [2, "b"]], columns=columns) tm.assert_frame_equal(result, expected, check_dtype=False) + def test_constructor_stdlib_array(self): # GH 4297 # support Array import array @@ -2427,11 +2437,16 @@ def test_from_2d_ndarray_with_dtype(self): expected = DataFrame(array_dim2).astype("datetime64[ns, UTC]") tm.assert_frame_equal(df, expected) - def test_construction_from_set_raises(self): + @pytest.mark.parametrize("typ", [set, frozenset]) + def test_construction_from_set_raises(self, typ): # https://github.com/pandas-dev/pandas/issues/32582 - msg = "Set type is unordered" + values = typ({1, 2, 3}) + msg = f"'{typ.__name__}' type is unordered" with pytest.raises(TypeError, match=msg): - DataFrame({"a": {1, 2, 3}}) + DataFrame({"a": values}) + + with pytest.raises(TypeError, match=msg): + Series(values) def get1(obj): diff --git 
a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index aec1e65cbb4c0..a69a693bb6203 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -69,6 +69,7 @@ class TestSeriesConstructors: ], ) def test_empty_constructor(self, constructor, check_index_type): + # TODO: share with frame test of the same name with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): expected = Series() result = constructor() @@ -310,6 +311,7 @@ def test_constructor_generator(self): exp = Series(range(10)) tm.assert_series_equal(result, exp) + # same but with non-default index gen = (i for i in range(10)) result = Series(gen, index=range(10, 20)) exp.index = range(10, 20) @@ -323,6 +325,7 @@ def test_constructor_map(self): exp = Series(range(10)) tm.assert_series_equal(result, exp) + # same but with non-default index m = map(lambda x: x, range(10)) result = Series(m, index=range(10, 20)) exp.index = range(10, 20) @@ -386,6 +389,7 @@ def test_constructor_categorical_with_coercion(self): str(df.values) str(df) + def test_constructor_categorical_with_coercion2(self): # GH8623 x = DataFrame( [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]], @@ -747,6 +751,7 @@ def test_constructor_datelike_coercion(self): assert s.iloc[1] == "NOV" assert s.dtype == object + def test_constructor_datelike_coercion2(self): # the dtype was being reset on the slicing and re-inferred to datetime # even thought the blocks are mixed belly = "216 3T19".split() @@ -798,6 +803,7 @@ def test_constructor_dtype_datetime64(self): assert isna(s[1]) assert s.dtype == "M8[ns]" + def test_constructor_dtype_datetime64_10(self): # GH3416 dates = [ np.datetime64(datetime(2013, 1, 1)), @@ -850,6 +856,7 @@ def test_constructor_dtype_datetime64(self): expected = Series(dts.astype(np.int64)) tm.assert_series_equal(result, expected) + def test_constructor_dtype_datetime64_9(self): # invalid dates can be help as object result = Series([datetime(2, 1, 1)]) assert result[0] == datetime(2, 1, 1, 0, 0) @@ -857,11 +864,13 @@ def test_constructor_dtype_datetime64(self): result = Series([datetime(3000, 1, 1)]) assert result[0] == datetime(3000, 1, 1, 0, 0) + def test_constructor_dtype_datetime64_8(self): # don't mix types result = Series([Timestamp("20130101"), 1], index=["a", "b"]) assert result["a"] == Timestamp("20130101") assert result["b"] == 1 + def test_constructor_dtype_datetime64_7(self): # GH6529 # coerce datetime64 non-ns properly dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M") @@ -887,6 +896,7 @@ def test_constructor_dtype_datetime64(self): tm.assert_numpy_array_equal(series1.values, dates2) assert series1.dtype == object + def test_constructor_dtype_datetime64_6(self): # these will correctly infer a datetime s = Series([None, NaT, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" @@ -897,6 +907,7 @@ def test_constructor_dtype_datetime64(self): s = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" + def test_constructor_dtype_datetime64_5(self): # tz-aware (UTC and other tz's) # GH 8411 dr = date_range("20130101", periods=3) @@ -906,18 +917,21 @@ def 
test_constructor_dtype_datetime64(self): dr = date_range("20130101", periods=3, tz="US/Eastern") assert str(Series(dr).iloc[0].tz) == "US/Eastern" + def test_constructor_dtype_datetime64_4(self): # non-convertible s = Series([1479596223000, -1479590, NaT]) assert s.dtype == "object" assert s[2] is NaT assert "NaT" in str(s) + def test_constructor_dtype_datetime64_3(self): # if we passed a NaT it remains s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT]) assert s.dtype == "object" assert s[2] is NaT assert "NaT" in str(s) + def test_constructor_dtype_datetime64_2(self): # if we passed a nan it remains s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan]) assert s.dtype == "object" @@ -980,6 +994,7 @@ def test_constructor_with_datetime_tz(self): result = DatetimeIndex(s, freq="infer") tm.assert_index_equal(result, dr) + def test_constructor_with_datetime_tz4(self): # inference s = Series( [ @@ -990,6 +1005,7 @@ def test_constructor_with_datetime_tz(self): assert s.dtype == "datetime64[ns, US/Pacific]" assert lib.infer_dtype(s, skipna=True) == "datetime64" + def test_constructor_with_datetime_tz3(self): s = Series( [ Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), @@ -999,6 +1015,7 @@ def test_constructor_with_datetime_tz(self): assert s.dtype == "object" assert lib.infer_dtype(s, skipna=True) == "datetime" + def test_constructor_with_datetime_tz2(self): # with all NaT s = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") expected = Series(DatetimeIndex(["NaT", "NaT"], tz="US/Eastern")) @@ -1231,14 +1248,6 @@ def test_constructor_dict_of_tuples(self): expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)])) tm.assert_series_equal(result, expected) - def test_constructor_set(self): - values = {1, 2, 3, 4, 5} - with pytest.raises(TypeError, match="'set' type is unordered"): - Series(values) - values = frozenset(values) - with pytest.raises(TypeError, match="'frozenset' type is unordered"): - Series(values) - # 
https://github.com/pandas-dev/pandas/issues/22698 @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning") def test_fromDict(self):
DataFrame(set) raises, and Series raises for both. This unifies the 2 places this is checked. Started out as a split-big-tests branch, so some of that is left over.
https://api.github.com/repos/pandas-dev/pandas/pulls/40163
2021-03-02T03:45:54Z
2021-03-02T23:25:54Z
2021-03-02T23:25:54Z
2021-03-02T23:37:52Z
BUG: Add decl when xml_declaration=True and pretty_print=False for et…
diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index 044b03ba83714..dd68f0f78261e 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -324,7 +324,9 @@ def build_tree(self) -> bytes: if self.pretty_print: self.out_xml = self.prettify_tree() - if not self.xml_declaration: + if self.xml_declaration: + self.out_xml = self.add_declaration() + else: self.out_xml = self.remove_declaration() if self.stylesheet is not None: @@ -416,6 +418,23 @@ def prettify_tree(self) -> bytes: return dom.toprettyxml(indent=" ", encoding=self.encoding) + def add_declaration(self) -> bytes: + """ + Add xml declaration. + + This method will add xml declaration of working tree. Currently, + xml_declaration is supported in etree starting in Python 3.8. + """ + decl = f'<?xml version="1.0" encoding="{self.encoding}"?>\n' + + doc = ( + self.out_xml + if self.out_xml.startswith(b"<?xml") + else decl.encode(self.encoding) + self.out_xml + ) + + return doc + def remove_declaration(self) -> bytes: """ Remove xml declaration. 
diff --git a/pandas/tests/io/xml/test_to_xml.py b/pandas/tests/io/xml/test_to_xml.py index 2026035a23370..97793ce8f65b8 100644 --- a/pandas/tests/io/xml/test_to_xml.py +++ b/pandas/tests/io/xml/test_to_xml.py @@ -867,8 +867,7 @@ def test_xml_declaration_pretty_print(): assert output == expected -@td.skip_if_no("lxml") -def test_no_pretty_print_with_decl(): +def test_no_pretty_print_with_decl(parser): expected = ( "<?xml version='1.0' encoding='utf-8'?>\n" "<data><row><index>0</index><shape>square</shape>" @@ -879,7 +878,7 @@ def test_no_pretty_print_with_decl(): "</row></data>" ) - output = geom_df.to_xml(pretty_print=False, parser="lxml") + output = geom_df.to_xml(pretty_print=False, parser=parser) output = equalize_decl(output) # etree adds space for closed tags @@ -889,8 +888,7 @@ def test_no_pretty_print_with_decl(): assert output == expected -@td.skip_if_no("lxml") -def test_no_pretty_print_no_decl(): +def test_no_pretty_print_no_decl(parser): expected = ( "<data><row><index>0</index><shape>square</shape>" "<degrees>360</degrees><sides>4.0</sides></row><row>" @@ -900,7 +898,11 @@ def test_no_pretty_print_no_decl(): "</row></data>" ) - output = geom_df.to_xml(xml_declaration=False, pretty_print=False) + output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") assert output == expected
…ree parser - [X] references IO XML tracker #40131 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry (not necessary, module releases: v1.3.0)
https://api.github.com/repos/pandas-dev/pandas/pulls/40162
2021-03-02T03:44:35Z
2021-03-03T03:09:05Z
2021-03-03T03:09:05Z
2021-03-04T22:51:41Z
STYLE: Inconsistent namespace - dtypes (pandas-dev#39992)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index bf83085058cfc..ca311768dc2d9 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -821,7 +821,7 @@ class TestCategoricalDtypeParametrized: np.arange(1000), ["a", "b", 10, 2, 1.3, True], [True, False], - pd.date_range("2017", periods=4), + date_range("2017", periods=4), ], ) def test_basic(self, categories, ordered): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 5fbe9b01db6e3..78a62c832833f 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -808,7 +808,7 @@ def test_unicode(self): (object, None, True, "empty"), ], ) - @pytest.mark.parametrize("box", [pd.Series, np.array]) + @pytest.mark.parametrize("box", [Series, np.array]) def test_object_empty(self, box, missing, dtype, skipna, expected): # GH 23421 arr = box([missing, missing], dtype=dtype) @@ -915,7 +915,7 @@ def test_infer_dtype_period(self): arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="M")]) assert lib.infer_dtype(arr, skipna=True) == "period" - @pytest.mark.parametrize("klass", [pd.array, pd.Series, pd.Index]) + @pytest.mark.parametrize("klass", [pd.array, Series, Index]) @pytest.mark.parametrize("skipna", [True, False]) def test_infer_dtype_period_array(self, klass, skipna): # https://github.com/pandas-dev/pandas/issues/23553 @@ -1264,7 +1264,7 @@ def test_interval(self): inferred = lib.infer_dtype(Series(idx), skipna=False) assert inferred == "interval" - @pytest.mark.parametrize("klass", [pd.array, pd.Series]) + @pytest.mark.parametrize("klass", [pd.array, Series]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]]) def test_string_dtype(self, data, skipna, klass): @@ -1273,7 +1273,7 @@ def test_string_dtype(self, data, skipna, klass): inferred = lib.infer_dtype(val, skipna=skipna) assert 
inferred == "string" - @pytest.mark.parametrize("klass", [pd.array, pd.Series]) + @pytest.mark.parametrize("klass", [pd.array, Series]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]]) def test_boolean_dtype(self, data, skipna, klass): diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index ecd56b5b61244..02bae02436d8c 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -205,16 +205,16 @@ def test_isna_datetime(self): def test_isna_old_datetimelike(self): # isna_old should work for dt64tz, td64, and period, not just tznaive - dti = pd.date_range("2016-01-01", periods=3) + dti = date_range("2016-01-01", periods=3) dta = dti._data - dta[-1] = pd.NaT + dta[-1] = NaT expected = np.array([False, False, True], dtype=bool) objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")] for obj in objs: with cf.option_context("mode.use_inf_as_na", True): - result = pd.isna(obj) + result = isna(obj) tm.assert_numpy_array_equal(result, expected) @@ -320,38 +320,38 @@ def test_period(self): def test_decimal(self): # scalars GH#23530 a = Decimal(1.0) - assert pd.isna(a) is False - assert pd.notna(a) is True + assert isna(a) is False + assert notna(a) is True b = Decimal("NaN") - assert pd.isna(b) is True - assert pd.notna(b) is False + assert isna(b) is True + assert notna(b) is False # array arr = np.array([a, b]) expected = np.array([False, True]) - result = pd.isna(arr) + result = isna(arr) tm.assert_numpy_array_equal(result, expected) - result = pd.notna(arr) + result = notna(arr) tm.assert_numpy_array_equal(result, ~expected) # series ser = Series(arr) expected = Series(expected) - result = pd.isna(ser) + result = isna(ser) tm.assert_series_equal(result, expected) - result = pd.notna(ser) + result = notna(ser) tm.assert_series_equal(result, ~expected) # index idx = pd.Index(arr) expected = np.array([False, 
True]) - result = pd.isna(idx) + result = isna(idx) tm.assert_numpy_array_equal(result, expected) - result = pd.notna(idx) + result = notna(idx) tm.assert_numpy_array_equal(result, ~expected) @@ -578,7 +578,7 @@ def _check_behavior(self, arr, expected): tm.assert_numpy_array_equal(result, expected) def test_basic(self): - arr = np.array([1, None, "foo", -5.1, pd.NaT, np.nan]) + arr = np.array([1, None, "foo", -5.1, NaT, np.nan]) expected = np.array([False, True, False, False, True, True]) self._check_behavior(arr, expected)
- [x] xref #39992 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40161
2021-03-02T01:25:25Z
2021-03-02T08:10:30Z
2021-03-02T08:10:30Z
2021-03-02T08:10:38Z
STYLE: incondintent namaspace(series)
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index 6199e77e10166..5db159e1abb80 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -74,7 +74,7 @@ def get_expected(s, name): if isinstance(result, np.ndarray): if is_integer_dtype(result): result = result.astype("int64") - elif not is_list_like(result) or isinstance(result, pd.DataFrame): + elif not is_list_like(result) or isinstance(result, DataFrame): return result return Series(result, index=s.index, name=s.name) @@ -83,7 +83,7 @@ def compare(s, name): b = get_expected(s, prop) if not (is_list_like(a) and is_list_like(b)): assert a == b - elif isinstance(a, pd.DataFrame): + elif isinstance(a, DataFrame): tm.assert_frame_equal(a, b) else: tm.assert_series_equal(a, b) @@ -180,7 +180,7 @@ def compare(s, name): assert result.dtype == object result = s.dt.total_seconds() - assert isinstance(result, pd.Series) + assert isinstance(result, Series) assert result.dtype == "float64" freq_result = s.dt.freq @@ -236,11 +236,11 @@ def get_dir(s): # 11295 # ambiguous time error on the conversions - s = Series(pd.date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") + s = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") s = s.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") results = get_dir(s) tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) - exp_values = pd.date_range( + exp_values = date_range( "2015-01-01", "2016-01-01", freq="T", tz="UTC" ).tz_convert("America/Chicago") # freq not preserved by tz_localize above @@ -297,7 +297,7 @@ def test_dt_round_tz(self): @pytest.mark.parametrize("method", ["ceil", "round", "floor"]) def test_dt_round_tz_ambiguous(self, method): # GH 18946 round near "fall back" DST - df1 = pd.DataFrame( + df1 = DataFrame( [ pd.to_datetime("2017-10-29 02:00:00+02:00", utc=True), pd.to_datetime("2017-10-29 
02:00:00+01:00", utc=True), @@ -634,7 +634,7 @@ def test_dt_accessor_invalid(self, ser): assert not hasattr(ser, "dt") def test_dt_accessor_updates_on_inplace(self): - s = Series(pd.date_range("2018-01-01", periods=10)) + s = Series(date_range("2018-01-01", periods=10)) s[2] = None return_value = s.fillna(pd.Timestamp("2018-01-01"), inplace=True) assert return_value is None @@ -680,7 +680,7 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): ) def test_isocalendar(self, input_series, expected_output): result = pd.to_datetime(Series(input_series)).dt.isocalendar() - expected_frame = pd.DataFrame( + expected_frame = DataFrame( expected_output, columns=["year", "week", "day"], dtype="UInt32" ) tm.assert_frame_equal(result, expected_frame) diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 1de6540217655..e4ba530d0741c 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -352,7 +352,7 @@ def test_indexing_over_size_cutoff_period_index(monkeypatch): monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000) n = 1100 - idx = pd.period_range("1/1/2000", freq="T", periods=n) + idx = period_range("1/1/2000", freq="T", periods=n) assert idx._engine.over_size_threshold s = Series(np.random.randn(len(idx)), index=idx) diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index d57a4c271680b..7642ccff31c6a 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -309,7 +309,7 @@ def test_getitem_slice_integers(self): class TestSeriesGetitemListLike: - @pytest.mark.parametrize("box", [list, np.array, Index, pd.Series]) + @pytest.mark.parametrize("box", [list, np.array, Index, Series]) def test_getitem_no_matches(self, box): # GH#33462 we expect the same behavior for list/ndarray/Index/Series ser = Series(["A", "B"]) diff --git 
a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index cd5a7af1d5ec0..30c37113f6b8f 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -5,7 +5,6 @@ import numpy as np import pytest -import pandas as pd from pandas import ( DataFrame, IndexSlice, @@ -58,7 +57,7 @@ def test_basic_getitem_dt64tz_values(): # GH12089 # with tz for values ser = Series( - pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"] + date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"] ) expected = Timestamp("2011-01-01", tz="US/Eastern") result = ser.loc["a"] @@ -114,7 +113,7 @@ def test_getitem_setitem_integers(): def test_series_box_timestamp(): - rng = pd.date_range("20090415", "20090519", freq="B") + rng = date_range("20090415", "20090519", freq="B") ser = Series(rng) assert isinstance(ser[0], Timestamp) assert isinstance(ser.at[1], Timestamp) @@ -131,7 +130,7 @@ def test_series_box_timestamp(): def test_series_box_timedelta(): - rng = pd.timedelta_range("1 day 1 s", periods=5, freq="h") + rng = timedelta_range("1 day 1 s", periods=5, freq="h") ser = Series(rng) assert isinstance(ser[0], Timedelta) assert isinstance(ser.at[1], Timedelta) diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index 1e50fef55b4ec..799f3d257434d 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -475,7 +475,7 @@ def test_where_datetimelike_categorical(tz_naive_fixture): # GH#37682 tz = tz_naive_fixture - dr = pd.date_range("2001-01-01", periods=3, tz=tz)._with_freq(None) + dr = date_range("2001-01-01", periods=3, tz=tz)._with_freq(None) lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT]) rvals = pd.Categorical([dr[0], pd.NaT, dr[2]]) diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py index 
cad5476d4861c..5686e6478772d 100644 --- a/pandas/tests/series/methods/test_interpolate.py +++ b/pandas/tests/series/methods/test_interpolate.py @@ -642,7 +642,7 @@ def test_interp_datetime64(self, method, tz_naive_fixture): def test_interp_pad_datetime64tz_values(self): # GH#27628 missing.interpolate_2d should handle datetimetz values - dti = pd.date_range("2015-04-05", periods=3, tz="US/Central") + dti = date_range("2015-04-05", periods=3, tz="US/Central") ser = Series(dti) ser[1] = pd.NaT result = ser.interpolate(method="pad") @@ -735,13 +735,13 @@ def test_series_interpolate_method_values(self): def test_series_interpolate_intraday(self): # #1698 - index = pd.date_range("1/1/2012", periods=4, freq="12D") + index = date_range("1/1/2012", periods=4, freq="12D") ts = Series([0, 12, 24, 36], index) new_index = index.append(index + pd.DateOffset(days=1)).sort_values() exp = ts.reindex(new_index).interpolate(method="time") - index = pd.date_range("1/1/2012", periods=4, freq="12H") + index = date_range("1/1/2012", periods=4, freq="12H") ts = Series([0, 12, 24, 36], index) new_index = index.append(index + pd.DateOffset(hours=1)).sort_values() result = ts.reindex(new_index).interpolate(method="time") diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py index 60ec0a90e906f..73684e300ed77 100644 --- a/pandas/tests/series/methods/test_shift.py +++ b/pandas/tests/series/methods/test_shift.py @@ -353,14 +353,14 @@ def test_shift_preserve_freqstr(self, periods): # GH#21275 ser = Series( range(periods), - index=pd.date_range("2016-1-1 00:00:00", periods=periods, freq="H"), + index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"), ) result = ser.shift(1, "2H") expected = Series( range(periods), - index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"), + index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"), ) tm.assert_series_equal(result, expected) diff --git 
a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 63c9b4d899622..aec1e65cbb4c0 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -689,16 +689,16 @@ def test_constructor_pass_nan_nat(self): tm.assert_series_equal(Series([np.nan, np.nan]), exp) tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp) - exp = Series([pd.NaT, pd.NaT]) + exp = Series([NaT, NaT]) assert exp.dtype == "datetime64[ns]" - tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp) - tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp) + tm.assert_series_equal(Series([NaT, NaT]), exp) + tm.assert_series_equal(Series(np.array([NaT, NaT])), exp) - tm.assert_series_equal(Series([pd.NaT, np.nan]), exp) - tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp) + tm.assert_series_equal(Series([NaT, np.nan]), exp) + tm.assert_series_equal(Series(np.array([NaT, np.nan])), exp) - tm.assert_series_equal(Series([np.nan, pd.NaT]), exp) - tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp) + tm.assert_series_equal(Series([np.nan, NaT]), exp) + tm.assert_series_equal(Series(np.array([np.nan, NaT])), exp) def test_constructor_cast(self): msg = "could not convert string to float" @@ -824,7 +824,7 @@ def test_constructor_dtype_datetime64(self): tm.assert_series_equal(result, expected) expected = Series( - [pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]" + [NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]" ) result = Series([np.nan] + dates[1:], dtype="datetime64[ns]") tm.assert_series_equal(result, expected) @@ -888,13 +888,13 @@ def test_constructor_dtype_datetime64(self): assert series1.dtype == object # these will correctly infer a datetime - s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"]) + s = Series([None, NaT, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" - s = Series([np.nan, pd.NaT, "2013-08-05 
15:30:00.000001"]) + s = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" - s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"]) + s = Series([NaT, None, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" - s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"]) + s = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) assert s.dtype == "datetime64[ns]" # tz-aware (UTC and other tz's) @@ -907,15 +907,15 @@ def test_constructor_dtype_datetime64(self): assert str(Series(dr).iloc[0].tz) == "US/Eastern" # non-convertible - s = Series([1479596223000, -1479590, pd.NaT]) + s = Series([1479596223000, -1479590, NaT]) assert s.dtype == "object" - assert s[2] is pd.NaT + assert s[2] is NaT assert "NaT" in str(s) # if we passed a NaT it remains - s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT]) + s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT]) assert s.dtype == "object" - assert s[2] is pd.NaT + assert s[2] is NaT assert "NaT" in str(s) # if we passed a nan it remains @@ -941,7 +941,7 @@ def test_constructor_with_datetime_tz(self): assert isinstance(result, np.ndarray) assert result.dtype == "datetime64[ns]" - exp = pd.DatetimeIndex(result) + exp = DatetimeIndex(result) exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz) tm.assert_index_equal(dr, exp) @@ -977,7 +977,7 @@ def test_constructor_with_datetime_tz(self): t = Series(date_range("20130101", periods=1000, tz="US/Eastern")) assert "datetime64[ns, US/Eastern]" in str(t) - result = pd.DatetimeIndex(s, freq="infer") + result = DatetimeIndex(s, freq="infer") tm.assert_index_equal(result, dr) # inference @@ -1000,8 +1000,8 @@ def test_constructor_with_datetime_tz(self): assert lib.infer_dtype(s, skipna=True) == "datetime" # with all NaT - s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") - expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern")) + s = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") 
+ expected = Series(DatetimeIndex(["NaT", "NaT"], tz="US/Eastern")) tm.assert_series_equal(s, expected) @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) @@ -1018,7 +1018,7 @@ def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit): tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None]) + @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", NaT, np.nan, None]) def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): # GH 17415: With naive string result = Series([arg], dtype="datetime64[ns, CET]") @@ -1302,7 +1302,7 @@ def test_constructor_dtype_timedelta64(self): td = Series([timedelta(days=1), np.nan], dtype="m8[ns]") assert td.dtype == "timedelta64[ns]" - td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]") + td = Series([np.timedelta64(300000000), NaT], dtype="m8[ns]") assert td.dtype == "timedelta64[ns]" # improved inference @@ -1317,7 +1317,7 @@ def test_constructor_dtype_timedelta64(self): td = Series([np.timedelta64(300000000), np.nan]) assert td.dtype == "timedelta64[ns]" - td = Series([pd.NaT, np.timedelta64(300000000)]) + td = Series([NaT, np.timedelta64(300000000)]) assert td.dtype == "timedelta64[ns]" td = Series([np.timedelta64(1, "s")]) @@ -1349,13 +1349,13 @@ def test_constructor_dtype_timedelta64(self): assert td.dtype == "object" # these will correctly infer a timedelta - s = Series([None, pd.NaT, "1 Day"]) + s = Series([None, NaT, "1 Day"]) assert s.dtype == "timedelta64[ns]" - s = Series([np.nan, pd.NaT, "1 Day"]) + s = Series([np.nan, NaT, "1 Day"]) assert s.dtype == "timedelta64[ns]" - s = Series([pd.NaT, None, "1 Day"]) + s = Series([NaT, None, "1 Day"]) assert s.dtype == "timedelta64[ns]" - s = Series([pd.NaT, np.nan, "1 Day"]) + s = Series([NaT, np.nan, "1 Day"]) assert s.dtype == "timedelta64[ns]" # GH 16406 @@ -1606,7 +1606,7 @@ def test_constructor_dict_multiindex(self): _d = sorted(d.items()) result = Series(d) 
expected = Series( - [x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d]) + [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index a91908f7fba52..96a69476ccbef 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -169,7 +169,7 @@ def test_repr_should_return_str(self): def test_repr_max_rows(self): # GH 6863 - with pd.option_context("max_rows", None): + with option_context("max_rows", None): str(Series(range(1001))) # should not raise exception def test_unicode_string_with_unicode(self):
- [x] xref #39992 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40159
2021-03-02T00:15:55Z
2021-03-02T08:09:01Z
2021-03-02T08:09:01Z
2021-03-02T08:09:02Z
REF: refactor array_strptime_with_fallback from to_datetime
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 9bfe852083390..279f4bd056ed8 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -284,7 +284,7 @@ def _convert_listlike_datetimes( tz: Optional[Timezone] = None, unit: Optional[str] = None, errors: Optional[str] = None, - infer_datetime_format: Optional[bool] = None, + infer_datetime_format: bool = False, dayfirst: Optional[bool] = None, yearfirst: Optional[bool] = None, exact: bool = True, @@ -305,7 +305,7 @@ def _convert_listlike_datetimes( None or string of the frequency of the passed data errors : string error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore' - infer_datetime_format : boolean + infer_datetime_format : bool, default False inferring format behavior from to_datetime dayfirst : boolean dayfirst parsing behavior from to_datetime @@ -413,17 +413,16 @@ def _convert_listlike_datetimes( require_iso8601 = not infer_datetime_format format = None - tz_parsed = None result = None if format is not None: try: # shortcut formatting here if format == "%Y%m%d": + # pass orig_arg as float-dtype may have been converted to + # datetime64[ns] + orig_arg = ensure_object(orig_arg) try: - # pass orig_arg as float-dtype may have been converted to - # datetime64[ns] - orig_arg = ensure_object(orig_arg) result = _attempt_YYYYMMDD(orig_arg, errors=errors) except (ValueError, TypeError, OutOfBoundsDatetime) as err: raise ValueError( @@ -432,36 +431,12 @@ def _convert_listlike_datetimes( # fallback if result is None: - try: - result, timezones = array_strptime( - arg, format, exact=exact, errors=errors - ) - if "%Z" in format or "%z" in format: - return _return_parsed_timezone_results( - result, timezones, tz, name - ) - except OutOfBoundsDatetime: - if errors == "raise": - raise - elif errors == "coerce": - result = np.empty(arg.shape, dtype="M8[ns]") - iresult = result.view("i8") - iresult.fill(iNaT) - else: - result = arg - except ValueError: - 
# if format was inferred, try falling back - # to array_to_datetime - terminate here - # for specified formats - if not infer_datetime_format: - if errors == "raise": - raise - elif errors == "coerce": - result = np.empty(arg.shape, dtype="M8[ns]") - iresult = result.view("i8") - iresult.fill(iNaT) - else: - result = arg + result = _array_strptime_with_fallback( + arg, name, tz, format, exact, errors, infer_datetime_format + ) + if result is not None: + return result + except ValueError as e: # Fallback to try to convert datetime objects if timezone-aware # datetime objects are found without passing `utc=True` @@ -485,16 +460,63 @@ def _convert_listlike_datetimes( allow_object=True, ) - if tz_parsed is not None: - # We can take a shortcut since the datetime64 numpy array - # is in UTC - dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed)) - return DatetimeIndex._simple_new(dta, name=name) + if tz_parsed is not None: + # We can take a shortcut since the datetime64 numpy array + # is in UTC + dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed)) + return DatetimeIndex._simple_new(dta, name=name) utc = tz == "utc" return _box_as_indexlike(result, utc=utc, name=name) +def _array_strptime_with_fallback( + arg, + name, + tz, + fmt: str, + exact: bool, + errors: Optional[str], + infer_datetime_format: bool, +) -> Optional[Index]: + """ + Call array_strptime, with fallback behavior depending on 'errors'. 
+ """ + utc = tz == "utc" + + try: + result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors) + if "%Z" in fmt or "%z" in fmt: + return _return_parsed_timezone_results(result, timezones, tz, name) + except OutOfBoundsDatetime: + if errors == "raise": + raise + elif errors == "coerce": + result = np.empty(arg.shape, dtype="M8[ns]") + iresult = result.view("i8") + iresult.fill(iNaT) + else: + result = arg + except ValueError: + # if fmt was inferred, try falling back + # to array_to_datetime - terminate here + # for specified formats + if not infer_datetime_format: + if errors == "raise": + raise + elif errors == "coerce": + result = np.empty(arg.shape, dtype="M8[ns]") + iresult = result.view("i8") + iresult.fill(iNaT) + else: + result = arg + else: + # Indicates to the caller to fallback to objects_to_datetime64ns + return None + + return _box_as_indexlike(result, utc=utc, name=name) + + def _adjust_to_origin(arg, origin, unit): """ Helper function for to_datetime.
ultimate goal is to condense the several datetime casting/inference functions we use down to just one.
https://api.github.com/repos/pandas-dev/pandas/pulls/40156
2021-03-01T21:09:45Z
2021-03-02T17:27:55Z
2021-03-02T17:27:55Z
2021-03-02T17:29:16Z
REF: avoid circular (runtime) import in dtypes.cast
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 4593b4dea78a4..71863c8925e89 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -43,6 +43,7 @@ iNaT, ints_to_pydatetime, ) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( AnyArrayLike, ArrayLike, @@ -1459,14 +1460,15 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: # safe coerce to timedelta64 # will try first with a string & object conversion - from pandas import to_timedelta - try: - td_values = to_timedelta(v) + # bc we know v.dtype == object, this is equivalent to + # `np.asarray(to_timedelta(v))`, but using a lower-level API that + # does not require a circular import. + td_values = array_to_timedelta64(v).view("m8[ns]") except (ValueError, OverflowError): return v.reshape(shape) else: - return np.asarray(td_values).reshape(shape) + return td_values.reshape(shape) inferred_type = lib.infer_datetimelike_array(ensure_object(v)) @@ -1500,8 +1502,8 @@ def maybe_cast_to_datetime( try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ + from pandas.core.arrays.timedeltas import sequence_to_td64ns from pandas.core.tools.datetimes import to_datetime - from pandas.core.tools.timedeltas import to_timedelta if not is_list_like(value): raise TypeError("value must be listlike") @@ -1582,7 +1584,8 @@ def maybe_cast_to_datetime( # so localize and convert value = dta.tz_localize("UTC").tz_convert(dtype.tz) elif is_timedelta64: - value = to_timedelta(value, errors="raise")._values + # if successful, we get a ndarray[td64ns] + value, _ = sequence_to_td64ns(value) except OutOfBoundsDatetime: raise except ValueError as err:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40155
2021-03-01T21:08:10Z
2021-03-01T23:10:12Z
2021-03-01T23:10:12Z
2021-03-01T23:53:10Z
TYP: internals.construction
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 178e6ce9b02eb..9f111282473c2 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1620,6 +1620,7 @@ def maybe_cast_to_datetime( dta = dta.tz_localize(None) value = dta elif is_datetime64tz: + dtype = cast(DatetimeTZDtype, dtype) # The string check can be removed once issue #13712 # is solved. String data that is passed with a # datetime64tz is assumed to be naive which should @@ -1700,6 +1701,8 @@ def ensure_nanosecond_dtype(dtype: DtypeObj) -> DtypeObj: dtype('<M8[ns]') >>> ensure_nanosecond_dtype(np.dtype("m8[ps]")) + Traceback (most recent call last): + ... TypeError: cannot convert timedeltalike to dtype [timedelta64[ps]] """ msg = ( diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f0f8d813bba96..dcd6ef77238f9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1939,12 +1939,11 @@ def from_records( arr_columns_list.append(k) arrays.append(v) - arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns) + arr_columns = Index(arr_columns_list) + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) - if columns is not None: - columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) @@ -1954,9 +1953,7 @@ def from_records( arrays[i] = lib.maybe_convert_objects(arr, try_float=True) arr_columns = ensure_index(arr_columns) - if columns is not None: - columns = ensure_index(columns) - else: + if columns is None: columns = arr_columns if exclude is None: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index d49114c0da719..9a7ae39b9f8eb 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -79,6 +79,7 @@ if TYPE_CHECKING: from numpy.ma.mrecords import MaskedRecords + # 
--------------------------------------------------------------------- # BlockManager Interface @@ -91,7 +92,7 @@ def arrays_to_mgr( dtype: Optional[DtypeObj] = None, verify_integrity: bool = True, typ: Optional[str] = None, -): +) -> Manager: """ Segregate Series based on type and coerce into matrices. @@ -109,11 +110,11 @@ def arrays_to_mgr( # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) - columns = ensure_index(columns) else: - columns = ensure_index(columns) index = ensure_index(index) + columns = ensure_index(columns) + # from BlockManager perspective axes = [columns, index] @@ -140,9 +141,8 @@ def rec_array_to_mgr( fdata = ma.getdata(data) if index is None: index = _get_names_from_index(fdata) - if index is None: - index = ibase.default_index(len(data)) - index = ensure_index(index) + else: + index = ensure_index(index) if columns is not None: columns = ensure_index(columns) @@ -215,14 +215,14 @@ def mgr_to_mgr(mgr, typ: str): def ndarray_to_mgr( values, index, columns, dtype: Optional[DtypeObj], copy: bool, typ: str -): +) -> Manager: # used in DataFrame.__init__ - # input must be a ndarray, list, Series, index + # input must be a ndarray, list, Series, Index, ExtensionArray if isinstance(values, ABCSeries): if columns is None: if values.name is not None: - columns = [values.name] + columns = Index([values.name]) if index is None: index = values.index else: @@ -309,7 +309,9 @@ def ndarray_to_mgr( return create_block_manager_from_blocks(block_values, [columns, index]) -def dict_to_mgr(data: Dict, index, columns, dtype: Optional[DtypeObj], typ: str): +def dict_to_mgr( + data: Dict, index, columns, dtype: Optional[DtypeObj], typ: str +) -> Manager: """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
@@ -531,21 +533,18 @@ def extract_index(data) -> Index: return ensure_index(index) -def reorder_arrays(arrays, arr_columns, columns): +def reorder_arrays( + arrays: List[ArrayLike], arr_columns: Index, columns: Optional[Index] +) -> Tuple[List[ArrayLike], Index]: # reorder according to the columns - if ( - columns is not None - and len(columns) - and arr_columns is not None - and len(arr_columns) - ): + if columns is not None and len(columns) and len(arr_columns): indexer = ensure_index(arr_columns).get_indexer(columns) arr_columns = ensure_index([arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns -def _get_names_from_index(data): +def _get_names_from_index(data) -> Index: has_some_name = any(getattr(s, "name", None) is not None for s in data) if not has_some_name: return ibase.default_index(len(data)) @@ -560,7 +559,7 @@ def _get_names_from_index(data): index[i] = f"Unnamed {count}" count += 1 - return index + return Index(index) def _get_axes(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40154
2021-03-01T21:02:24Z
2021-03-03T18:41:53Z
2021-03-03T18:41:53Z
2021-03-03T18:48:27Z
[ArrayManager] Add SingleArrayManager to back a Series
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 06a06484b921a..a84de6cda0432 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -165,6 +165,9 @@ jobs: pytest pandas/tests/resample/ pytest pandas/tests/reshape/merge + pytest pandas/tests/series/methods + pytest pandas/tests/series/test_* + # indexing subset (temporary since other tests don't pass yet) pytest pandas/tests/frame/indexing/test_indexing.py::TestDataFrameIndexing::test_setitem_boolean pytest pandas/tests/frame/indexing/test_where.py diff --git a/pandas/_typing.py b/pandas/_typing.py index c50d532f40dd7..e464f2a021ef6 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -58,6 +58,8 @@ from pandas.core.internals import ( ArrayManager, BlockManager, + SingleArrayManager, + SingleBlockManager, ) from pandas.core.resample import Resampler from pandas.core.series import Series @@ -184,3 +186,4 @@ # internals Manager = Union["ArrayManager", "BlockManager"] +SingleManager = Union["SingleArrayManager", "SingleBlockManager"] diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 9ba9a5bd38164..43900709ad11f 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -481,7 +481,6 @@ def sanitize_array( DataFrame constructor, as the dtype keyword there may be interpreted as only applying to a subset of columns, see GH#24435. 
""" - if isinstance(data, ma.MaskedArray): data = sanitize_masked_array(data) @@ -555,6 +554,7 @@ def sanitize_array( inferred = lib.infer_dtype(subarr, skipna=False) if inferred in {"interval", "period"}: subarr = array(subarr) + subarr = extract_array(subarr, extract_numpy=True) return subarr diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5bba7ab67b2bf..205aebbf4124a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -138,6 +138,7 @@ from pandas.core.internals import ( ArrayManager, BlockManager, + SingleArrayManager, ) from pandas.core.internals.construction import mgr_to_mgr from pandas.core.missing import find_valid_index @@ -5563,7 +5564,7 @@ def _protect_consolidate(self, f): Consolidate _mgr -- if the blocks have changed, then clear the cache """ - if isinstance(self._mgr, ArrayManager): + if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index aaf67fb1be532..2de5e81360a93 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -108,10 +108,7 @@ all_indexes_same, ) import pandas.core.indexes.base as ibase -from pandas.core.internals import ( - ArrayManager, - BlockManager, -) +from pandas.core.internals import ArrayManager from pandas.core.series import Series from pandas.core.util.numba_ import maybe_use_numba @@ -1151,18 +1148,18 @@ def py_fallback(values: ArrayLike) -> ArrayLike: # in the operation. We un-split here. result = result._consolidate() assert isinstance(result, (Series, DataFrame)) # for mypy + # unwrap DataFrame/Series to get array mgr = result._mgr - assert isinstance(mgr, BlockManager) - - # unwrap DataFrame to get array - if len(mgr.blocks) != 1: + arrays = mgr.arrays + if len(arrays) != 1: # We've split an object block! Everything we've assumed # about a single block input returning a single block output # is a lie. 
See eg GH-39329 return mgr.as_array() else: - result = mgr.blocks[0].values - return result + # We are a single block from a BlockManager + # or one array from SingleArrayManager + return arrays[0] def array_func(values: ArrayLike) -> ArrayLike: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index bded503a1e6db..a434e94abec5a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1590,7 +1590,11 @@ def _setitem_with_indexer(self, indexer, value, name="iloc"): # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value - if not take_split_path and self.obj._mgr.blocks and self.ndim > 1: + if ( + not take_split_path + and getattr(self.obj._mgr, "blocks", False) + and self.ndim > 1 + ): # in case of dict, keys are indices val = list(value.values()) if isinstance(value, dict) else value blk = self.obj._mgr.blocks[0] diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 054ce8a40288b..23d35b412e1ae 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,5 +1,11 @@ -from pandas.core.internals.array_manager import ArrayManager -from pandas.core.internals.base import DataManager +from pandas.core.internals.array_manager import ( + ArrayManager, + SingleArrayManager, +) +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, +) from pandas.core.internals.blocks import ( # io.pytables, io.packers Block, CategoricalBlock, @@ -34,7 +40,9 @@ "DataManager", "ArrayManager", "BlockManager", + "SingleDataManager", "SingleBlockManager", + "SingleArrayManager", "concatenate_managers", # those two are preserved here for downstream compatibility (GH-33892) "create_block_manager_from_arrays", diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 998f1ffcf02ee..d6f2530ed2ca5 100644 --- a/pandas/core/internals/array_manager.py +++ 
b/pandas/core/internals/array_manager.py @@ -4,7 +4,6 @@ from __future__ import annotations from typing import ( - TYPE_CHECKING, Any, Callable, List, @@ -47,6 +46,7 @@ ) from pandas.core.dtypes.generic import ( ABCDataFrame, + ABCPandasArray, ABCSeries, ) from pandas.core.dtypes.missing import ( @@ -58,6 +58,9 @@ from pandas.core.arrays import ( DatetimeArray, ExtensionArray, + IntervalArray, + PandasArray, + PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseDtype @@ -71,13 +74,12 @@ Index, ensure_index, ) -from pandas.core.internals.base import DataManager +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, +) from pandas.core.internals.blocks import make_block -if TYPE_CHECKING: - from pandas.core.internals.managers import SingleBlockManager - - T = TypeVar("T", bound="ArrayManager") @@ -132,7 +134,7 @@ def make_empty(self: T, axes=None) -> T: @property def items(self) -> Index: - return self._axes[1] + return self._axes[-1] @property def axes(self) -> List[Index]: # type: ignore[override] @@ -191,7 +193,8 @@ def get_dtypes(self): def __repr__(self) -> str: output = type(self).__name__ output += f"\nIndex: {self._axes[0]}" - output += f"\nColumns: {self._axes[1]}" + if self.ndim == 1: + output += f"\nColumns: {self._axes[1]}" output += f"\n{len(self.arrays)} arrays:" for arr in self.arrays: output += f"\n{arr.dtype}" @@ -407,12 +410,16 @@ def apply_with_block(self: T, f, align_keys=None, **kwargs) -> T: # The caller is responsible for ensuring that # obj.axes[-1].equals(self.items) if obj.ndim == 1: - kwargs[k] = obj.iloc[[i]] + if self.ndim == 2: + kwargs[k] = obj.iloc[slice(i, i + 1)]._values + else: + kwargs[k] = obj.iloc[:]._values else: kwargs[k] = obj.iloc[:, [i]]._values else: # otherwise we have an ndarray - kwargs[k] = obj[[i]] + if obj.ndim == 2: + kwargs[k] = obj[[i]] if hasattr(arr, "tz") and arr.tz is None: # type: ignore[union-attr] # DatetimeArray needs to be converted to ndarray for DatetimeBlock 
@@ -420,15 +427,21 @@ def apply_with_block(self: T, f, align_keys=None, **kwargs) -> T: elif arr.dtype.kind == "m" and not isinstance(arr, np.ndarray): # TimedeltaArray needs to be converted to ndarray for TimedeltaBlock arr = arr._data # type: ignore[union-attr] - if isinstance(arr, np.ndarray): - arr = np.atleast_2d(arr) - block = make_block(arr, placement=slice(0, 1, 1), ndim=2) + + if self.ndim == 2: + if isinstance(arr, np.ndarray): + arr = np.atleast_2d(arr) + block = make_block(arr, placement=slice(0, 1, 1), ndim=2) + else: + block = make_block(arr, placement=slice(0, len(self), 1), ndim=1) + applied = getattr(block, f)(**kwargs) if isinstance(applied, list): applied = applied[0] arr = applied.values - if isinstance(arr, np.ndarray): - arr = arr[0, :] + if self.ndim == 2: + if isinstance(arr, np.ndarray): + arr = arr[0, :] result_arrays.append(arr) return type(self)(result_arrays, self._axes) @@ -459,7 +472,6 @@ def where(self, other, cond, align: bool, errors: str, axis: int) -> ArrayManage # return self.apply_with_block("setitem", indexer=indexer, value=value) def putmask(self, mask, new, align: bool = True): - if align: align_keys = ["new", "mask"] else: @@ -734,16 +746,12 @@ def fast_xs(self, loc: int) -> ArrayLike: result = np.array(values, dtype=dtype) return result - def iget(self, i: int) -> SingleBlockManager: + def iget(self, i: int) -> SingleArrayManager: """ - Return the data as a SingleBlockManager. + Return the data as a SingleArrayManager. 
""" - from pandas.core.internals.managers import SingleBlockManager - values = self.arrays[i] - block = make_block(values, placement=slice(0, len(values)), ndim=1) - - return SingleBlockManager(block, self._axes[0]) + return SingleArrayManager([values], [self._axes[0]]) def iget_values(self, i: int) -> ArrayLike: """ @@ -909,8 +917,8 @@ def _reindex_indexer( if not allow_dups: self._axes[axis]._validate_can_reindex(indexer) - # if axis >= self.ndim: - # raise IndexError("Requested axis not found in manager") + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") if axis == 1: new_arrays = [] @@ -1039,3 +1047,151 @@ def _interleaved_dtype(blocks) -> Optional[DtypeObj]: return None return find_common_type([b.dtype for b in blocks]) + + +class SingleArrayManager(ArrayManager, SingleDataManager): + + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: List[Union[np.ndarray, ExtensionArray]] + _axes: List[Index] + + ndim = 1 + + def __init__( + self, + arrays: List[Union[np.ndarray, ExtensionArray]], + axes: List[Index], + verify_integrity: bool = True, + ): + self._axes = axes + self.arrays = arrays + + if verify_integrity: + assert len(axes) == 1 + assert len(arrays) == 1 + self._axes = [ensure_index(ax) for ax in self._axes] + arr = arrays[0] + arr = ensure_wrapped_if_datetimelike(arr) + if isinstance(arr, ABCPandasArray): + arr = arr.to_numpy() + self.arrays = [arr] + self._verify_integrity() + + def _verify_integrity(self) -> None: + (n_rows,) = self.shape + assert len(self.arrays) == 1 + assert len(self.arrays[0]) == n_rows + + @staticmethod + def _normalize_axis(axis): + return axis + + def make_empty(self, axes=None) -> SingleArrayManager: + """Return an empty ArrayManager with index/array of length 0""" + if axes is None: + axes = [Index([], dtype=object)] + array = np.array([], dtype=self.dtype) + return type(self)([array], axes) + + @classmethod + def 
from_array(cls, array, index): + return cls([array], [index]) + + @property + def axes(self): + return self._axes + + @property + def index(self) -> Index: + return self._axes[0] + + @property + def array(self): + return self.arrays[0] + + @property + def dtype(self): + return self.array.dtype + + def external_values(self): + """The array that Series.values returns""" + if isinstance(self.array, (PeriodArray, IntervalArray)): + return self.array.astype(object) + elif isinstance(self.array, (DatetimeArray, TimedeltaArray)): + return self.array._data + else: + return self.array + + def internal_values(self): + """The array that Series._values returns""" + return self.array + + def array_values(self): + """The array that Series.array returns""" + arr = self.array + if isinstance(arr, np.ndarray): + arr = PandasArray(arr) + return arr + + @property + def _can_hold_na(self) -> bool: + if isinstance(self.array, np.ndarray): + return self.array.dtype.kind not in ["b", "i", "u"] + else: + # ExtensionArray + return self.array._can_hold_na + + @property + def is_single_block(self) -> bool: + return True + + def _consolidate_check(self): + pass + + def get_slice(self, slobj: slice, axis: int = 0) -> SingleArrayManager: + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + new_array = self.array[slobj] + new_index = self.index[slobj] + return type(self)([new_array], [new_index]) + + def apply(self, func, **kwargs): + if callable(func): + new_array = func(self.array, **kwargs) + else: + new_array = getattr(self.array, func)(**kwargs) + return type(self)([new_array], self._axes) + + def setitem(self, indexer, value): + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def idelete(self, indexer): + """ + Delete selected locations in-place (new array, same ArrayManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[0][to_keep]] + self._axes = 
[self._axes[0][to_keep]] + + def _get_data_subset(self, predicate: Callable) -> ArrayManager: + # used in get_numeric_data / get_bool_data + if predicate(self.array): + return type(self)(self.arrays, self._axes, verify_integrity=False) + else: + return self.make_empty() + + def set_values(self, values: ArrayLike): + """ + Set (replace) the values of the SingleArrayManager in place. + + Use at your own risk! This does not check if the passed values are + valid for the current SingleArrayManager (length, dtype, etc). + """ + self.arrays[0] = values diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py index 2ce91134f61d6..0e4b5ce2e7452 100644 --- a/pandas/core/internals/base.py +++ b/pandas/core/internals/base.py @@ -98,3 +98,7 @@ def equals(self, other: object) -> bool: return False return self._equal_values(other) + + +class SingleDataManager(DataManager): + ndim = 1 diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 09559e571d5ee..b3a60d7d1ef45 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -61,7 +61,10 @@ Index, ensure_index, ) -from pandas.core.internals.base import DataManager +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, +) from pandas.core.internals.blocks import ( Block, CategoricalBlock, @@ -1525,7 +1528,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager: return bm -class SingleBlockManager(BlockManager): +class SingleBlockManager(BlockManager, SingleDataManager): """ manage a single block with """ ndim = 1 @@ -1617,6 +1620,10 @@ def internal_values(self): """The array that Series._values returns""" return self._block.internal_values() + def array_values(self): + """The array that Series.array returns""" + return self._block.array_values() + @property def _can_hold_na(self) -> bool: return self._block._can_hold_na diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a0dfb1c83a70b..288668b700ad0 100644 
--- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1742,10 +1742,12 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike: if isinstance(values, np.ndarray): result = result.view(orig_dtype) else: - # DatetimeArray + # DatetimeArray/TimedeltaArray # TODO: have this case go through a DTA method? + # For DatetimeTZDtype, view result as M8[ns] + npdtype = orig_dtype if isinstance(orig_dtype, np.dtype) else "M8[ns]" result = type(values)._simple_new( # type: ignore[attr-defined] - result.view("M8[ns]"), dtype=orig_dtype + result.view(npdtype), dtype=orig_dtype ) elif skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)): diff --git a/pandas/core/series.py b/pandas/core/series.py index 24c356e7a8269..5a5d1c44b312c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -43,6 +43,7 @@ FrameOrSeriesUnion, IndexKeyFunc, NpDtype, + SingleManager, StorageOptions, ValueKeyFunc, ) @@ -125,7 +126,10 @@ from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_bool_indexer -from pandas.core.internals import SingleBlockManager +from pandas.core.internals import ( + SingleArrayManager, + SingleBlockManager, +) from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ( ensure_key_mapped, @@ -267,7 +271,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__ ) __hash__ = generic.NDFrame.__hash__ - _mgr: SingleBlockManager + _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] @@ -285,7 +289,7 @@ def __init__( ): if ( - isinstance(data, SingleBlockManager) + isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and copy is False @@ -299,8 +303,12 @@ def __init__( if fastpath: # data is an ndarray, index is defined - if not isinstance(data, 
SingleBlockManager): - data = SingleBlockManager.from_array(data, index) + if not isinstance(data, (SingleBlockManager, SingleArrayManager)): + manager = get_option("mode.data_manager") + if manager == "block": + data = SingleBlockManager.from_array(data, index) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) if copy: data = data.copy() if index is None: @@ -363,7 +371,7 @@ def __init__( data, index = self._init_dict(data, index, dtype) dtype = None copy = False - elif isinstance(data, SingleBlockManager): + elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: @@ -388,7 +396,7 @@ def __init__( com.require_length_match(data, index) # create/copy the manager - if isinstance(data, SingleBlockManager): + if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: @@ -396,7 +404,11 @@ def __init__( else: data = sanitize_array(data, index, dtype, copy) - data = SingleBlockManager.from_array(data, index) + manager = get_option("mode.data_manager") + if manager == "block": + data = SingleBlockManager.from_array(data, index) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) generic.NDFrame.__init__(self, data) self.name = name @@ -657,7 +669,7 @@ def _values(self): @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc] @property def array(self) -> ExtensionArray: - return self._mgr._block.array_values() + return self._mgr.array_values() # ops def ravel(self, order="C"): diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index f873c93d90683..bb541739c7f44 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -5,6 +5,7 @@ lib, reduction as libreduction, ) +import pandas.util._test_decorators as td import pandas as pd from pandas 
import Series @@ -61,7 +62,13 @@ def cumsum_max(x): return 0 -@pytest.mark.parametrize("func", [cumsum_max, assert_block_lengths]) +@pytest.mark.parametrize( + "func", + [ + cumsum_max, + pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test), + ], +) def test_mgr_locs_updated(func): # https://github.com/pandas-dev/pandas/issues/31802 # Some operations may require creating new blocks, which requires diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index a69a693bb6203..ab484e7ae9d8a 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -13,6 +13,7 @@ iNaT, lib, ) +import pandas.util._test_decorators as td from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -650,6 +651,7 @@ def test_constructor_copy(self): assert x[0] == 2.0 assert y[0] == 1.0 + @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite test @pytest.mark.parametrize( "index", [ @@ -1691,12 +1693,14 @@ def test_series_constructor_infer_multiindex(self): class TestSeriesConstructorInternals: - def test_constructor_no_pandas_array(self): + def test_constructor_no_pandas_array(self, using_array_manager): ser = Series([1, 2, 3]) result = Series(ser.array) tm.assert_series_equal(ser, result) - assert isinstance(result._mgr.blocks[0], NumericBlock) + if not using_array_manager: + assert isinstance(result._mgr.blocks[0], NumericBlock) + @td.skip_array_manager_invalid_test def test_from_array(self): result = Series(pd.array(["1H", "2H"], dtype="timedelta64[ns]")) assert result._mgr.blocks[0].is_extension is False @@ -1704,6 +1708,7 @@ def test_from_array(self): result = Series(pd.array(["2015"], dtype="datetime64[ns]")) assert result._mgr.blocks[0].is_extension is False + @td.skip_array_manager_invalid_test def test_from_list_dtype(self): result = Series(["1H", "2H"], dtype="timedelta64[ns]") assert result._mgr.blocks[0].is_extension is False
xref https://github.com/pandas-dev/pandas/issues/39146#issuecomment-786741804 This implements a `SingleArrayManager` class for backing a Series, consistent with how we have a SingleBlockManager. cc @jreback @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/40152
2021-03-01T20:45:53Z
2021-03-05T00:55:16Z
2021-03-05T00:55:15Z
2021-03-05T07:14:46Z
PERF: extract_array -> _values
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a4da249894084..74fb0e2bd54fb 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -84,7 +84,7 @@ from pandas.core.array_algos.take import take_nd from pandas.core.construction import ( - array, + array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) @@ -474,7 +474,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: elif needs_i8_conversion(comps.dtype): # Dispatch to DatetimeLikeArrayMixin.isin - return array(comps).isin(values) + return pd_array(comps).isin(values) elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype): # e.g. comps are integers and values are datetime64s return np.zeros(comps.shape, dtype=bool) @@ -1566,7 +1566,7 @@ def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray: if is_scalar(value): value = dtype.type(value) else: - value = array(value, dtype=dtype) + value = pd_array(value, dtype=dtype) elif not ( is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr) ): diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9db21800d2499..7777cb4bf674e 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -97,7 +97,7 @@ ) import pandas.core.common as com from pandas.core.construction import ( - array, + array as pd_array, extract_array, sanitize_array, ) @@ -498,7 +498,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: # TODO: consolidate with ndarray case? 
elif is_extension_array_dtype(dtype): - result = array(self, dtype=dtype, copy=copy) + result = pd_array(self, dtype=dtype, copy=copy) elif is_integer_dtype(dtype) and self.isna().any(): raise ValueError("Cannot convert float NaN to integer") diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e476c3566c10f..00a32e4443de5 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -106,7 +106,7 @@ ) import pandas.core.common as com from pandas.core.construction import ( - array, + array as pd_array, extract_array, ) from pandas.core.indexers import ( @@ -719,7 +719,7 @@ def _validate_listlike(self, value, allow_object: bool = False): # Do type inference if necessary up front # e.g. we passed PeriodIndex.values and got an ndarray of Periods - value = array(value) + value = pd_array(value) value = extract_array(value, extract_numpy=True) if is_dtype_equal(value.dtype, "string"): @@ -1207,7 +1207,7 @@ def _addsub_object_array(self, other: np.ndarray, op): assert self.shape == other.shape, (self.shape, other.shape) res_values = op(self.astype("O"), np.asarray(other)) - result = array(res_values.ravel()) + result = pd_array(res_values.ravel()) result = extract_array(result, extract_numpy=True).reshape(self.shape) return result diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4d165dac40397..43c3a5e8bfd4c 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -76,7 +76,7 @@ from pandas.core.arrays.categorical import Categorical import pandas.core.common as com from pandas.core.construction import ( - array, + array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) @@ -661,7 +661,7 @@ def _cmp_method(self, other, op): if is_list_like(other): if len(self) != len(other): raise ValueError("Lengths must match to compare") - other = array(other) + other = pd_array(other) elif not isinstance(other, Interval): # non-interval scalar -> 
no matches return invalid_comparison(self, other, op) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index d0fe5b5ab0c19..db9239d03dd13 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -303,6 +303,7 @@ def array( raise ValueError(msg) if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ABCExtensionArray)): + # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype data = extract_array(data, extract_numpy=True) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 42ac786ff315e..1545b5b106803 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -25,7 +25,7 @@ from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse import SparseArray from pandas.core.construction import ( - array, + array as pd_array, ensure_wrapped_if_datetimelike, ) @@ -66,7 +66,7 @@ def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: if is_extension_array_dtype(dtype) and isinstance(arr, np.ndarray): # numpy's astype cannot handle ExtensionDtypes - return array(arr, dtype=dtype, copy=False) + return pd_array(arr, dtype=dtype, copy=False) return arr.astype(dtype, copy=False) diff --git a/pandas/core/series.py b/pandas/core/series.py index ddfeea381ff2e..b2e620c9b8047 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -804,7 +804,7 @@ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray: array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ - return np.asarray(self.array, dtype) + return np.asarray(self._values, dtype) # ---------------------------------------------------------------------- # Unary Methods @@ -1798,7 +1798,7 @@ def count(self, level=None): 2 """ if level is None: - return notna(self.array).sum() + return notna(self._values).sum() elif not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") @@ -2498,7 
+2498,7 @@ def diff(self, periods: int = 1) -> Series: -------- {examples} """ - result = algorithms.diff(self.array, periods) + result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index).__finalize__( self, method="diff" ) @@ -3808,7 +3808,7 @@ def explode(self, ignore_index: bool = False) -> Series: if not len(self) or not is_object_dtype(self): return self.copy() - values, counts = reshape.explode(np.asarray(self.array)) + values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = ibase.default_index(len(values)) @@ -5013,7 +5013,7 @@ def _cmp_method(self, other, op): if isinstance(other, Series) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled Series objects") - lvalues = extract_array(self, extract_numpy=True) + lvalues = self._values rvalues = extract_array(other, extract_numpy=True) res_values = ops.comparison_op(lvalues, rvalues, op) @@ -5024,7 +5024,7 @@ def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) self, other = ops.align_method_SERIES(self, other, align_asobject=True) - lvalues = extract_array(self, extract_numpy=True) + lvalues = self._values rvalues = extract_array(other, extract_numpy=True) res_values = ops.logical_op(lvalues, rvalues, op) @@ -5034,7 +5034,7 @@ def _arith_method(self, other, op): res_name = ops.get_op_result_name(self, other) self, other = ops.align_method_SERIES(self, other) - lvalues = extract_array(self, extract_numpy=True) + lvalues = self._values rvalues = extract_array(other, extract_numpy=True) result = ops.arithmetic_op(lvalues, rvalues, op)
Found that grepping for uses of pd.array is a PITA, so went through and changed them all (inside core) to pd_array. @MarcoGorelli would it be feasible to make a code check for this?
https://api.github.com/repos/pandas-dev/pandas/pulls/40150
2021-03-01T18:05:21Z
2021-03-01T21:41:50Z
2021-03-01T21:41:50Z
2021-03-02T06:43:14Z
PERF: DataFrame.transpose with dt64tz
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index f6b5c30635980..ba0c4b99f861f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -673,6 +673,7 @@ Performance improvements - Performance improvement for concatenation of data with type :class:`CategoricalDtype` (:issue:`40193`) - Performance improvement in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable data types (:issue:`37493`) - Performance improvement in :meth:`Series.nunique` with nan values (:issue:`40865`) +- Performance improvement in :meth:`DataFrame.transpose`, :meth:`Series.unstack` with ``DatetimeTZDtype`` (:issue:`40149`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 93d87f6bb4dfa..201e177d8bb10 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -16,7 +16,10 @@ from pandas._typing import ArrayLike from pandas.core.dtypes.cast import maybe_promote -from pandas.core.dtypes.common import ensure_platform_int +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_obj, +) from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.construction import ensure_wrapped_if_datetimelike @@ -91,12 +94,14 @@ def take_nd( if not isinstance(arr, np.ndarray): # i.e. ExtensionArray, - if arr.ndim == 2: - # e.g. DatetimeArray, TimedeltArray + # includes for EA to catch DatetimeArray, TimedeltaArray + if not is_1d_only_ea_obj(arr): + # i.e. 
DatetimeArray, TimedeltaArray arr = cast("NDArrayBackedExtensionArray", arr) return arr.take( indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis ) + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) arr = np.asarray(arr) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d90487647d35b..efefeb23445af 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -101,6 +101,7 @@ ensure_platform_int, infer_dtype_from_object, is_1d_only_ea_dtype, + is_1d_only_ea_obj, is_bool_dtype, is_dataclass, is_datetime64_any_dtype, @@ -139,7 +140,11 @@ ) from pandas.core.array_algos.take import take_2d_multi from pandas.core.arraylike import OpsMixin -from pandas.core.arrays import ExtensionArray +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + TimedeltaArray, +) from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( extract_array, @@ -852,6 +857,28 @@ def _can_fast_transpose(self) -> bool: # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) + @property + def _values_compat(self) -> np.ndarray | DatetimeArray | TimedeltaArray: + """ + Analogue to ._values that may return a 2D ExtensionArray. 
+ """ + mgr = self._mgr + if isinstance(mgr, ArrayManager): + return self._values + + blocks = mgr.blocks + if len(blocks) != 1: + return self._values + + arr = blocks[0].values + if arr.ndim == 1: + # non-2D ExtensionArray + return self._values + + # more generally, whatever we allow in NDArrayBackedExtensionBlock + arr = cast("DatetimeArray | TimedeltaArray", arr) + return arr.T + # ---------------------------------------------------------------------- # Rendering Methods @@ -3292,7 +3319,18 @@ def transpose(self, *args, copy: bool = False) -> DataFrame: # construct the args dtypes = list(self.dtypes) - if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]): + + if self._can_fast_transpose: + # Note: tests pass without this, but this improves perf quite a bit. + new_vals = self._values_compat.T + if copy: + new_vals = new_vals.copy() + + result = self._constructor(new_vals, index=self.columns, columns=self.index) + + elif ( + self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) + ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() @@ -9760,8 +9798,9 @@ def func(values: np.ndarray): def blk_func(values, axis=1): if isinstance(values, ExtensionArray): - if values.ndim == 2: - # i.e. 
DatetimeArray, TimedeltaArray + if not is_1d_only_ea_obj(values) and not isinstance( + self._mgr, ArrayManager + ): return values._reduce(name, axis=1, skipna=skipna, **kwds) return values._reduce(name, skipna=skipna, **kwds) else: diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py index d6ab3268c8c37..62537d37a8c11 100644 --- a/pandas/tests/frame/methods/test_transpose.py +++ b/pandas/tests/frame/methods/test_transpose.py @@ -90,3 +90,16 @@ def test_transpose_get_view(self, float_frame): dft.values[:, 5:10] = 5 assert (float_frame.values[5:10] == 5).all() + + @td.skip_array_manager_invalid_test + def test_transpose_get_view_dt64tzget_view(self): + dti = date_range("2016-01-01", periods=6, tz="US/Pacific") + arr = dti._data.reshape(3, 2) + df = DataFrame(arr) + assert df._mgr.nblocks == 1 + + result = df.T + assert result._mgr.nblocks == 1 + + rtrip = result._mgr.blocks[0].values + assert np.shares_memory(arr._data, rtrip._data)
Does what it says on the tin: `DatetimeBlock.values` is always `DatetimeArray`, and `dt64tzblock.shape == dt64tzblock.values` in all cases. Similarly `TimedeltaBlock.values` is always `TimedeltaArray`. Notes: - It is straightforward to extend this to work for PeriodDtype (i have a branch). Haven't tried it, but I expect it would be similarly easy to do the same for CategoricalDtype. Things that im not yet fully happy with: - fillna method on 2D (I think @simonjayhawkins commented on this in another branch recently), - nargminmax with 2D and mask.any() - pytables kludge ASVs: run repeatedly (vs master from yesterday) with --record-samples --append-samples so im pretty confident these are stable (but still include some nonsense xref #40066) ``` before after ratio [f4b67b5e] [65792836] <master> <ref-hybrid-3> + 10.1±3ms 13.9±3ms 1.38 eval.Eval.time_add('python', 'all') + 2.06±0.02ms 2.40±0.06ms 1.16 hash_functions.NumericSeriesIndexingShuffled.time_loc_slice(<class 'pandas.core.indexes.numeric.Int64Index'>, 1000000) + 227±2μs 263±2μs 1.15 groupby.GroupByMethods.time_dtype_as_field('datetime', 'head', 'transformation') + 228±2μs 261±2μs 1.15 groupby.GroupByMethods.time_dtype_as_field('datetime', 'head', 'direct') + 238±2μs 272±2μs 1.14 groupby.GroupByMethods.time_dtype_as_field('datetime', 'tail', 'transformation') + 248±6μs 282±5μs 1.14 groupby.GroupByMethods.time_dtype_as_field('datetime', 'tail', 'direct') + 3.92±0.03ms 4.37±0.01ms 1.11 rolling.Engine.time_rolling_apply('DataFrame', 'float', <function Engine.<lambda> at 0x7fb1c0b40670>, 'cython', 'median') + 2.83±0.02ms 3.14±0.06ms 1.11 io.hdf.HDFStoreDataFrame.time_store_info - 275±4μs 248±4μs 0.90 groupby.GroupByMethods.time_dtype_as_field('datetime', 'shift', 'direct') - 1.41±0.05ms 1.27±0.01ms 0.90 stat_ops.FrameOps.time_op('sum', 'int', 1) - 1.13±0.06ms 1.02±0.07ms 0.90 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function ne>) - 271±2μs 242±2μs 0.89 
groupby.GroupByMethods.time_dtype_as_field('datetime', 'shift', 'transformation') - 188±3μs 167±1μs 0.89 algos.isin.IsIn.time_isin_empty('datetime64[ns]') - 192±2μs 170±2μs 0.89 algos.isin.IsIn.time_isin_mismatched_dtype('datetime64[ns]') - 227±2μs 200±2μs 0.88 groupby.GroupByMethods.time_dtype_as_field('datetime', 'any', 'direct') - 226±2μs 199±1μs 0.88 groupby.GroupByMethods.time_dtype_as_field('datetime', 'all', 'transformation') - 227±2μs 199±1μs 0.88 groupby.GroupByMethods.time_dtype_as_field('datetime', 'any', 'transformation') - 895±60μs 785±80μs 0.88 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function ge>) - 10.2±0.3ms 8.93±0.7ms 0.88 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.int64'>, 19, 'inside') - 235±4μs 204±4μs 0.87 groupby.GroupByMethods.time_dtype_as_field('datetime', 'all', 'direct') - 3.26±0.03μs 2.83±0.03μs 0.87 frame_methods.ToNumpy.time_to_numpy_tall - 3.28±0.03μs 2.82±0.02μs 0.86 frame_methods.ToNumpy.time_to_numpy_wide - 9.77±0.2ms 8.40±0.2ms 0.86 indexing.NumericSeriesIndexing.time_loc_slice(<class 'pandas.core.indexes.numeric.UInt64Index'>, 'nonunique_monotonic_inc') - 2.94±0.05μs 2.52±0.02μs 0.86 frame_methods.ToNumpy.time_values_tall - 2.95±0.03μs 2.52±0.02μs 0.85 frame_methods.ToNumpy.time_values_wide - 2.09±0.02ms 1.77±0.01ms 0.85 groupby.FillNA.time_df_ffill - 2.09±0.02ms 1.77±0.01ms 0.85 groupby.FillNA.time_df_bfill - 204±3μs 168±2μs 0.82 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<Day>) - 170±2μs 137±3μs 0.81 groupby.GroupByMethods.time_dtype_as_field('datetime', 'count', 'direct') - 170±2μs 137±4μs 0.80 groupby.GroupByMethods.time_dtype_as_field('datetime', 'count', 'transformation') - 29.1±3ms 22.9±0.4ms 0.79 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.uint64'>, 20, 'outside') - 26.0±1ms 19.2±2ms 0.74 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.int64'>, 20, 'inside') - 26.2±0.2ms 18.0±0.07ms 0.69 
index_object.SetOperations.time_operation('date_string', 'symmetric_difference') - 11.6±0.1ms 7.32±0.08ms 0.63 reshape.ReshapeExtensionDtype.time_stack('datetime64[ns, US/Pacific]') - 40.2±0.5μs 25.0±0.3μs 0.62 ctors.SeriesDtypesConstructors.time_dtindex_from_index_with_series - 3.77±0.03ms 2.08±0.03ms 0.55 reshape.ReshapeExtensionDtype.time_unstack_slow('datetime64[ns, US/Pacific]') - 32.1±0.5μs 17.0±0.2μs 0.53 ctors.SeriesDtypesConstructors.time_dtindex_from_series - 1.11±0.03ms 408±7μs 0.37 categoricals.Constructor.time_datetimes - 14.1±0.1μs 1.26±0.02μs 0.09 attrs_caching.SeriesArrayAttribute.time_extract_array_numpy('datetime64') - 13.7±0.1μs 1.04±0.03μs 0.08 attrs_caching.SeriesArrayAttribute.time_extract_array('datetime64') - 13.0±0.2μs 455±10ns 0.04 attrs_caching.SeriesArrayAttribute.time_array('datetime64') - 73.8±1ms 1.66±0.03ms 0.02 reshape.ReshapeExtensionDtype.time_unstack_fast('datetime64[ns, US/Pacific]') - 64.3±0.9ms 258±2μs 0.00 reshape.ReshapeExtensionDtype.time_transpose('datetime64[ns, US/Pacific]') ``` IIRC the groupby.GroupByMethods.time_dtype_as_field were heavily influenced by constructor overhead, which motivated #40054. Still need to try out @jorisvandenbossche's suggestion of non-cython optimization there.
https://api.github.com/repos/pandas-dev/pandas/pulls/40149
2021-03-01T15:55:44Z
2021-05-17T19:17:35Z
2021-05-17T19:17:35Z
2021-05-17T19:22:05Z
[ArrayManager] Ensure to store datetimelike data as DatetimeArray/TimedeltaArray (and not ndarray)
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 5001754017dda..0556434c2c223 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -33,6 +33,7 @@ ) from pandas.core.dtypes.common import ( is_bool_dtype, + is_datetime64_ns_dtype, is_dtype_equal, is_extension_array_dtype, is_numeric_dtype, @@ -53,7 +54,11 @@ ) import pandas.core.algorithms as algos -from pandas.core.arrays import ExtensionArray +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + TimedeltaArray, +) from pandas.core.arrays.sparse import SparseDtype from pandas.core.construction import ( ensure_wrapped_if_datetimelike, @@ -113,6 +118,7 @@ def __init__( if verify_integrity: self._axes = [ensure_index(ax) for ax in axes] + self.arrays = [ensure_wrapped_if_datetimelike(arr) for arr in arrays] self._verify_integrity() def make_empty(self: T, axes=None) -> T: @@ -715,20 +721,16 @@ def fast_xs(self, loc: int) -> ArrayLike: """ dtype = _interleaved_dtype(self.arrays) - if isinstance(dtype, SparseDtype): - temp_dtype = dtype.subtype - elif isinstance(dtype, PandasDtype): - temp_dtype = dtype.numpy_dtype - elif is_extension_array_dtype(dtype): - temp_dtype = "object" - elif is_dtype_equal(dtype, str): - temp_dtype = "object" - else: - temp_dtype = dtype - - result = np.array([arr[loc] for arr in self.arrays], dtype=temp_dtype) + values = [arr[loc] for arr in self.arrays] if isinstance(dtype, ExtensionDtype): - result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) + result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) + # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT + elif is_datetime64_ns_dtype(dtype): + result = DatetimeArray._from_sequence(values, dtype=dtype)._data + elif is_timedelta64_ns_dtype(dtype): + result = TimedeltaArray._from_sequence(values, dtype=dtype)._data + else: + result = np.array(values, dtype=dtype) return 
result def iget(self, i: int) -> SingleBlockManager: diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index 677d862dfe077..462d588aff58f 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -170,6 +170,7 @@ def test_rename_multiindex(self): renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0) tm.assert_index_equal(renamed.index, new_index) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem copy/view def test_rename_nocopy(self, float_frame): renamed = float_frame.rename(columns={"C": "foo"}, copy=False) renamed["foo"] = 1.0
Pre-cursor for https://github.com/pandas-dev/pandas/pull/39991 Currently we didn't really check that we were consistently storing datetimelike data as the EA (DatetimeArray, TimedeltaArray) or as ndarrray. Ensuring this in the ArrayManager constructor turns up a few failures. I *think* it will be the easiest to always store them as EA and not as ndarray (eg for many other operations, we otherwise would wrap them in the EA anyway).
https://api.github.com/repos/pandas-dev/pandas/pulls/40147
2021-03-01T14:12:51Z
2021-03-02T21:08:13Z
2021-03-02T21:08:13Z
2021-03-03T08:22:26Z
REGR: reduction operations failing if `min_count` is larger
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 790ff4c78cad6..edf23bf89d7e1 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -15,7 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dcd6ef77238f9..16d6071585a29 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9175,6 +9175,7 @@ def _reduce( **kwds, ): + min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -9219,7 +9220,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if numeric_only is not None or axis == 0: + if (numeric_only is not None or axis == 0) and min_count == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. 
# For numeric_only=None only the case with axis==0 and no object @@ -9236,7 +9237,7 @@ def _get_data() -> DataFrame: # After possibly _get_data and transposing, we are now in the # simple case where we can use BlockManager.reduce - res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures) + res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures) out = df._constructor(res).iloc[0] if out_dtype is not None: out = out.astype(out_dtype) @@ -9264,14 +9265,15 @@ def _get_data() -> DataFrame: with np.errstate(all="ignore"): result = func(values) - if filter_type == "bool" and notna(result).all(): - result = result.astype(np.bool_) - elif filter_type is None and is_object_dtype(result.dtype): - try: - result = result.astype(np.float64) - except (ValueError, TypeError): - # try to coerce to the original dtypes item by item if we can - pass + if hasattr(result, "dtype"): + if filter_type == "bool" and notna(result).all(): + result = result.astype(np.bool_) + elif filter_type is None and is_object_dtype(result.dtype): + try: + result = result.astype(np.float64) + except (ValueError, TypeError): + # try to coerce to the original dtypes item by item if we can + pass result = self._constructor_sliced(result, index=labels) return result diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a0dfb1c83a70b..57656190011dc 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import functools import itertools import operator @@ -1380,7 +1382,7 @@ def _maybe_null_out( mask: Optional[np.ndarray], shape: Tuple[int, ...], min_count: int = 1, -) -> float: +) -> np.ndarray | float: """ Returns ------- diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 3f205bde31940..d24320ad17709 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -847,6 +847,13 @@ def test_sum_nanops_timedelta(self): expected = 
Series([0, 0, np.nan], dtype="m8[ns]", index=idx) tm.assert_series_equal(result, expected) + def test_sum_nanops_min_count(self): + # https://github.com/pandas-dev/pandas/issues/39738 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + result = df.sum(min_count=10) + expected = Series([np.nan, np.nan], index=["x", "y"]) + tm.assert_series_equal(result, expected) + def test_sum_object(self, float_frame): values = float_frame.values.astype(int) frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
- [ ] closes #39738 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40143
2021-03-01T13:13:59Z
2021-03-05T01:09:48Z
2021-03-05T01:09:48Z
2021-03-05T15:49:50Z
CI, TST enable branch coverage
diff --git a/setup.cfg b/setup.cfg index ce055f550a868..ca0673bd5fc34 100644 --- a/setup.cfg +++ b/setup.cfg @@ -125,7 +125,7 @@ ignore-words-list = ba,blocs,coo,hist,nd,ser ignore-regex = https://(\w+\.)+ [coverage:run] -branch = False +branch = True omit = */tests/* pandas/_typing.py
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them As suggested on Gitter - this could help identify more untested parts of the codebase and hence uncover bugs
https://api.github.com/repos/pandas-dev/pandas/pulls/40142
2021-03-01T12:38:29Z
2021-03-01T19:32:52Z
2021-03-01T19:32:52Z
2021-03-01T19:45:36Z
REF: move Block.astype implementation to dtypes/cast.py
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 71863c8925e89..d456f9c56e309 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -10,6 +10,7 @@ datetime, timedelta, ) +import inspect from typing import ( TYPE_CHECKING, Any, @@ -87,6 +88,7 @@ is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype, + pandas_dtype, ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, @@ -1227,6 +1229,107 @@ def astype_nansafe( return arr.astype(dtype, copy=copy) +def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : dtype object + copy : bool, default False + copy if indicated + + Returns + ------- + ndarray or ExtensionArray + """ + if ( + values.dtype.kind in ["m", "M"] + and dtype.kind in ["i", "u"] + and isinstance(dtype, np.dtype) + and dtype.itemsize != 8 + ): + # TODO(2.0) remove special case once deprecation on DTA/TDA is enforced + msg = rf"cannot astype a datetimelike from [{values.dtype}] to [{dtype}]" + raise TypeError(msg) + + if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype): + return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True) + + if is_dtype_equal(values.dtype, dtype): + if copy: + return values.copy() + return values + + if isinstance(values, ABCExtensionArray): + values = values.astype(dtype, copy=copy) + + else: + values = astype_nansafe(values, dtype, copy=copy) + + # in pandas we don't store numpy str dtypes, so convert to object + if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + return values + + +def astype_array_safe( + values: ArrayLike, dtype, copy: bool = False, errors: str = "raise" +) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. 
+ + This basically is the implementation for DataFrame/Series.astype and + includes all custom logic for pandas (NaN-safety, converting str to object, + not allowing ) + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : str, dtype convertible + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + ndarray or ExtensionArray + """ + errors_legal_values = ("raise", "ignore") + + if errors not in errors_legal_values: + invalid_arg = ( + "Expected value of kwarg 'errors' to be one of " + f"{list(errors_legal_values)}. Supplied value is '{errors}'" + ) + raise ValueError(invalid_arg) + + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ( + f"Expected an instance of {dtype.__name__}, " + "but got the class instead. Try instantiating 'dtype'." + ) + raise TypeError(msg) + + dtype = pandas_dtype(dtype) + + try: + new_values = astype_array(values, dtype, copy=copy) + except (ValueError, TypeError): + # e.g. 
astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "ignore": + new_values = values + else: + raise + + return new_values + + def soft_convert_objects( values: np.ndarray, datetime: bool = True, diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 5001754017dda..48e27e7100d2f 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -28,6 +28,7 @@ from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( + astype_array_safe, find_common_type, infer_dtype_from_scalar, ) @@ -499,7 +500,7 @@ def downcast(self) -> ArrayManager: return self.apply_with_block("downcast") def astype(self, dtype, copy: bool = False, errors: str = "raise") -> ArrayManager: - return self.apply("astype", dtype=dtype, copy=copy) # , errors=errors) + return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors) def convert( self, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index b65043be6fda6..f2b8499a316b7 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1,6 +1,5 @@ from __future__ import annotations -import inspect import re from typing import ( TYPE_CHECKING, @@ -36,8 +35,7 @@ from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( - astype_dt64_to_dt64tz, - astype_nansafe, + astype_array_safe, can_hold_element, find_common_type, infer_dtype_from, @@ -49,7 +47,6 @@ ) from pandas.core.dtypes.common import ( is_categorical_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, @@ -652,33 +649,11 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): ------- Block """ - errors_legal_values = ("raise", "ignore") - - if errors not in errors_legal_values: - invalid_arg = ( - "Expected value of kwarg 'errors' to be one of " - f"{list(errors_legal_values)}. 
Supplied value is '{errors}'" - ) - raise ValueError(invalid_arg) - - if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): - msg = ( - f"Expected an instance of {dtype.__name__}, " - "but got the class instead. Try instantiating 'dtype'." - ) - raise TypeError(msg) - - dtype = pandas_dtype(dtype) + values = self.values + if values.dtype.kind in ["m", "M"]: + values = self.array_values() - try: - new_values = self._astype(dtype, copy=copy) - except (ValueError, TypeError): - # e.g. astype_nansafe can fail on object-dtype of strings - # trying to convert to float - if errors == "ignore": - new_values = self.values - else: - raise + new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) newb = self.make_block(new_values) if newb.shape != self.shape: @@ -689,37 +664,6 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): ) return newb - def _astype(self, dtype: DtypeObj, copy: bool) -> ArrayLike: - values = self.values - if values.dtype.kind in ["m", "M"]: - values = self.array_values() - - if ( - values.dtype.kind in ["m", "M"] - and dtype.kind in ["i", "u"] - and isinstance(dtype, np.dtype) - and dtype.itemsize != 8 - ): - # TODO(2.0) remove special case once deprecation on DTA/TDA is enforced - msg = rf"cannot astype a datetimelike from [{values.dtype}] to [{dtype}]" - raise TypeError(msg) - - if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype): - return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True) - - if is_dtype_equal(values.dtype, dtype): - if copy: - return values.copy() - return values - - if isinstance(values, ExtensionArray): - values = values.astype(dtype, copy=copy) - - else: - values = astype_nansafe(values, dtype, copy=copy) - - return values - def convert( self, copy: bool = True, diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 8c11f659e8454..161fe7990a327 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ 
b/pandas/tests/frame/methods/test_astype.py @@ -3,8 +3,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import ( Categorical, @@ -92,7 +90,6 @@ def test_astype_mixed_type(self, mixed_type_frame): casted = mn.astype("O") _check_cast(casted, "object") - @td.skip_array_manager_not_yet_implemented def test_astype_with_exclude_string(self, float_frame): df = float_frame.copy() expected = float_frame.astype(int) @@ -127,7 +124,6 @@ def test_astype_with_view_mixed_float(self, mixed_float_frame): casted = tf.astype(np.int64) casted = tf.astype(np.float32) # noqa - @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("dtype", [np.int32, np.int64]) @pytest.mark.parametrize("val", [np.nan, np.inf]) def test_astype_cast_nan_inf_int(self, val, dtype): @@ -386,7 +382,6 @@ def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit): tm.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) def test_astype_to_datetime_unit(self, unit): # tests all units from datetime origination @@ -411,7 +406,6 @@ def test_astype_to_timedelta_unit_ns(self, unit): tm.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"]) def test_astype_to_timedelta_unit(self, unit): # coerce to float @@ -441,7 +435,6 @@ def test_astype_to_incorrect_datetimelike(self, unit): with pytest.raises(TypeError, match=msg): df.astype(dtype) - @td.skip_array_manager_not_yet_implemented def test_astype_arg_for_errors(self): # GH#14878 @@ -570,7 +563,6 @@ def test_astype_empty_dtype_dict(self): tm.assert_frame_equal(result, df) assert result is not df - @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) ignore keyword @pytest.mark.parametrize( "df", [ diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py index 
5ca96a1f9989f..c31c421ee1445 100644 --- a/pandas/util/_exceptions.py +++ b/pandas/util/_exceptions.py @@ -31,7 +31,7 @@ def find_stack_level() -> int: if stack[n].function == "astype": break - while stack[n].function in ["astype", "apply", "_astype"]: + while stack[n].function in ["astype", "apply", "astype_array_safe", "astype_array"]: # e.g. # bump up Block.astype -> BlockManager.astype -> NDFrame.astype # bump up Datetime.Array.astype -> DatetimeIndex.astype
This moves the `astype` implementation (so the current logic of calling `EA.astype` or `astype_nansafe` depending on the array type, handling datetime and string special cases, handling error keyword, etc) from being defined on `Block` to the `array_algos/` submodule. That's a useful clean-up anyway I think (it's not block specific), and this way it can then be reused in ArrayManager. I currently put it in `core/array_algos/cast.py` since the other similar functions are also put in `array_algos` (`shift`, `putmask`, `replace`, `quantile` etc). But since this is casting specific and we already have most of the underlying pieces in `core/dtypes/cast.py`, could also just put it there (either is fine for me).
https://api.github.com/repos/pandas-dev/pandas/pulls/40141
2021-03-01T11:30:20Z
2021-03-02T17:23:42Z
2021-03-02T17:23:42Z
2021-03-02T17:23:46Z
to_offset was not using the compiled regex
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 2d4704ad3bda6..4e6e5485b2ade 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3579,7 +3579,7 @@ cpdef to_offset(freq): stride_sign = None try: - split = re.split(opattern, freq) + split = opattern.split(freq) if split[-1] != "" and not split[-1].isspace(): # the last element must be blank raise ValueError("last element must be blank")
- [x] closes #39988 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40138
2021-03-01T09:12:42Z
2021-03-01T13:54:59Z
2021-03-01T13:54:59Z
2021-03-01T13:55:03Z
DOC: 1.2.3 release date
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index 99e997189d7b8..c94491df474ab 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -1,6 +1,6 @@ .. _whatsnew_123: -What's new in 1.2.3 (March ??, 2021) +What's new in 1.2.3 (March 02, 2021) ------------------------------------ These are the changes in pandas 1.2.3. See :ref:`release` for a full changelog @@ -19,33 +19,8 @@ Fixed regressions - Fixed regression in nullable integer unary ops propagating mask on assignment (:issue:`39943`) - Fixed regression in :meth:`DataFrame.__setitem__` not aligning :class:`DataFrame` on right-hand side for boolean indexer (:issue:`39931`) - Fixed regression in :meth:`~DataFrame.to_json` failing to use ``compression`` with URL-like paths that are internally opened in binary mode or with user-provided file objects that are opened in binary mode (:issue:`39985`) -- Fixed regression in :meth:`~Series.sort_index` and :meth:`~DataFrame.sort_index`, - which exited with an ungraceful error when having kwarg ``ascending=None`` passed (:issue:`39434`). - Passing ``ascending=None`` is still considered invalid, - and the new error message suggests a proper usage - (``ascending`` must be a boolean or a list-like boolean). +- Fixed regression in :meth:`Series.sort_index` and :meth:`DataFrame.sort_index`, which exited with an ungraceful error when having kwarg ``ascending=None`` passed. Passing ``ascending=None`` is still considered invalid, and the improved error message suggests a proper usage (``ascending`` must be a boolean or a list-like of boolean) (:issue:`39434`) - Fixed regression in :meth:`DataFrame.transform` and :meth:`Series.transform` giving incorrect column labels when passed a dictionary with a mix of list and non-list values (:issue:`40018`) -- - -.. --------------------------------------------------------------------------- - -.. _whatsnew_123.bug_fixes: - -Bug fixes -~~~~~~~~~ - -- -- - -.. 
--------------------------------------------------------------------------- - -.. _whatsnew_123.other: - -Other -~~~~~ - -- -- .. ---------------------------------------------------------------------------
https://api.github.com/repos/pandas-dev/pandas/pulls/40137
2021-03-01T08:25:54Z
2021-03-02T07:30:15Z
2021-03-02T07:30:15Z
2021-03-02T07:31:26Z
DEPR: `Styler.set_na_rep` and `.set_precision` in favour of `.format(na_rep='x', precision=3)`
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 3a8d912fa6ffe..79a52b7cdb9d1 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -35,7 +35,6 @@ Style application Styler.applymap Styler.where Styler.format - Styler.set_precision Styler.set_td_classes Styler.set_table_styles Styler.set_table_attributes @@ -44,7 +43,6 @@ Style application Styler.set_caption Styler.set_properties Styler.set_uuid - Styler.set_na_rep Styler.clear Styler.pipe diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e99963c6ad56b..00c35a935e9cd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -363,6 +363,7 @@ Deprecations - Deprecated :meth:`core.window.ewm.ExponentialMovingWindow.vol` (:issue:`39220`) - Using ``.astype`` to convert between ``datetime64[ns]`` dtype and :class:`DatetimeTZDtype` is deprecated and will raise in a future version, use ``obj.tz_localize`` or ``obj.dt.tz_localize`` instead (:issue:`38622`) - Deprecated casting ``datetime.date`` objects to ``datetime64`` when used as ``fill_value`` in :meth:`DataFrame.unstack`, :meth:`DataFrame.shift`, :meth:`Series.shift`, and :meth:`DataFrame.reindex`, pass ``pd.Timestamp(dateobj)`` instead (:issue:`39767`) +- Deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` in favour of :meth:`.Styler.format` with ``na_rep`` and ``precision`` as existing and new input arguments respectively (:issue:`40134`) - Deprecated allowing partial failure in :meth:`Series.transform` and :meth:`DataFrame.transform` when ``func`` is list-like or dict-like; will raise if any function fails on a column in a future version (:issue:`40211`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index dcae87f9d6d48..1ce50d3f905f6 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -7,7 +7,6 @@ from contextlib import contextmanager import copy from functools import partial -from itertools import product from typing import ( Any, Callable, @@ -22,6 +21,7 @@ cast, ) from uuid import uuid4 +import warnings import numpy as np @@ -37,14 +37,10 @@ from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc -from pandas.core.dtypes.common import is_float from pandas.core.dtypes.generic import ABCSeries import pandas as pd -from pandas.api.types import ( - is_dict_like, - is_list_like, -) +from pandas.api.types import is_list_like from pandas.core import generic import pandas.core.common as com from pandas.core.frame import DataFrame @@ -53,6 +49,8 @@ jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") +BaseFormatter = Union[str, Callable] +ExtFormatter = Union[BaseFormatter, Dict[Any, Optional[BaseFormatter]]] CSSPair = Tuple[str, Union[str, int, float]] CSSList = List[CSSPair] CSSProperties = Union[str, CSSList] @@ -182,9 +180,6 @@ def __init__( self.data: DataFrame = data self.index: pd.Index = data.index self.columns: pd.Index = data.columns - if precision is None: - precision = get_option("display.precision") - self.precision = precision self.table_styles = table_styles if not isinstance(uuid_len, int) or not uuid_len >= 0: raise TypeError("``uuid_len`` must be an integer in range [0, 32].") @@ -193,7 +188,6 @@ def __init__( self.caption = caption self.table_attributes = table_attributes self.cell_ids = cell_ids - self.na_rep = na_rep # assign additional default vars self.hidden_index: bool = False @@ -204,7 +198,10 @@ def __init__( self.tooltips: Optional[_Tooltips] = None self._display_funcs: DefaultDict[ # maps 
(row, col) -> formatting function Tuple[int, int], Callable[[Any], str] - ] = defaultdict(lambda: self._default_display_func) + ] = defaultdict(lambda: partial(_default_formatter, precision=None)) + self.precision = precision # can be removed on set_precision depr cycle + self.na_rep = na_rep # can be removed on set_na_rep depr cycle + self.format(formatter=None, precision=precision, na_rep=na_rep) def _repr_html_(self) -> str: """ @@ -225,15 +222,6 @@ def _init_tooltips(self): if self.tooltips is None: self.tooltips = _Tooltips() - def _default_display_func(self, x): - if self.na_rep is not None and pd.isna(x): - return self.na_rep - elif is_float(x): - display_format = f"{x:.{self.precision}f}" - return display_format - else: - return x - def set_tooltips(self, ttips: DataFrame) -> Styler: """ Add string based tooltips that will appear in the `Styler` HTML result. These @@ -389,7 +377,6 @@ def _translate(self): table_styles = self.table_styles or [] caption = self.caption ctx = self.ctx - precision = self.precision hidden_index = self.hidden_index hidden_columns = self.hidden_columns uuid = self.uuid @@ -569,7 +556,6 @@ def _translate(self): "cellstyle": cellstyle, "body": body, "uuid": uuid, - "precision": precision, "table_styles": _format_table_styles(table_styles), "caption": caption, "table_attributes": table_attr, @@ -579,14 +565,20 @@ def _translate(self): return d - def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> Styler: + def format( + self, + formatter: Optional[ExtFormatter] = None, + subset: Optional[Union[slice, Sequence[Any]]] = None, + na_rep: Optional[str] = None, + precision: Optional[int] = None, + ) -> Styler: """ Format the text display value of cells. Parameters ---------- formatter : str, callable, dict or None - If ``formatter`` is None, the default formatter is used. + Object to define how values are displayed. See notes. 
subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. @@ -596,58 +588,107 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> Styler .. versionadded:: 1.0.0 + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + + .. versionadded:: 1.3.0 + Returns ------- self : Styler Notes ----- - ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where - ``a`` is one of - - - str: this will be wrapped in: ``a.format(x)`` - - callable: called with the value of an individual cell - - The default display value for numeric values is the "general" (``g``) - format with ``pd.options.display.precision`` precision. + This method assigns a formatting function, ``formatter``, to each cell in the + DataFrame. If ``formatter`` is ``None``, then the default formatter is used. + If a callable then that function should take a data value as input and return + a displayable representation, such as a string. If ``formatter`` is + given as a string this is assumed to be a valid Python format specification + and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given, + keys should correspond to column names, and values should be string or + callable, as above. + + The default formatter currently expresses floats and complex numbers with the + pandas display precision unless using the ``precision`` argument here. The + default formatter does not adjust the representation of missing values unless + the ``na_rep`` argument is used. + + The ``subset`` argument defines which region to apply the formatting function + to. If the ``formatter`` argument is given in dict form but does not include + all columns within the subset then these columns will have the default formatter + applied. Any columns in the formatter dict excluded from the subset will + raise a ``KeyError``. 
+ + When using a ``formatter`` string the dtypes must be compatible, otherwise a + `ValueError` will be raised. Examples -------- - >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) - >>> df.style.format("{:.2%}") - >>> df['c'] = ['a', 'b', 'c', 'd'] - >>> df.style.format({'c': str.upper}) + Using ``na_rep`` and ``precision`` with the default ``formatter`` + + >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]]) + >>> df.style.format(na_rep='MISS', precision=3) + 0 1 2 + 0 MISS 1.000 A + 1 2.000 MISS 3.000 + + Using a format specification on consistent column dtypes + + >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0,1]) + 0 1 2 + 0 MISS 1.00 A + 1 2.00 MISS 3.000000 + + Using the default ``formatter`` for unspecified columns + + >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'}, na_rep='MISS', precision=1) + 0 1 2 + 0 MISS £ 1.0 A + 1 2.00 MISS 3.0 + + Multiple ``na_rep`` or ``precision`` specifications under the default + ``formatter``. + + >>> df.style.format(na_rep='MISS', precision=1, subset=[0]) + ... 
.format(na_rep='PASS', precision=2, subset=[1, 2]) + 0 1 2 + 0 MISS 1.00 A + 1 2.0 PASS 3.00 + + Using a callable formatting function + + >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT' + >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS') + 0 1 2 + 0 MISS 1.0000 STRING + 1 2.0 MISS FLOAT """ - if formatter is None: - assert self._display_funcs.default_factory is not None - formatter = self._display_funcs.default_factory() + if all((formatter is None, subset is None, precision is None, na_rep is None)): + self._display_funcs.clear() + return self # clear the formatter / revert to default and avoid looping + + subset = slice(None) if subset is None else subset + subset = _non_reducing_slice(subset) + data = self.data.loc[subset] + + columns = data.columns + if not isinstance(formatter, dict): + formatter = {col: formatter for col in columns} + + for col in columns: + try: + format_func = formatter[col] + except KeyError: + format_func = None + format_func = _maybe_wrap_formatter( + format_func, na_rep=na_rep, precision=precision + ) + + for row, value in data[[col]].itertuples(): + i, j = self.index.get_loc(row), self.columns.get_loc(col) + self._display_funcs[(i, j)] = format_func - if subset is None: - row_locs = range(len(self.data)) - col_locs = range(len(self.data.columns)) - else: - subset = _non_reducing_slice(subset) - if len(subset) == 1: - subset = subset, self.data.columns - - sub_df = self.data.loc[subset] - row_locs = self.data.index.get_indexer_for(sub_df.index) - col_locs = self.data.columns.get_indexer_for(sub_df.columns) - - if is_dict_like(formatter): - for col, col_formatter in formatter.items(): - # formatter must be callable, so '{}' are converted to lambdas - col_formatter = _maybe_wrap_formatter(col_formatter, na_rep) - col_num = self.data.columns.get_indexer_for([col])[0] - - for row_num in row_locs: - self._display_funcs[(row_num, col_num)] = col_formatter - else: - # single scalar to format all cells 
with - formatter = _maybe_wrap_formatter(formatter, na_rep) - locs = product(*(row_locs, col_locs)) - for i, j in locs: - self._display_funcs[(i, j)] = formatter return self def set_td_classes(self, classes: DataFrame) -> Styler: @@ -748,7 +789,6 @@ def render(self, **kwargs) -> str: * cellstyle * body * uuid - * precision * table_styles * caption * table_attributes @@ -1048,7 +1088,9 @@ def where( def set_precision(self, precision: int) -> Styler: """ - Set the precision used to render. + Set the precision used to display values. + + .. deprecated:: 1.3.0 Parameters ---------- @@ -1057,9 +1099,18 @@ def set_precision(self, precision: int) -> Styler: Returns ------- self : Styler + + Notes + ----- + This method is deprecated see `Styler.format`. """ + warnings.warn( + "this method is deprecated in favour of `Styler.format(precision=..)`", + FutureWarning, + stacklevel=2, + ) self.precision = precision - return self + return self.format(precision=precision, na_rep=self.na_rep) def set_table_attributes(self, attributes: str) -> Styler: """ @@ -1270,6 +1321,8 @@ def set_na_rep(self, na_rep: str) -> Styler: .. versionadded:: 1.0.0 + .. deprecated:: 1.3.0 + Parameters ---------- na_rep : str @@ -1277,9 +1330,18 @@ def set_na_rep(self, na_rep: str) -> Styler: Returns ------- self : Styler + + Notes + ----- + This method is deprecated. 
See `Styler.format()` """ + warnings.warn( + "this method is deprecated in favour of `Styler.format(na_rep=..)`", + FutureWarning, + stacklevel=2, + ) self.na_rep = na_rep - return self + return self.format(na_rep=na_rep, precision=self.precision) def hide_index(self) -> Styler: """ @@ -2041,24 +2103,52 @@ def _get_level_lengths(index, hidden_elements=None): return non_zero_lengths +def _default_formatter(x: Any, precision: Optional[int] = None) -> Any: + """ + Format the display of a value + + Parameters + ---------- + x : Any + Input variable to be formatted + precision : Int, optional + Floating point precision used if ``x`` is float or complex. + + Returns + ------- + value : Any + Matches input type, or string if input is float or complex. + """ + if precision is None: + precision = get_option("display.precision") + if isinstance(x, (float, complex)): + return f"{x:.{precision}f}" + return x + + def _maybe_wrap_formatter( - formatter: Union[Callable, str], na_rep: Optional[str] + formatter: Optional[BaseFormatter] = None, + na_rep: Optional[str] = None, + precision: Optional[int] = None, ) -> Callable: + """ + Allows formatters to be expressed as str, callable or None, where None returns + a default formatting function. wraps with na_rep, and precision where they are + available. 
+ """ if isinstance(formatter, str): formatter_func = lambda x: formatter.format(x) elif callable(formatter): formatter_func = formatter + elif formatter is None: + formatter_func = partial(_default_formatter, precision=precision) else: - msg = f"Expected a template string or callable, got {formatter} instead" - raise TypeError(msg) + raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") if na_rep is None: return formatter_func - elif isinstance(na_rep, str): - return lambda x: na_rep if pd.isna(x) else formatter_func(x) else: - msg = f"Expected a string, got {na_rep} instead" - raise TypeError(msg) + return lambda x: na_rep if pd.isna(x) else formatter_func(x) def _maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 09e14d06f4d9b..b938495ca9e31 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -605,15 +605,17 @@ def test_set_na_rep(self): # GH 21527 28358 df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) - ctx = df.style.set_na_rep("NA")._translate() + with tm.assert_produces_warning(FutureWarning): + ctx = df.style.set_na_rep("NA")._translate() assert ctx["body"][0][1]["display_value"] == "NA" assert ctx["body"][0][2]["display_value"] == "NA" - ctx = ( - df.style.set_na_rep("NA") - .format(None, na_rep="-", subset=["B"]) - ._translate() - ) + with tm.assert_produces_warning(FutureWarning): + ctx = ( + df.style.set_na_rep("NA") + .format(None, na_rep="-", subset=["B"]) + ._translate() + ) assert ctx["body"][0][1]["display_value"] == "NA" assert ctx["body"][0][2]["display_value"] == "-" @@ -626,7 +628,8 @@ def test_format_non_numeric_na(self): } ) - ctx = df.style.set_na_rep("NA")._translate() + with tm.assert_produces_warning(FutureWarning): + ctx = df.style.set_na_rep("NA")._translate() assert ctx["body"][0][1]["display_value"] == "NA" assert 
ctx["body"][0][2]["display_value"] == "NA" assert ctx["body"][1][1]["display_value"] == "NA" @@ -638,12 +641,12 @@ def test_format_non_numeric_na(self): assert ctx["body"][1][1]["display_value"] == "-" assert ctx["body"][1][2]["display_value"] == "-" - def test_format_with_bad_na_rep(self): - # GH 21527 28358 - df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) - msg = "Expected a string, got -1 instead" - with pytest.raises(TypeError, match=msg): - df.style.format(None, na_rep=-1) + def test_format_clear(self): + assert (0, 0) not in self.styler._display_funcs # using default + self.styler.format("{:.2f") + assert (0, 0) in self.styler._display_funcs # formatter is specified + self.styler.format() + assert (0, 0) not in self.styler._display_funcs # formatter cleared to default def test_nonunique_raises(self): df = DataFrame([[1, 2]], columns=["A", "A"]) @@ -734,13 +737,11 @@ def test_table_attributes(self): assert 'class="foo" data-bar' in result def test_precision(self): - with pd.option_context("display.precision", 10): - s = Styler(self.df) - assert s.precision == 10 s = Styler(self.df, precision=2) assert s.precision == 2 - s2 = s.set_precision(4) + with tm.assert_produces_warning(FutureWarning): + s2 = s.set_precision(4) assert s is s2 assert s.precision == 4 @@ -783,44 +784,35 @@ def test_display_format(self): ) assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3 - def test_display_format_raises(self): - df = DataFrame(np.random.randn(2, 2)) - msg = "Expected a template string or callable, got 5 instead" - with pytest.raises(TypeError, match=msg): - df.style.format(5) - - msg = "Expected a template string or callable, got True instead" - with pytest.raises(TypeError, match=msg): - df.style.format(True) + @pytest.mark.parametrize("formatter", [5, True, [2.0]]) + def test_format_raises(self, formatter): + with pytest.raises(TypeError, match="expected str or callable"): + self.df.style.format(formatter) - def 
test_display_set_precision(self): + def test_format_with_precision(self): # Issue #13257 df = DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"]) s = Styler(df) - ctx = s.set_precision(1)._translate() - - assert s.precision == 1 + ctx = s.format(precision=1)._translate() assert ctx["body"][0][1]["display_value"] == "1.0" assert ctx["body"][0][2]["display_value"] == "2.0" assert ctx["body"][1][1]["display_value"] == "3.2" assert ctx["body"][1][2]["display_value"] == "4.6" - ctx = s.set_precision(2)._translate() - assert s.precision == 2 + ctx = s.format(precision=2)._translate() assert ctx["body"][0][1]["display_value"] == "1.00" assert ctx["body"][0][2]["display_value"] == "2.01" assert ctx["body"][1][1]["display_value"] == "3.21" assert ctx["body"][1][2]["display_value"] == "4.57" - ctx = s.set_precision(3)._translate() - assert s.precision == 3 + ctx = s.format(precision=3)._translate() assert ctx["body"][0][1]["display_value"] == "1.000" assert ctx["body"][0][2]["display_value"] == "2.009" assert ctx["body"][1][1]["display_value"] == "3.212" assert ctx["body"][1][2]["display_value"] == "4.566" - def test_display_subset(self): + def test_format_subset(self): df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) ctx = df.style.format( {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=pd.IndexSlice[0, :] @@ -851,7 +843,7 @@ def test_display_subset(self): assert ctx["body"][0][2]["display_value"] == "0.123400" assert ctx["body"][1][2]["display_value"] == raw_11 - def test_display_dict(self): + def test_format_dict(self): df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate() assert ctx["body"][0][1]["display_value"] == "0.1"
- [x] closes issue #40032 and PR #40060 - [x] deprecates the methods `Styler.set_na_rep()` and `Styler.set_precision()` in favour of, and incorporating `precision` into `Styler.format()`. Maintains some backwards compatibility with a deprecation warning, the copy mechanics remain broken. - [x] `Styler.set_na_rep('MISS')` is replaced and equivalent to `Styler.format(na_rep='MISS')` - [x] `Styler.set_precision(5)` is replaced and equivalent to `Styler.format(precision=5)` - [x] `Styler.set_na_rep('MISS').set_precision(5)` is equivalent to `Styler.format(na_rep='MISS', precision=5)` - [x] `Styler.set_precision(5).set_na_rep('MISS')` is equivalent to `Styler.format(na_rep='MISS', precision=5)` - [x] moves the `default_formatter` and `maybe_wrap_formatter` methods to `module` level. - [x] documentation is added to show new functionality - [x] most existing tests still pass with, some needed minor modification and some were no longer necessary - [x] tests added for new combinations - [x] whatsnew entry This PR was suggested after review of #40060 as an alternative. It seems to be a simpler and more consistent solution for API.
https://api.github.com/repos/pandas-dev/pandas/pulls/40134
2021-03-01T06:30:47Z
2021-03-05T22:07:58Z
2021-03-05T22:07:58Z
2022-10-28T16:49:39Z
DOC: small doc build fixes
diff --git a/doc/source/io.rst b/doc/source/io.rst index dfe85c4cb224b..1852330c0b1be 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3411,6 +3411,7 @@ table : `test_dataset.test_table` into a DataFrame using the :func:`~pandas.io.r function. .. code-block:: python + # Insert your BigQuery Project ID Here # Can be found in the Google web console projectid = "xxxxxxxx" diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 7ca4ff0529b4e..7f3e0819c2c06 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1280,9 +1280,10 @@ To supply the time zone, you can use the ``tz`` keyword to ``date_range`` and other functions. Dateutil time zone strings are distinguished from ``pytz`` time zones by starting with ``dateutil/``. -- In ``pytz`` you can find a list of common (and less common) time zones using ``from pytz import common_timezones, all_timezones``. +- In ``pytz`` you can find a list of common (and less common) time zones using + ``from pytz import common_timezones, all_timezones``. - ``dateutil`` uses the OS timezones so there isn't a fixed list available. For -common zones, the names are the same as ``pytz``. + common zones, the names are the same as ``pytz``. .. ipython:: python
https://api.github.com/repos/pandas-dev/pandas/pulls/7635
2014-07-01T13:06:48Z
2014-07-01T13:07:00Z
2014-07-01T13:07:00Z
2014-07-01T13:07:00Z
FIX: don't create 32 bit int type for int64 column (GH7433)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index bb6f9cee5766e..5a778ca08b0a3 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -700,7 +700,8 @@ def _harmonize_columns(self, parse_dates=None): pass # this column not in results def _sqlalchemy_type(self, arr_or_dtype): - from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval + from sqlalchemy.types import (BigInteger, Float, Text, Boolean, + DateTime, Date, Interval) if arr_or_dtype is date: return Date @@ -714,12 +715,12 @@ def _sqlalchemy_type(self, arr_or_dtype): warnings.warn("the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the " "database.", UserWarning) - return Integer + return BigInteger elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size. - return Integer + return BigInteger elif com.is_bool(arr_or_dtype): return Boolean return Text diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index a34f278fc5a96..bdb609e99eb62 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -828,6 +828,14 @@ def test_default_type_conversion(self): self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object), "BoolColWithNull loaded with incorrect type") + def test_bigint(self): + # int64 should be converted to BigInteger, GH7433 + df = DataFrame(data={'i64':[2**62]}) + df.to_sql('test_bigint', self.conn, index=False) + result = sql.read_sql_table('test_bigint', self.conn) + + tm.assert_frame_equal(df, result) + def test_default_date_load(self): df = sql.read_sql_table("types_test_data", self.conn)
Closes #7433 Replace the sqlalchemy `Integer` type with `BigInteger` as the default integer type in pandas is int64. Probably we could be more specific in the type conversion (check for the exact integer type).
https://api.github.com/repos/pandas-dev/pandas/pulls/7634
2014-07-01T12:59:02Z
2014-07-05T13:48:39Z
2014-07-05T13:48:39Z
2014-07-06T14:52:45Z
BUG: doc example in groupby.rst (GH7559 / GH7628)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 96eb0189a97f6..249aa0afdfd64 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -829,11 +829,21 @@ def nth(self, n, dropna=None): dropped = self.obj.dropna(how=dropna, axis=self.axis) # get a new grouper for our dropped obj - grouper, exclusions, obj = _get_grouper(dropped, key=self.keys, axis=self.axis, - level=self.level, sort=self.sort) + if self.keys is None and self.level is None: - sizes = obj.groupby(grouper).size() - result = obj.groupby(grouper).nth(n) + # we don't have the grouper info available (e.g. we have selected out + # a column that is not in the current object) + axis = self.grouper.axis + grouper = axis[axis.isin(dropped.index)] + keys = self.grouper.names + else: + + # create a grouper with the original parameters, but on the dropped object + grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis, + level=self.level, sort=self.sort) + + sizes = dropped.groupby(grouper).size() + result = dropped.groupby(grouper).nth(n) mask = (sizes<max_len).values # set the results which don't meet the criteria @@ -841,7 +851,7 @@ def nth(self, n, dropna=None): result.loc[mask] = np.nan # reset/reindex to the original groups - if len(self.obj) == len(dropped): + if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index): result.index = self.grouper.result_index else: result = result.reindex(self.grouper.result_index) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 03febbc47c0fc..434591a86d0c4 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -304,6 +304,13 @@ def test_nth(self): result = s.groupby(g).nth(0,dropna='all') assert_series_equal(result,expected) + # doc example + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) + g = df.groupby('A') + result = g.B.nth(0, dropna=True) + expected = g.B.first() + assert_series_equal(result,expected) + def 
test_grouper_index_types(self): # related GH5375 # groupby misbehaving when using a Floatlike index
closes #7628
https://api.github.com/repos/pandas-dev/pandas/pulls/7631
2014-07-01T12:10:14Z
2014-07-01T12:50:25Z
2014-07-01T12:50:25Z
2014-07-01T12:50:25Z
PERF: optimize MultiIndex.from_product
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 1aaf77625cf7f..b22720d277873 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -145,7 +145,7 @@ Performance - Improvements in Series.transform for significant performance gains (:issue:`6496`) - Improvements in DataFrame.transform with ufuncs and built-in grouper functions for signifcant performance gains (:issue:`7383`) - Regression in groupby aggregation of datetime64 dtypes (:issue:`7555`) - +- Improvements in `MultiIndex.from_product` for large iterables (:issue:`7627`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 2138ecfa5281f..4d7e14c9e026f 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2875,10 +2875,14 @@ def from_product(cls, iterables, sortorder=None, names=None): MultiIndex.from_arrays : Convert list of arrays to MultiIndex MultiIndex.from_tuples : Convert list of tuples to MultiIndex """ + from pandas.core.categorical import Categorical from pandas.tools.util import cartesian_product - product = cartesian_product(iterables) - return MultiIndex.from_arrays(product, sortorder=sortorder, - names=names) + + categoricals = [Categorical.from_array(it) for it in iterables] + labels = cartesian_product([c.labels for c in categoricals]) + + return MultiIndex(levels=[c.levels for c in categoricals], + labels=labels, sortorder=sortorder, names=names) @property def nlevels(self): diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index e20b209b3b5dd..23a0f39ef3547 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1861,6 +1861,15 @@ def test_from_product(self): assert_array_equal(result, expected) self.assertEqual(result.names, names) + def test_from_product_datetimeindex(self): + dt_index = pd.date_range('2000-01-01', periods=2) + mi = pd.MultiIndex.from_product([[1, 2], dt_index]) + etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')), + (1, pd.Timestamp('2000-01-02')), + (2, 
pd.Timestamp('2000-01-01')), + (2, pd.Timestamp('2000-01-02'))]) + assert_array_equal(mi.values, etalon) + def test_append(self): result = self.index[:3].append(self.index[3:]) self.assertTrue(result.equals(self.index)) diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index e6bd32737d567..5ddb2fb0ac7ec 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -105,3 +105,15 @@ start_date=datetime(2014, 4, 13)) index_float64_div = Benchmark('idx / 2', setup, name='index_float64_div', start_date=datetime(2014, 4, 13)) + + +# Constructing MultiIndex from cartesian product of iterables +# + +setup = common_setup + """ +iterables = [tm.makeStringIndex(10000), xrange(20)] +""" + +multiindex_from_product = Benchmark('MultiIndex.from_product(iterables)', + setup, name='multiindex_from_product', + start_date=datetime(2014, 6, 30))
This PR speeds up MultiIndex.from_product employing the fact that operating on categorical codes is faster than on the values themselves. This yields about 2x improvement in the benchmark ``` python In [1]: import pandas.util.testing as tm In [2]: data = [tm.makeStringIndex(10000), tm.makeFloatIndex(20)] In [3]: %timeit pd.MultiIndex.from_product(data) 100 loops, best of 3: 10.6 ms per loop In [4]: %timeit pd.MultiIndex.from_arrays(pd.tools.util.cartesian_product(data)) 10 loops, best of 3: 23.4 ms per loop ``` It's only marginally slower in small size cases: ``` python In [1]: data = [np.arange(20).astype(object), np.arange(20)] In [2]: %timeit pd.MultiIndex.from_product(data) 1000 loops, best of 3: 317 µs per loop In [3]: %timeit pd.MultiIndex.from_arrays(pd.tools.util.cartesian_product(data)) 1000 loops, best of 3: 308 µs per loop In [4]: data_int = [np.arange(20), np.arange(20)] In [5]: %timeit pd.MultiIndex.from_product(data_int) 1000 loops, best of 3: 285 µs per loop In [6]: %timeit pd.MultiIndex.from_arrays(pd.tools.util.cartesian_product(data_int)) 1000 loops, best of 3: 269 µs per loop ``` And this case came as a surprise because the cartesian product is blazingly fast both in old and new versions, but profiling showed that factorization is a lot faster when done on a smaller array: ``` python In [7]: data_large = [np.arange(10000), np.arange(20)] In [8]: %timeit pd.MultiIndex.from_arrays(pd.tools.util.cartesian_product(data_large)) 100 loops, best of 3: 9.88 ms per loop In [9]: %timeit pd.MultiIndex.from_product(data_large) 100 loops, best of 3: 2.74 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7627
2014-07-01T05:45:02Z
2014-07-01T11:36:24Z
2014-07-01T11:36:24Z
2014-07-01T17:42:19Z
CLN/DEPR: Fix instances of 'U'/'rU' in open(...)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index a34f278fc5a96..68de031641c58 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -157,12 +157,13 @@ def _get_exec(self): return self.conn.cursor() def _load_iris_data(self): + import io iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv') self.drop_table('iris') self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor]) - with open(iris_csv_file, 'rU') as iris_csv: + with io.open(iris_csv_file, mode='r', newline=None) as iris_csv: r = csv.reader(iris_csv) next(r) # skip header row ins = SQL_STRINGS['insert_iris'][self.flavor] diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 093954f1d8c1d..08aed66432385 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4235,11 +4235,12 @@ def test_from_csv(self): assert_series_equal(checkseries, series) def test_to_csv(self): + import io with ensure_clean() as path: self.ts.to_csv(path) - lines = open(path, 'U').readlines() + lines = io.open(path, newline=None).readlines() assert(lines[1] != '\n') self.ts.to_csv(path, index=False)
closes #7131
https://api.github.com/repos/pandas-dev/pandas/pulls/7625
2014-07-01T02:28:22Z
2014-07-01T17:52:25Z
2014-07-01T17:52:25Z
2014-07-02T11:46:11Z
CLN: Fix typo
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index bb6f9cee5766e..88607d8273ec8 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -88,7 +88,7 @@ def execute(sql, con, cur=None, params=None): Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - cur : depreciated, cursor is obtained from connection + cur : deprecated, cursor is obtained from connection params : list or tuple, optional List of parameters to pass to execute method. @@ -134,7 +134,7 @@ def tquery(sql, con=None, cur=None, retry=True): sql: string SQL query to be executed con: DBAPI2 connection - cur: depreciated, cursor is obtained from connection + cur: deprecated, cursor is obtained from connection Returns ------- @@ -142,7 +142,7 @@ def tquery(sql, con=None, cur=None, retry=True): """ warnings.warn( - "tquery is depreciated, and will be removed in future versions. " + "tquery is deprecated, and will be removed in future versions. " "You can use ``execute(...).fetchall()`` instead.", FutureWarning) @@ -187,7 +187,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None): sql: string SQL query to be executed con: DBAPI2 connection - cur: depreciated, cursor is obtained from connection + cur: deprecated, cursor is obtained from connection params: list or tuple, optional List of parameters to pass to execute method. @@ -197,7 +197,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None): """ warnings.warn( - "uquery is depreciated, and will be removed in future versions. " + "uquery is deprecated, and will be removed in future versions. 
" "You can use ``execute(...).rowcount`` instead.", FutureWarning) @@ -1153,21 +1153,21 @@ def get_sqltype(dtype, flavor): # legacy names, with depreciation warnings and copied docs def read_frame(*args, **kwargs): - """DEPRECIATED - use read_sql + """DEPRECATED - use read_sql """ - warnings.warn("read_frame is depreciated, use read_sql", FutureWarning) + warnings.warn("read_frame is deprecated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): - """DEPRECIATED - use read_sql + """DEPRECATED - use read_sql """ - warnings.warn("frame_query is depreciated, use read_sql", FutureWarning) + warnings.warn("frame_query is deprecated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): - """DEPRECIATED - use to_sql + """DEPRECATED - use to_sql Write records stored in a DataFrame to a SQL database. @@ -1200,7 +1200,7 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): pandas.DataFrame.to_sql """ - warnings.warn("write_frame is depreciated, use to_sql", FutureWarning) + warnings.warn("write_frame is deprecated, use to_sql", FutureWarning) # for backwards compatibility, set index=False when not specified index = kwargs.pop('index', False)
https://api.github.com/repos/pandas-dev/pandas/pulls/7624
2014-07-01T02:05:42Z
2014-07-01T10:41:40Z
2014-07-01T10:41:40Z
2014-07-01T12:12:29Z
ENH: dataframe memory usage
diff --git a/doc/source/faq.rst b/doc/source/faq.rst index a613d53218ce2..259243638ac74 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -24,6 +24,81 @@ Frequently Asked Questions (FAQ) options.display.mpl_style='default' from pandas.compat import lrange + +.. _df-memory-usage: + +DataFrame memory usage +~~~~~~~~~~~~~~~~~~~~~~ +As of pandas version 0.15.0, the memory usage of a dataframe (including +the index) is shown when accessing the ``info`` method of a dataframe. A +configuration option, ``display.memory_usage`` (see :ref:`options`), +specifies if the dataframe's memory usage will be displayed when +invoking the df.info() method. + +For example, the memory usage of the dataframe below is shown +when calling df.info(): + +.. ipython:: python + + dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', + 'complex128', 'object', 'bool'] + n = 5000 + data = dict([ (t, np.random.randint(100, size=n).astype(t)) + for t in dtypes]) + df = DataFrame(data) + + df.info() + +By default the display option is set to True but can be explicitly +overridden by passing the memory_usage argument when invoking df.info(). +Note that ``memory_usage=None`` is the default value for the df.info() +method and follows the setting specified by display.memory_usage. + +The memory usage of each column can be found by calling the ``memory_usage`` +method. This returns a Series with an index represented by column names +and memory usage of each column shown in bytes. For the dataframe above, +the memory usage of each column and the total memory usage of the +dataframe can be found with the memory_usage method: + +.. ipython:: python + + df.memory_usage() + + # total memory usage of dataframe + df.memory_usage().sum() + +By default the memory usage of the dataframe's index is not shown in the +returned Series, the memory usage of the index can be shown by passing +the ``index=True`` argument: + +.. 
ipython:: python + + df.memory_usage(index=True) + +The memory usage displayed by the ``info`` method utilizes the +``memory_usage`` method to determine the memory usage of a dataframe +while also formatting the output in human-readable units (base-2 +representation; i.e., 1KB = 1024 bytes). + +Pandas version 0.15.0 introduces a new categorical data type (see +:ref:`categorical`), which can be used in Series and DataFrames. +Significant memory savings can be achieved when using the category +datatype. This is demonstrated below: + +.. ipython:: python + + df['bases_object'] = Series(np.array(['adenine', 'cytosine', 'guanine', 'thymine']).take(np.random.randint(0,4,size=len(df)))) + + df['bases_categorical'] = df['bases_object'].astype('category') + + df.memory_usage() + +While the *base_object* and *bases_categorical* appear as identical +columns in the dataframe, the memory savings of the categorical +datatype, versus the object datatype, is revealed by ``memory_usage``. + + + .. _ref-monkey-patching: Adding Features to your pandas Installation diff --git a/doc/source/options.rst b/doc/source/options.rst index 95a137fb96e66..5edd28e559bc1 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -348,6 +348,9 @@ display.max_seq_items 100 when pretty-printing a long sequence, of "..." to the resulting string. If set to None, the number of items to be printed is unlimited. +display.memory_usage True This specifies if the memory usage of + a DataFrame should be displayed when the + df.info() method is invoked. 
display.mpl_style None Setting this to 'default' will modify the rcParams used by matplotlib to give plots a more pleasing visual diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 1d9acadb68e58..78e511cecba6e 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -259,6 +259,16 @@ API changes - ``DataFrame.plot`` and ``Series.plot`` keywords are now have consistent orders (:issue:`8037`) +- Implements methods to find memory usage of a DataFrame (:issue:`6852`). A new display option ``display.memory_usage`` (see :ref:`options`) sets the default behavior of the ``memory_usage`` argument in the ``df.info()`` method; by default ``display.memory_usage`` is True but this can be overridden by explicitly passing the memory_usage argument to the df.info() method, as shown below. Additionally `memory_usage` is an available method for a dataframe object which returns the memory usage of each column (for more information see :ref:`df-memory-usage`): + + .. ipython:: python + + df = DataFrame({ 'float' : np.random.randn(1000), 'int' : np.random.randint(0,5,size=1000)}) + df.memory_usage() + + df.info(memory_usage=True) + + .. _whatsnew_0150.dt: .dt accessor diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 1d93b9d5e69c1..a56d3b93d87da 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -203,6 +203,12 @@ Setting this to None/False restores the values to their initial value. """ +pc_memory_usage_doc = """ +: bool or None + This specifies if the memory usage of a DataFrame should be displayed when + df.info() is called. 
+""" + style_backup = dict() @@ -274,6 +280,8 @@ def mpl_style_cb(key): # redirected to width, make defval identical cf.register_option('line_width', get_default_val('display.width'), pc_line_width_doc) + cf.register_option('memory_usage', True, pc_memory_usage_doc, + validator=is_instance_factory([type(None), bool])) cf.deprecate_option('display.line_width', msg=pc_line_width_deprecation_warning, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5cfb2affe5a7b..65f7d56f5aa8a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1390,7 +1390,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, if buf is None: return formatter.buf.getvalue() - def info(self, verbose=None, buf=None, max_cols=None): + def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None): """ Concise summary of a DataFrame. @@ -1404,6 +1404,12 @@ def info(self, verbose=None, buf=None, max_cols=None): max_cols : int, default None Determines whether full summary or short summary is printed. None follows the `display.max_info_columns` setting. + memory_usage : boolean, default None + Specifies whether total memory usage of the DataFrame + elements (including index) should be displayed. None follows + the `display.memory_usage` setting. True or False overrides + the `display.memory_usage` setting. Memory usage is shown in + human-readable units (base-2 representation). 
""" from pandas.core.format import _put_lines @@ -1462,6 +1468,14 @@ def _verbose_repr(): def _non_verbose_repr(): lines.append(self.columns.summary(name='Columns')) + def _sizeof_fmt(num): + # returns size in human readable format + for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: + if num < 1024.0: + return "%3.1f %s" % (num, x) + num /= 1024.0 + return "%3.1f %s" % (num, 'PB') + if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None @@ -1474,9 +1488,46 @@ def _non_verbose_repr(): counts = self.get_dtype_counts() dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] - lines.append('dtypes: %s\n' % ', '.join(dtypes)) + lines.append('dtypes: %s' % ', '.join(dtypes)) + if memory_usage is None: + memory_usage = get_option('display.memory_usage') + if memory_usage: # append memory usage of df to display + lines.append("memory usage: %s\n" % + _sizeof_fmt(self.memory_usage(index=True).sum())) _put_lines(buf, lines) + def memory_usage(self, index=False): + """Memory usage of DataFrame columns. + + Parameters + ---------- + index : bool + Specifies whether to include memory usage of DataFrame's + index in returned Series. If `index=True` (default is False) + the first index of the Series is `Index`. + + Returns + ------- + sizes : Series + A series with column names as index and memory usage of + columns with units of bytes. + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array. 
+ + See Also + -------- + numpy.ndarray.nbytes + """ + result = Series([ c.values.nbytes for col, c in self.iteritems() ], + index=self.columns) + if index: + result = Series(self.index.values.nbytes, + index=['Index']).append(result) + return result + def transpose(self): """Transpose index and columns""" return super(DataFrame, self).transpose(1, 0) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 9216b7a286c54..7d4ee05a1e64f 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -43,7 +43,7 @@ def has_info_repr(df): def has_non_verbose_info_repr(df): has_info = has_info_repr(df) r = repr(df) - nv = len(r.split('\n')) == 5 # 1. <class>, 2. Index, 3. Columns, 4. dtype, 5. trailing newline + nv = len(r.split('\n')) == 6 # 1. <class>, 2. Index, 3. Columns, 4. dtype, 5. memory usage, 6. trailing newline return has_info and nv def has_horizontally_truncated_repr(df): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7062a53bb5881..035a301807039 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6553,7 +6553,7 @@ def test_info_max_cols(self): buf = StringIO() df.info(buf=buf, verbose=verbose) res = buf.getvalue() - self.assertEqual(len(res.split('\n')), len_) + self.assertEqual(len(res.strip().split('\n')), len_) for len_, verbose in [(10, None), (5, False), (10, True)]: @@ -6562,7 +6562,7 @@ def test_info_max_cols(self): buf = StringIO() df.info(buf=buf, verbose=verbose) res = buf.getvalue() - self.assertEqual(len(res.split('\n')), len_) + self.assertEqual(len(res.strip().split('\n')), len_) for len_, max_cols in [(10, 5), (5, 4)]: # setting truncates @@ -6570,15 +6570,49 @@ def test_info_max_cols(self): buf = StringIO() df.info(buf=buf, max_cols=max_cols) res = buf.getvalue() - self.assertEqual(len(res.split('\n')), len_) + self.assertEqual(len(res.strip().split('\n')), len_) # setting wouldn't truncate with option_context('max_info_columns', 5): buf = StringIO() 
df.info(buf=buf, max_cols=max_cols) res = buf.getvalue() - self.assertEqual(len(res.split('\n')), len_) + self.assertEqual(len(res.strip().split('\n')), len_) + def test_info_memory_usage(self): + # Ensure memory usage is displayed, when asserted, on the last line + dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', + 'complex128', 'object', 'bool'] + data = {} + n = 10 + for i, dtype in enumerate(dtypes): + data[i] = np.random.randint(2, size=n).astype(dtype) + df = DataFrame(data) + buf = StringIO() + # display memory usage case + df.info(buf=buf, memory_usage=True) + res = buf.getvalue().splitlines() + self.assertTrue("memory usage: " in res[-1]) + # do not display memory usage cas + df.info(buf=buf, memory_usage=False) + res = buf.getvalue().splitlines() + self.assertTrue("memory usage: " not in res[-1]) + + # Test a DataFrame with duplicate columns + dtypes = ['int64', 'int64', 'int64', 'float64'] + data = {} + n = 100 + for i, dtype in enumerate(dtypes): + data[i] = np.random.randint(2, size=n).astype(dtype) + df = DataFrame(data) + df.columns = dtypes + # Ensure df size is as expected + df_size = df.memory_usage().sum() + exp_size = len(dtypes) * n * 8 # cols * rows * bytes + self.assertEqual(df_size, exp_size) + # Ensure number of cols in memory_usage is the same as df + size_df = np.size(df.columns.values) # index=False; default + self.assertEqual(size_df, np.size(df.memory_usage())) def test_dtypes(self): self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
Closes [#6852](https://github.com/pydata/pandas/issues/6852) For a sample DataFrame: ``` python df = pd.DataFrame({ 'float' : np.random.randn(10000000), 'int' : np.random.randint(0,5,size=10000000), 'date' : Timestamp('20130101'), 'string' : 'foo', 'smallint' : np.random.randint(0,5,size=10000000).astype('int16') }) ``` The memory usage of the elements of each column (in units of bytes) are returned by accessing the `meminfo` method. This returns a Series: ``` python >>> df.meminfo() date 80000000 float 80000000 int 80000000 smallint 20000000 string 80000000 dtype: int64 ``` Also, the total memory usage (in a human readable format) can be found via the `info(memory_usage=True)` method: ``` python >>> df.info(memory_usage=True) <class 'pandas.core.frame.DataFrame'> Int64Index: 10000000 entries, 0 to 9999999 Data columns (total 5 columns): date datetime64[ns] float float64 int int64 smallint int16 string object dtypes: datetime64[ns](1), float64(1), int16(1), int64(1), object(1) memory usage: 324.2 MB ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7619
2014-06-30T18:44:51Z
2014-10-05T02:31:10Z
2014-10-05T02:31:10Z
2014-10-05T02:35:24Z
COMPAT: make numpy NaT comparison use a view to avoid implicit conversions
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 70675875c8143..d32978c881244 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -892,7 +892,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit): if ts is None or ts is NaT or ts is np_NaT: obj.value = NPY_NAT elif is_datetime64_object(ts): - if ts == np_NaT: + if ts.view('i8') == iNaT: obj.value = NPY_NAT else: obj.value = _get_datetime64_nanos(ts) @@ -1222,7 +1222,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, continue raise elif util.is_datetime64_object(val): - if val == np_NaT or val.view('i8') == iNaT: + if val is np_NaT or val.view('i8') == iNaT: iresult[i] = iNaT else: try: @@ -1303,7 +1303,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, if _checknull_with_nat(val): oresult[i] = np.nan elif util.is_datetime64_object(val): - if val == np_NaT: + if val is np_NaT or val.view('i8') == iNaT: oresult[i] = np.nan else: oresult[i] = val.item()
https://api.github.com/repos/pandas-dev/pandas/pulls/7617
2014-06-30T14:40:13Z
2014-06-30T15:17:20Z
2014-06-30T15:17:20Z
2014-06-30T15:17:20Z
BUG: Bug in to_timedelta that accepted invalid units and misinterpreted m/h (GH7611, GH6423)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index be818e71f55a8..58154e066c0bf 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -176,7 +176,7 @@ Bug Fixes - Bug in groupby ``.nth`` with a Series and integer-like column name (:issue:`7559`) - Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) - +- Bug in ``to_timedelta`` that accepted invalid units and misinterpreted 'm/h' (:issue:`7611`, :issue: `6423`) - Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`) diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index f7acb182b8cde..8e841632d88d3 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -199,20 +199,40 @@ def conv(v): expected = Series([ np.timedelta64(1,'D') ]*5) tm.assert_series_equal(result, expected) + def testit(unit, transform): + + # array + result = to_timedelta(np.arange(5),unit=unit) + expected = Series([ np.timedelta64(i,transform(unit)) for i in np.arange(5).tolist() ]) + tm.assert_series_equal(result, expected) + + # scalar + result = to_timedelta(2,unit=unit) + expected = np.timedelta64(2,transform(unit)).astype('timedelta64[ns]') + self.assert_numpy_array_equal(result,expected) + # validate all units # GH 6855 for unit in ['Y','M','W','D','y','w','d']: - result = to_timedelta(np.arange(5),unit=unit) - expected = Series([ np.timedelta64(i,unit.upper()) for i in np.arange(5).tolist() ]) - tm.assert_series_equal(result, expected) + testit(unit,lambda x: x.upper()) + for unit in ['days','day','Day','Days']: + testit(unit,lambda x: 'D') for unit in ['h','m','s','ms','us','ns','H','S','MS','US','NS']: - result = to_timedelta(np.arange(5),unit=unit) - expected = Series([ np.timedelta64(i,unit.lower()) for i in np.arange(5).tolist() ]) - tm.assert_series_equal(result, expected) + testit(unit,lambda x: x.lower()) + + # offsets + + # m + testit('T',lambda x: 'm') + + # ms 
+ testit('L',lambda x: 'ms') # these will error self.assertRaises(ValueError, lambda : to_timedelta(['1h'])) self.assertRaises(ValueError, lambda : to_timedelta(['1m'])) + self.assertRaises(ValueError, lambda : to_timedelta([1,2],unit='foo')) + self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo')) def test_to_timedelta_via_apply(self): _skip_if_numpy_not_friendly() diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index 1dc8b5cfea132..0d6d74db6f18c 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -32,6 +32,8 @@ def to_timedelta(arg, box=True, unit='ns'): if _np_version_under1p7: raise ValueError("to_timedelta is not support for numpy < 1.7") + unit = _validate_timedelta_unit(unit) + def _convert_listlike(arg, box, unit): if isinstance(arg, (list,tuple)): @@ -40,7 +42,6 @@ def _convert_listlike(arg, box, unit): if is_timedelta64_dtype(arg): value = arg.astype('timedelta64[ns]') elif is_integer_dtype(arg): - unit = _validate_timedelta_unit(unit) # these are shortcutable value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]') @@ -67,14 +68,39 @@ def _convert_listlike(arg, box, unit): # ...so it must be a scalar value. Return scalar. 
return _coerce_scalar_to_timedelta_type(arg, unit=unit) +_unit_map = { + 'Y' : 'Y', + 'y' : 'Y', + 'W' : 'W', + 'w' : 'W', + 'D' : 'D', + 'd' : 'D', + 'days' : 'D', + 'Days' : 'D', + 'day' : 'D', + 'Day' : 'D', + 'M' : 'M', + 'H' : 'h', + 'h' : 'h', + 'm' : 'm', + 'T' : 'm', + 'S' : 's', + 's' : 's', + 'L' : 'ms', + 'MS' : 'ms', + 'ms' : 'ms', + 'US' : 'us', + 'us' : 'us', + 'NS' : 'ns', + 'ns' : 'ns', + } + def _validate_timedelta_unit(arg): """ provide validation / translation for timedelta short units """ - - if re.search("Y|W|D",arg,re.IGNORECASE) or arg == 'M': - return arg.upper() - elif re.search("h|m|s|ms|us|ns",arg,re.IGNORECASE): - return arg.lower() - raise ValueError("invalid timedelta unit {0} provided".format(arg)) + try: + return _unit_map[arg] + except: + raise ValueError("invalid timedelta unit {0} provided".format(arg)) _short_search = re.compile( "^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c957884b3cebb..d8aed3a42ca72 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1387,11 +1387,17 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce): else: if util.is_array(ts): ts = ts.astype('int64').item() - ts = cast_from_unit(ts, unit) - if _np_version_under1p7: - ts = timedelta(microseconds=ts/1000.0) + if unit in ['Y','M','W']: + if _np_version_under1p7: + raise ValueError("unsupported unit for native timedelta under this numpy {0}".format(unit)) + else: + ts = np.timedelta64(ts,unit) else: - ts = np.timedelta64(ts) + ts = cast_from_unit(ts, unit) + if _np_version_under1p7: + ts = timedelta(microseconds=ts/1000.0) + else: + ts = np.timedelta64(ts) elif util.is_string_object(ts): if ts in _nat_strings or coerce: return np.timedelta64(iNaT) @@ -1747,6 +1753,12 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except -1: if unit == 'D' or unit == 'd': m = 1000000000L * 86400 p = 6 + elif unit == 'h': + m = 1000000000L 
* 3600 + p = 6 + elif unit == 'm': + m = 1000000000L * 60 + p = 6 elif unit == 's': m = 1000000000L p = 6 @@ -1756,9 +1768,11 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except -1: elif unit == 'us': m = 1000L p = 0 - else: + elif unit == 'ns' or unit is None: m = 1L p = 0 + else: + raise ValueError("cannot cast unit {0}".format(unit)) # just give me the unit back if ts is None:
closes #7611 closes #6423
https://api.github.com/repos/pandas-dev/pandas/pulls/7616
2014-06-30T13:32:38Z
2014-06-30T14:35:18Z
2014-06-30T14:35:18Z
2014-09-05T03:18:03Z
Fix typo in documentation
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 795bbca673f77..7ca4ff0529b4e 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1448,7 +1448,7 @@ Elements can be set to ``NaT`` using ``np.nan`` analagously to datetimes y[1] = np.nan y -Operands can also appear in a reversed order (a singluar object operated with a Series) +Operands can also appear in a reversed order (a singular object operated with a Series) .. ipython:: python
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7613
2014-06-30T09:16:50Z
2014-06-30T09:41:27Z
2014-06-30T09:41:27Z
2014-06-30T09:41:37Z
BUG: Timestamp.__new__ doesnt preserve nanosecond
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index ab2c4a697e1ea..7030598040ea6 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -255,7 +255,7 @@ Bug Fixes - Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`) - Bug in ``DatetimeIndex.asobject`` raises ``ValueError`` when it contains ``NaT`` (:issue:`7539`) - +- Bug in ``Timestamp.__new__`` doesn't preserve nanosecond properly (:issue:`7610`) - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 9499f05a4aa5f..72ab9141609b4 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -283,7 +283,7 @@ def test_timedelta_ns_based_arithmetic(self): def test_timedelta_us_arithmetic(self): self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'), -123000) - def test_timedelta_ns_arithmetic(self): + def test_timedelta_ms_arithmetic(self): time = self.timestamp + np.timedelta64(-123, 'ms') self.assert_ns_timedelta(time, -123000000) @@ -291,6 +291,40 @@ def test_nanosecond_string_parsing(self): self.timestamp = Timestamp('2013-05-01 07:15:45.123456789') self.assertEqual(self.timestamp.value, 1367392545123456000) + def test_nanosecond_timestamp(self): + # GH 7610 + expected = 1293840000000000005 + t = Timestamp('2011-01-01') + offsets.Nano(5) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 5) + + t = Timestamp(t) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 5) + + t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000005Z')) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 5) + + expected = 
1293840000000000010 + t = t + offsets.Nano(5) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 10) + + t = Timestamp(t) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 10) + + t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000010Z')) + self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')") + self.assertEqual(t.value, expected) + self.assertEqual(t.nanosecond, 10) + def test_nat_arithmetic(self): # GH 6873 nat = tslib.NaT diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c957884b3cebb..70675875c8143 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -956,6 +956,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit): if is_timestamp(ts): obj.value += ts.nanosecond + obj.dts.ps = ts.nanosecond * 1000 _check_dts_bounds(&obj.dts) return obj elif PyDate_Check(ts):
When `Timestamp.__new__` accepts a timestamp with nanosecond, nanosecond is not preserved properly. ``` # create Timestamp with ns (OK) t = pd.Timestamp('2011-01-01') + pd.offsets.Nano(5) t, t.value # (Timestamp('2011-01-01 00:00:00.000000005'), 1293840000000000005) # If it is passed to Timestamp.__init__, ns is not displayed even though internal value includes it. (NG) t = pd.Timestamp(t) t, t.value # (Timestamp('2011-01-01 00:00:00'), 1293840000000000005) # If offset is added to above result, ns is displayed properly (OK) t = t + pd.offsets.Nano(5) t, t.value # (Timestamp('2011-01-01 00:00:00.000000010'), 1293840000000000010) ``` NOTE: Unrelated to this issue, test_tslib had `test_timedelta_ns_arithmetic` method duplicatelly. Thus renamed.
https://api.github.com/repos/pandas-dev/pandas/pulls/7610
2014-06-29T14:14:44Z
2014-06-30T13:29:09Z
2014-06-30T13:29:09Z
2014-07-02T16:45:22Z
BUG: PeriodIndex.min/max returns int
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3160b35386fa2..dbd8b15333d50 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -185,6 +185,7 @@ Bug Fixes - Bug in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) - Bug in multi-index slicing with datetimelike ranges (strings and Timestamps), (:issue:`7429`) - Bug in ``Index.min`` and ``max`` doesn't handle ``nan`` and ``NaT`` properly (:issue:`7261`) +- Bug in ``PeriodIndex.min/max`` results in ``int`` (:issue:`7609`) - Bug in ``resample`` where ``fill_method`` was ignored if you passed ``how`` (:issue:`7261`) - Bug in ``TimeGrouper`` doesn't exclude column specified by ``key`` (:issue:`7227`) - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` diff --git a/pandas/core/base.py b/pandas/core/base.py index cc676b9682277..aff2713ee85f5 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -402,3 +402,34 @@ def tolist(self): """ return list(self.asobject) + def min(self, axis=None): + """ + Overridden ndarray.min to return an object + """ + import pandas.tslib as tslib + mask = self.asi8 == tslib.iNaT + masked = self[~mask] + if len(masked) == 0: + return self._na_value + elif self.is_monotonic: + return masked[0] + else: + min_stamp = masked.asi8.min() + return self._box_func(min_stamp) + + def max(self, axis=None): + """ + Overridden ndarray.max to return an object + """ + import pandas.tslib as tslib + mask = self.asi8 == tslib.iNaT + masked = self[~mask] + if len(masked) == 0: + return self._na_value + elif self.is_monotonic: + return masked[-1] + else: + max_stamp = masked.asi8.max() + return self._box_func(max_stamp) + + diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 291b10c70c83c..f41e745013f08 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -8,6 +8,7 @@ from pandas.util.testing import assertRaisesRegexp, assert_isinstance from pandas import Series, Index, 
Int64Index, DatetimeIndex, PeriodIndex from pandas import _np_version_under1p7 +import pandas.tslib as tslib import nose import pandas.util.testing as tm @@ -202,7 +203,10 @@ def test_ops(self): for op in ['max','min']: for o in self.objs: result = getattr(o,op)() - expected = getattr(o.values,op)() + if not isinstance(o, PeriodIndex): + expected = getattr(o.values, op)() + else: + expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq) try: self.assertEqual(result, expected) except ValueError: @@ -232,17 +236,6 @@ def test_nanops(self): # check DatetimeIndex non-monotonic path self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1)) - # explicitly create DatetimeIndex - obj = DatetimeIndex([]) - self.assertTrue(pd.isnull(getattr(obj, op)())) - - obj = DatetimeIndex([pd.NaT]) - self.assertTrue(pd.isnull(getattr(obj, op)())) - - obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) - self.assertTrue(pd.isnull(getattr(obj, op)())) - - def test_value_counts_unique_nunique(self): for o in self.objs: klass = type(o) @@ -552,6 +545,33 @@ def test_asobject_tolist(self): self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) + def test_minmax(self): + for tz in [None, 'Asia/Tokyo', 'US/Eastern']: + # monotonic + idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02', + '2011-01-03'], tz=tz) + self.assertTrue(idx1.is_monotonic) + + # non-monotonic + idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', + '2011-01-02', pd.NaT], tz=tz) + self.assertFalse(idx2.is_monotonic) + + for idx in [idx1, idx2]: + self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz)) + self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz)) + + for op in ['min', 'max']: + # Return NaT + obj = DatetimeIndex([]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = DatetimeIndex([pd.NaT]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) + self.assertTrue(pd.isnull(getattr(obj, 
op)())) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -597,6 +617,39 @@ def test_asobject_tolist(self): self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT) self.assertTrue(result_list[2].freq, 'D') + def test_minmax(self): + + # monotonic + idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02', + '2011-01-03'], freq='D') + self.assertTrue(idx1.is_monotonic) + + # non-monotonic + idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03', + '2011-01-02', pd.NaT], freq='D') + self.assertFalse(idx2.is_monotonic) + + for idx in [idx1, idx2]: + self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D')) + self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D')) + + for op in ['min', 'max']: + # Return NaT + obj = PeriodIndex([], freq='M') + result = getattr(obj, op)() + self.assertEqual(result.ordinal, tslib.iNaT) + self.assertEqual(result.freq, 'M') + + obj = PeriodIndex([pd.NaT], freq='M') + result = getattr(obj, op)() + self.assertEqual(result.ordinal, tslib.iNaT) + self.assertEqual(result.freq, 'M') + + obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M') + result = getattr(obj, op)() + self.assertEqual(result.ordinal, tslib.iNaT) + self.assertEqual(result.freq, 'M') + if __name__ == '__main__': import nose diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index ff585d80af830..ac002b86f3de9 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1783,34 +1783,6 @@ def indexer_between_time(self, start_time, end_time, include_start=True, return mask.nonzero()[0] - def min(self, axis=None): - """ - Overridden ndarray.min to return a Timestamp - """ - mask = self.asi8 == tslib.iNaT - masked = self[~mask] - if len(masked) == 0: - return tslib.NaT - elif self.is_monotonic: - return masked[0] - else: - min_stamp = masked.asi8.min() - return Timestamp(min_stamp, tz=self.tz) - - def max(self, axis=None): - """ - Overridden ndarray.max to return a Timestamp - """ - mask = self.asi8 == tslib.iNaT - masked = 
self[~mask] - if len(masked) == 0: - return tslib.NaT - elif self.is_monotonic: - return masked[-1] - else: - max_stamp = masked.asi8.max() - return Timestamp(max_stamp, tz=self.tz) - def to_julian_date(self): """ Convert DatetimeIndex to Float64Index of Julian Dates. diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index d41438bbfd208..5ded7161130bc 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -712,6 +712,10 @@ def _simple_new(cls, values, name, freq=None, **kwargs): result.freq = freq return result + @property + def _na_value(self): + return self._box_func(tslib.iNaT) + def __contains__(self, key): if not isinstance(key, Period) or key.freq != self.freq: if isinstance(key, compat.string_types):
Related to #7279. `PeriodIndex.min/max` should return `Period` ignoring `NaT`. ``` pidx = pd.PeriodIndex(['2011-01', 'NaT'], freq='M') pidx.min() # -9223372036854775808 pidx.max() #492 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7609
2014-06-29T10:26:56Z
2014-06-29T12:43:00Z
2014-06-29T12:43:00Z
2014-06-30T10:58:00Z
BUG: DatetimeIndex.delete with tz raises ValueError
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index d022911fe2909..de758c4c8a579 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1626,7 +1626,7 @@ def delete(self, loc): freq = self.freq if self.tz is not None: - new_dates = tslib.date_normalize(new_dates, self.tz) + new_dates = _tz_convert_with_transitions(new_dates, 'UTC', self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) def _view_like(self, ndarray): diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index eb7ee6e87477c..f353f08114a2c 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2455,27 +2455,31 @@ def test_insert(self): with tm.assertRaises(ValueError): result = idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone('US/Eastern'))) - # preserve freq - expected = date_range('1/1/2000', periods=4, freq='D', tz='Asia/Tokyo', name='idx') - for d in [pd.Timestamp('2000-01-04', tz='Asia/Tokyo'), - datetime(2000, 1, 4, tzinfo=pytz.timezone('Asia/Tokyo'))]: - - result = idx.insert(3, d) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, expected.freq) - - expected = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03', - '2000-01-02'], name='idx', - tz='Asia/Tokyo', freq=None) - # reset freq to None - for d in [pd.Timestamp('2000-01-02', tz='Asia/Tokyo'), - datetime(2000, 1, 2, tzinfo=pytz.timezone('Asia/Tokyo'))]: - result = idx.insert(3, d) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertTrue(result.freq is None) + for tz in ['US/Pacific', 'Asia/Singapore']: + idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz, name='idx') + # preserve freq + expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz, name='idx') + for d in [pd.Timestamp('2000-01-01 15:00', tz=tz), + 
pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]: + + result = idx.insert(6, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.tz, expected.tz) + expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00', '2000-01-01 11:00', + '2000-01-01 12:00', '2000-01-01 13:00', '2000-01-01 14:00', + '2000-01-01 10:00'], name='idx', + tz=tz, freq=None) + # reset freq to None + for d in [pd.Timestamp('2000-01-01 10:00', tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]: + result = idx.insert(6, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertTrue(result.freq is None) + self.assertEqual(result.tz, expected.tz) def test_delete(self): idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx') @@ -2501,16 +2505,25 @@ def test_delete(self): # either depeidnig on numpy version result = idx.delete(5) - idx = date_range(start='2000-01-01', periods=5, - freq='D', name='idx', tz='US/Pacific') + for tz in [None, 'Asia/Tokyo', 'US/Pacific']: + idx = date_range(start='2000-01-01 09:00', periods=10, + freq='H', name='idx', tz=tz) - expected = date_range(start='2000-01-02', periods=4, - freq='D', name='idx', tz='US/Pacific') - result = idx.delete(0) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'D') - self.assertEqual(result.tz, expected.tz) + expected = date_range(start='2000-01-01 10:00', periods=9, + freq='H', name='idx', tz=tz) + result = idx.delete(0) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'H') + self.assertEqual(result.tz, expected.tz) + + expected = date_range(start='2000-01-01 09:00', periods=9, + freq='H', name='idx', tz=tz) + result = idx.delete(-1) + self.assertTrue(result.equals(expected)) + 
self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'H') + self.assertEqual(result.tz, expected.tz) def test_delete_slice(self): idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx') @@ -2538,30 +2551,34 @@ def test_delete_slice(self): self.assertEqual(result.name, expected.name) self.assertEqual(result.freq, expected.freq) - ts = pd.Series(1, index=pd.date_range('2000-01-01', periods=10, - freq='D', name='idx')) - # preserve freq - result = ts.drop(ts.index[:5]).index - expected = pd.date_range('2000-01-06', periods=5, freq='D', name='idx') - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) + for tz in [None, 'Asia/Tokyo', 'US/Pacific']: + ts = pd.Series(1, index=pd.date_range('2000-01-01 09:00', periods=10, + freq='H', name='idx', tz=tz)) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H', name='idx', tz=tz) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.tz, expected.tz) - # reset freq to None - result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index - expected = DatetimeIndex(['2000-01-01', '2000-01-03', '2000-01-05', - '2000-01-07', '2000-01-09'], freq=None, name='idx') - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00', '2000-01-01 13:00', + '2000-01-01 15:00', '2000-01-01 17:00'], + freq=None, name='idx', tz=tz) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.tz, expected.tz) def test_take(self): - dates = 
[datetime(2010, 1, 6), datetime(2010, 1, 7), - datetime(2010, 1, 9), datetime(2010, 1, 13)] + dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), + datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = DatetimeIndex(start='1/1/10', end='12/31/12', - freq='D', tz=tz, name='idx') + idx = DatetimeIndex(start='2010-01-01 09:00', end='2010-02-01 09:00', + freq='H', tz=tz, name='idx') expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) taken1 = idx.take([5, 6, 8, 12])
Found and fixed `DatetimeIndex.delete` (#7302) results in `ValueError` when it has `tz` and `freq` is less than daily.. ``` idx = pd.date_range(start='2011-01-01 09:00', periods=40, freq='H', tz='Asia/Tokyo') idx.delete(0) # ValueError: Inferred frequency None from passed dates does notconform to passed frequency H ``` `insert` (#7299) and `take` don't have problems, but changed test case to use hourly frequencies to detect this kind of problem.
https://api.github.com/repos/pandas-dev/pandas/pulls/7608
2014-06-29T10:16:42Z
2014-07-03T21:31:22Z
2014-07-03T21:31:22Z
2014-07-05T04:48:06Z
CLN: Simplify Period Construction / Resolution
diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.6.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.6.pickle new file mode 100644 index 0000000000000..4f5bdada3c7da Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.6.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_darwin_2.7.6.pickle b/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_darwin_2.7.6.pickle new file mode 100644 index 0000000000000..1b1652fc03c27 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_darwin_2.7.6.pickle differ diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py index 48d0fd57d831b..3a0386c7660d4 100644 --- a/pandas/io/tests/generate_legacy_pickles.py +++ b/pandas/io/tests/generate_legacy_pickles.py @@ -58,7 +58,7 @@ def create_data(): from pandas import (Series,TimeSeries,DataFrame,Panel, SparseSeries,SparseTimeSeries,SparseDataFrame,SparsePanel, Index,MultiIndex,PeriodIndex, - date_range,bdate_range,Timestamp) + date_range,period_range,bdate_range,Timestamp) nan = np.nan data = { @@ -70,7 +70,9 @@ def create_data(): } index = dict(int = Index(np.arange(10)), - date = date_range('20130101',periods=10)) + date = date_range('20130101',periods=10), + period = period_range('2013-01-01', freq='M', periods=10)) + mi = dict(reg2 = MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])), names=['first', 'second'])) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 441a5e8a99c78..fe61e5f0acd9b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -30,20 +30,40 @@ class FreqGroup(object): class Resolution(object): - RESO_US = 0 - RESO_SEC = 1 - RESO_MIN = 2 - RESO_HR = 3 - RESO_DAY = 4 + RESO_US = tslib.US_RESO + RESO_MS = 
tslib.MS_RESO + RESO_SEC = tslib.S_RESO + RESO_MIN = tslib.T_RESO + RESO_HR = tslib.H_RESO + RESO_DAY = tslib.D_RESO + + _reso_str_map = { + RESO_US: 'microsecond', + RESO_MS: 'millisecond', + RESO_SEC: 'second', + RESO_MIN: 'minute', + RESO_HR: 'hour', + RESO_DAY: 'day'} + + _reso_period_map = { + 'year': 'A', + 'quarter': 'Q', + 'month': 'M', + 'day': 'D', + 'hour': 'H', + 'minute': 'T', + 'second': 'S', + 'millisecond': 'L', + 'microsecond': 'U', + 'nanosecond': 'N'} @classmethod def get_str(cls, reso): - return {cls.RESO_US: 'microsecond', - cls.RESO_SEC: 'second', - cls.RESO_MIN: 'minute', - cls.RESO_HR: 'hour', - cls.RESO_DAY: 'day'}.get(reso, 'day') + return cls._reso_str_map.get(reso, 'day') + @classmethod + def get_freq(cls, resostr): + return cls._reso_period_map[resostr] def get_reso_string(reso): return Resolution.get_str(reso) @@ -571,22 +591,9 @@ def _period_alias_dictionary(): return alias_dict -_reso_period_map = { - "year": "A", - "quarter": "Q", - "month": "M", - "day": "D", - "hour": "H", - "minute": "T", - "second": "S", - "millisecond": "L", - "microsecond": "U", - "nanosecond": "N", -} - def _infer_period_group(freqstr): - return _period_group(_reso_period_map[freqstr]) + return _period_group(Resolution._reso_period_map[freqstr]) def _period_group(freqstr): diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 77e9677f0b723..cceac61f392a8 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -111,8 +111,14 @@ def __init__(self, value=None, freq=None, ordinal=None, elif isinstance(value, compat.string_types) or com.is_integer(value): if com.is_integer(value): value = str(value) + value = value.upper() - dt, freq = _get_date_and_freq(value, freq) + dt, _, reso = parse_time_string(value, freq) + if freq is None: + try: + freq = _freq_mod.Resolution.get_freq(reso) + except KeyError: + raise ValueError("Invalid frequency or could not infer: %s" % reso) elif isinstance(value, datetime): dt = value @@ -451,36 +457,6 
@@ def strftime(self, fmt): return tslib.period_format(self.ordinal, base, fmt) -def _get_date_and_freq(value, freq): - value = value.upper() - dt, _, reso = parse_time_string(value, freq) - - if freq is None: - if reso == 'year': - freq = 'A' - elif reso == 'quarter': - freq = 'Q' - elif reso == 'month': - freq = 'M' - elif reso == 'day': - freq = 'D' - elif reso == 'hour': - freq = 'H' - elif reso == 'minute': - freq = 'T' - elif reso == 'second': - freq = 'S' - elif reso == 'microsecond': - if dt.microsecond % 1000 == 0: - freq = 'L' - else: - freq = 'U' - else: - raise ValueError("Invalid frequency or could not infer: %s" % reso) - - return dt, freq - - def _get_ordinals(data, freq): f = lambda x: Period(x, freq=freq).ordinal if isinstance(data[0], Period): diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index c4bac4b9b14f0..42edb799b4c89 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -227,6 +227,14 @@ def test_period_constructor(self): i2 = Period(datetime(2007, 1, 1), freq='M') self.assertEqual(i1, i2) + i1 = Period('2007-01-01 09:00:00.001') + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') + self.assertEqual(i1, expected) + + i1 = Period('2007-01-01 09:00:00.00101') + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') + self.assertEqual(i1, expected) + self.assertRaises(ValueError, Period, ordinal=200701) self.assertRaises(ValueError, Period, '2007-1-1', freq='X') diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 72ab9141609b4..82f05a0de4588 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -476,6 +476,17 @@ def test_addition_subtraction_preserve_frequency(self): self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq) self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq) + def test_resolution(self): + + for 
freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], + [tslib.D_RESO, tslib.D_RESO, tslib.D_RESO, tslib.D_RESO, + tslib.H_RESO, tslib.T_RESO,tslib.S_RESO, tslib.MS_RESO, tslib.US_RESO]): + for tz in [None, 'Asia/Tokyo', 'US/Eastern']: + idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) + result = tslib.resolution(idx.asi8, idx.tz) + self.assertEqual(result, expected) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index f8043b23a58af..b4ab813d3debe 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -508,8 +508,11 @@ def dateutil_parse(timestr, default, if reso is None: raise ValueError("Cannot parse date.") - if reso == 'microsecond' and repl['microsecond'] == 0: - reso = 'second' + if reso == 'microsecond': + if repl['microsecond'] == 0: + reso = 'second' + elif repl['microsecond'] % 1000 == 0: + reso = 'millisecond' ret = default.replace(**repl) if res.weekday is not None and not res.day: diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 39d80521fbdb5..090b49bde68a6 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -3372,13 +3372,16 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None): return reso US_RESO = 0 -S_RESO = 1 -T_RESO = 2 -H_RESO = 3 -D_RESO = 4 +MS_RESO = 1 +S_RESO = 2 +T_RESO = 3 +H_RESO = 4 +D_RESO = 5 cdef inline int _reso_stamp(pandas_datetimestruct *dts): if dts.us != 0: + if dts.us % 1000 == 0: + return MS_RESO return US_RESO elif dts.sec != 0: return S_RESO
Simplified `Period` construction a little by adding `millisecond` resolution.
https://api.github.com/repos/pandas-dev/pandas/pulls/7607
2014-06-29T06:49:15Z
2014-07-06T13:36:20Z
2014-07-06T13:36:20Z
2014-07-09T12:37:56Z
BUG: DTI.freqstr raises AttributeError when freq is None
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3160b35386fa2..a46718c8e88f3 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -258,7 +258,7 @@ Bug Fixes - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). - Bug in ``DataFrame.reset_index`` loses ``tz`` (:issue:`3950`) - +- Bug in ``DatetimeIndex.freqstr`` raises ``AttributeError`` when ``freq`` is ``None`` (:issue:`7606`) - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4e69e2056507..e7678e31d50ab 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4311,12 +4311,8 @@ def to_period(self, freq=None, axis=0, copy=True): axis = self._get_axis_number(axis) if axis == 0: - if freq is None: - freq = self.index.freqstr or self.index.inferred_freq new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: - if freq is None: - freq = self.columns.freqstr or self.columns.inferred_freq new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. 
Got %s' % str(axis)) diff --git a/pandas/core/series.py b/pandas/core/series.py index 78b049f4bf8d5..a484efe75e284 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2374,8 +2374,6 @@ def to_period(self, freq=None, copy=True): if copy: new_values = new_values.copy() - if freq is None: - freq = self.index.freqstr or self.index.inferred_freq new_index = self.index.to_period(freq=freq) return self._constructor(new_values, index=new_index).__finalize__(self) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index ff585d80af830..e439d7d523bd3 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -48,8 +48,7 @@ def f(self): 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']: month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12 - freqstr = self.freqstr if self.freq else None - return tslib.get_start_end_field(values, field, freqstr, month_kw) + return tslib.get_start_end_field(values, field, self.freqstr, month_kw) else: return tslib.get_date_field(values, field) f.__name__ = name @@ -573,10 +572,7 @@ def __unicode__(self): values = self.values - freq = None - if self.offset is not None: - freq = self.offset.freqstr - + freq = self.freqstr summary = str(self.__class__) if len(self) == 1: first = formatter(values[0], tz=self.tz) @@ -794,12 +790,14 @@ def to_period(self, freq=None): """ from pandas.tseries.period import PeriodIndex - if self.freq is None and freq is None: - msg = "You must pass a freq argument as current index has none." - raise ValueError(msg) - if freq is None: - freq = get_period_alias(self.freqstr) + freq = self.freqstr or self.inferred_freq + + if freq is None: + msg = "You must pass a freq argument as current index has none." 
+ raise ValueError(msg) + + freq = get_period_alias(freq) return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz) @@ -1440,6 +1438,8 @@ def inferred_freq(self): @property def freqstr(self): """ return the frequency object as a string if its set, otherwise None """ + if self.freq is None: + return None return self.offset.freqstr _year = _field_accessor('year', 'Y') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 11161308be279..eb7ee6e87477c 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1477,7 +1477,28 @@ def test_to_period(self): assert_series_equal(pts, exp) pts = ts.to_period('M') + exp.index = exp.index.asfreq('M') self.assertTrue(pts.index.equals(exp.index.asfreq('M'))) + assert_series_equal(pts, exp) + + # GH 7606 without freq + idx = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04']) + exp_idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03', + '2011-01-04'], freq='D') + + s = Series(np.random.randn(4), index=idx) + expected = s.copy() + expected.index = exp_idx + assert_series_equal(s.to_period(), expected) + + df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx) + expected = df.copy() + expected.index = exp_idx + assert_frame_equal(df.to_period(), expected) + + expected = df.copy() + expected.columns = exp_idx + assert_frame_equal(df.to_period(axis=1), expected) def create_dt64_based_index(self): data = [Timestamp('2007-01-01 10:11:12.123456Z'), @@ -2102,7 +2123,14 @@ def test_to_period_nofreq(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='infer') - idx.to_period() + self.assertEqual(idx.freqstr, 'D') + expected = pd.PeriodIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='D') + self.assertTrue(idx.to_period().equals(expected)) + + # GH 7606 + idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) + self.assertEqual(idx.freqstr, None) + 
self.assertTrue(idx.to_period().equals(expected)) def test_000constructor_resolution(self): # 2252
`DatetimeIndex.freqstr` raises `AttributeError` if `freq/offset` is `None`, even though docstring says "return the frequency object as a string if its set, otherwise None" ``` idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04']) idx.freqstr # AttributeError: 'NoneType' object has no attribute 'freqstr' ``` Also, `DataFrame/Series.to_period` has a logic to use `inferred_freq` when the freq is not passed, but it doesn't work actually because of the bug. Moved the logic to `DatetimeIndex.to_period` for consistency and made it works. The fix will simplify #7602 a little.
https://api.github.com/repos/pandas-dev/pandas/pulls/7606
2014-06-29T06:37:38Z
2014-06-29T11:12:57Z
2014-06-29T11:12:57Z
2014-06-30T10:58:09Z
BUG: {expanding,rolling}_{cov,corr} don't handle arguments with different index sets properly
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index f6519a17205a4..639554b70a913 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -268,3 +268,5 @@ Bug Fixes - Bug in ``Float64Index`` assignment with a non scalar indexer (:issue:`7586`) - Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) + +- Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, and ``rolling_corr`` for two arguments with mismatched index (:issue:`7512`) diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 42da19f1a241d..e5d96ee6b8f0f 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -209,11 +209,11 @@ def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, pairwise = True if pairwise is None else pairwise # only default unset arg1 = _conv_timerule(arg1, freq, how) arg2 = _conv_timerule(arg2, freq, how) - window = min(window, len(arg1), len(arg2)) def _get_cov(X, Y): - mean = lambda x: rolling_mean(x, window, min_periods, center=center) - count = rolling_count(X + Y, window, center=center) + adj_window = min(window, len(X), len(Y)) + mean = lambda x: rolling_mean(x, adj_window, min_periods, center=center) + count = rolling_count(X + Y, adj_window, center=center) bias_adj = count / (count - 1) return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise)) @@ -234,16 +234,17 @@ def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, pairwise = True if pairwise is None else pairwise # only default unset arg1 = _conv_timerule(arg1, freq, how) arg2 = _conv_timerule(arg2, freq, how) - window = min(window, len(arg1), len(arg2)) def _get_corr(a, b): - num = rolling_cov(a, b, window, min_periods, freq=freq, + adj_window = min(window, len(a), len(b)) + num = rolling_cov(a, b, adj_window, min_periods, freq=freq, center=center) - den 
= (rolling_std(a, window, min_periods, freq=freq, + den = (rolling_std(a, adj_window, min_periods, freq=freq, center=center) * - rolling_std(b, window, min_periods, freq=freq, + rolling_std(b, adj_window, min_periods, freq=freq, center=center)) return num / den + return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise)) @@ -261,9 +262,9 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): results = {} if isinstance(arg2, DataFrame): X, Y = arg1.align(arg2, join='outer') - X = X + 0 * Y - Y = Y + 0 * X if pairwise is False: + X = X + 0 * Y + Y = Y + 0 * X res_columns = arg1.columns.union(arg2.columns) for col in res_columns: if col in X and col in Y: @@ -276,7 +277,7 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): # Symmetric case results[k1][k2] = results[k2][k1] else: - results[k1][k2] = f(arg1[k1], arg2[k2]) + results[k1][k2] = f(*_prep_binary(arg1[k1], arg2[k2])) return Panel.from_dict(results).swapaxes('items', 'major') else: raise ValueError("'pairwise' is not True/False") @@ -917,7 +918,7 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False, min_periods = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - window = max(len(arg1), len(arg2)) + window = len(arg1) + len(arg2) return rolling_cov(arg1, arg2, window, min_periods=min_periods, freq=freq, center=center, pairwise=pairwise) @@ -935,7 +936,7 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, min_periods = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - window = max(len(arg1), len(arg2)) + window = len(arg1) + len(arg2) return rolling_corr(arg1, arg2, window, min_periods=min_periods, freq=freq, center=center, pairwise=pairwise) diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index dd91952cf537c..6cd187ddf8981 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -759,6 +759,98 @@ def 
test_expanding_corr_pairwise(self): for i in result.items: assert_almost_equal(result[i], rolling_result[i]) + def test_expanding_cov_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = mom.expanding_cov(s1, s2) + expected = Series([None, None, 2.0]) + assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = mom.expanding_cov(s1, s2a) + assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = mom.expanding_cov(s1, s2) + expected = Series([None, None, None, 4.5]) + assert_series_equal(result, expected) + + def test_expanding_corr_diff_index(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = mom.expanding_corr(s1, s2) + expected = Series([None, None, 1.0]) + assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = mom.expanding_corr(s1, s2a) + assert_series_equal(result, expected) + + s1 = Series([7, 8, 10], index=[0, 1, 3]) + s2 = Series([7, 9, 10], index=[0, 2, 3]) + result = mom.expanding_corr(s1, s2) + expected = Series([None, None, None, 1.]) + assert_series_equal(result, expected) + + def test_rolling_cov_diff_length(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = mom.rolling_cov(s1, s2, window=3, min_periods=2) + expected = Series([None, None, 2.0]) + assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = mom.rolling_cov(s1, s2a, window=3, min_periods=2) + assert_series_equal(result, expected) + + def test_rolling_corr_diff_length(self): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = mom.rolling_corr(s1, s2, window=3, min_periods=2) + expected = Series([None, None, 1.0]) + assert_series_equal(result, expected) + + s2a = 
Series([1, None, 3], index=[0, 1, 2]) + result = mom.rolling_corr(s1, s2a, window=3, min_periods=2) + assert_series_equal(result, expected) + + def test_expanding_cov_pairwise_diff_length(self): + # GH 7512 + df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B']) + df1a = DataFrame([[1,5], [3,9]], index=[0,2], columns=['A','B']) + df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y']) + df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y']) + result1 = mom.expanding_cov(df1, df2, pairwise=True)[2] + result2 = mom.expanding_cov(df1, df2a, pairwise=True)[2] + result3 = mom.expanding_cov(df1a, df2, pairwise=True)[2] + result4 = mom.expanding_cov(df1a, df2a, pairwise=True)[2] + expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A','B'], columns=['X','Y']) + assert_frame_equal(result1, expected) + assert_frame_equal(result2, expected) + assert_frame_equal(result3, expected) + assert_frame_equal(result4, expected) + + def test_expanding_corr_pairwise_diff_length(self): + # GH 7512 + df1 = DataFrame([[1,2], [3, 2], [3,4]], columns=['A','B']) + df1a = DataFrame([[1,2], [3,4]], index=[0,2], columns=['A','B']) + df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y']) + df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y']) + result1 = mom.expanding_corr(df1, df2, pairwise=True)[2] + result2 = mom.expanding_corr(df1, df2a, pairwise=True)[2] + result3 = mom.expanding_corr(df1a, df2, pairwise=True)[2] + result4 = mom.expanding_corr(df1a, df2a, pairwise=True)[2] + expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A','B'], columns=['X','Y']) + assert_frame_equal(result1, expected) + assert_frame_equal(result2, expected) + assert_frame_equal(result3, expected) + assert_frame_equal(result4, expected) + def test_rolling_skew_edge_cases(self): all_nan = Series([np.NaN] * 5)
Closes https://github.com/pydata/pandas/issues/7512.
https://api.github.com/repos/pandas-dev/pandas/pulls/7604
2014-06-29T00:20:04Z
2014-07-01T10:14:18Z
2014-07-01T10:14:18Z
2014-09-10T00:12:47Z
API: DatetimeIndex and PeriodIndex have same representation
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index f4a179c60c76c..97e6ee51511bc 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -89,7 +89,7 @@ Enhancements - Add ``NotImplementedError`` for simultaneous use of ``chunksize`` and ``nrows`` for read_csv() (:issue:`6774`). - +- ``PeriodIndex`` is represented as the same format as ``DatetimeIndex`` (:issue:`7601`) diff --git a/pandas/core/base.py b/pandas/core/base.py index aff2713ee85f5..b06b0856d5909 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -432,4 +432,36 @@ def max(self, axis=None): max_stamp = masked.asi8.max() return self._box_func(max_stamp) + @property + def _formatter_func(self): + """ + Format function to convert value to representation + """ + return str + + def _format_footer(self): + tagline = 'Length: %d, Freq: %s, Timezone: %s' + return tagline % (len(self), self.freqstr, self.tz) + + def __unicode__(self): + formatter = self._formatter_func + summary = str(self.__class__) + '\n' + + n = len(self) + if n == 0: + pass + elif n == 1: + first = formatter(self[0]) + summary += '[%s]\n' % first + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary += '[%s, %s]\n' % (first, last) + else: + first = formatter(self[0]) + last = formatter(self[-1]) + summary += '[%s, ..., %s]\n' % (first, last) + + summary += self._format_footer() + return summary diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index f41e745013f08..c2fb7017ee4d6 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -572,6 +572,40 @@ def test_minmax(self): obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) + def test_representation(self): + idx1 = DatetimeIndex([], freq='D') + idx2 = DatetimeIndex(['2011-01-01'], freq='D') + idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = 
DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], + freq='H', tz='Asia/Tokyo') + idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], + tz='US/Eastern') + + exp1 = """<class 'pandas.tseries.index.DatetimeIndex'> +Length: 0, Freq: D, Timezone: None""" + exp2 = """<class 'pandas.tseries.index.DatetimeIndex'> +[2011-01-01] +Length: 1, Freq: D, Timezone: None""" + exp3 = """<class 'pandas.tseries.index.DatetimeIndex'> +[2011-01-01, 2011-01-02] +Length: 2, Freq: D, Timezone: None""" + exp4 = """<class 'pandas.tseries.index.DatetimeIndex'> +[2011-01-01, ..., 2011-01-03] +Length: 3, Freq: D, Timezone: None""" + exp5 = """<class 'pandas.tseries.index.DatetimeIndex'> +[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00] +Length: 3, Freq: H, Timezone: Asia/Tokyo""" + exp6 = """<class 'pandas.tseries.index.DatetimeIndex'> +[2011-01-01 09:00:00-05:00, ..., NaT] +Length: 3, Freq: None, Timezone: US/Eastern""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], + [exp1, exp2, exp3, exp4, exp5, exp6]): + for func in ['__repr__', '__unicode__', '__str__']: + result = getattr(idx, func)() + self.assertEqual(result, expected) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -650,6 +684,52 @@ def test_minmax(self): self.assertEqual(result.ordinal, tslib.iNaT) self.assertEqual(result.freq, 'M') + def test_representation(self): + # GH 7601 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') + + idx7 = pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + + exp1 = """<class 
'pandas.tseries.period.PeriodIndex'> +Length: 0, Freq: D""" + exp2 = """<class 'pandas.tseries.period.PeriodIndex'> +[2011-01-01] +Length: 1, Freq: D""" + exp3 = """<class 'pandas.tseries.period.PeriodIndex'> +[2011-01-01, 2011-01-02] +Length: 2, Freq: D""" + exp4 = """<class 'pandas.tseries.period.PeriodIndex'> +[2011-01-01, ..., 2011-01-03] +Length: 3, Freq: D""" + exp5 = """<class 'pandas.tseries.period.PeriodIndex'> +[2011, ..., 2013] +Length: 3, Freq: A-DEC""" + exp6 = """<class 'pandas.tseries.period.PeriodIndex'> +[2011-01-01 09:00, ..., NaT] +Length: 3, Freq: H""" + exp7 = """<class 'pandas.tseries.period.PeriodIndex'> +[2013Q1] +Length: 1, Freq: Q-DEC""" + exp8 = """<class 'pandas.tseries.period.PeriodIndex'> +[2013Q1, 2013Q2] +Length: 2, Freq: Q-DEC""" + exp9 = """<class 'pandas.tseries.period.PeriodIndex'> +[2013Q1, ..., 2013Q3] +Length: 3, Freq: Q-DEC""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], + [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): + for func in ['__repr__', '__unicode__', '__str__']: + result = getattr(idx, func)() + self.assertEqual(result, expected) + if __name__ == '__main__': import nose diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 9761f22e4520d..9473b10876600 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -565,31 +565,11 @@ def _is_dates_only(self): from pandas.core.format import _is_dates_only return _is_dates_only(self.values) - def __unicode__(self): + @property + def _formatter_func(self): from pandas.core.format import _get_format_datetime64 - formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) - - values = self.values - - freq = self.freqstr - summary = str(self.__class__) - if len(self) == 1: - first = formatter(values[0], tz=self.tz) - summary += '\n[%s]' % first - elif len(self) == 2: - first = formatter(values[0], tz=self.tz) - last = formatter(values[-1], tz=self.tz) - summary += '\n[%s, %s]' % (first, last) 
- elif len(self) > 2: - first = formatter(values[0], tz=self.tz) - last = formatter(values[-1], tz=self.tz) - summary += '\n[%s, ..., %s]' % (first, last) - - tagline = '\nLength: %d, Freq: %s, Timezone: %s' - summary += tagline % (len(self), freq, self.tz) - - return summary + return lambda x: formatter(x, tz=self.tz) def __reduce__(self): """Necessary for making this object picklable""" diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 5ded7161130bc..77e9677f0b723 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -1129,37 +1129,9 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self._reset_identity() - def __repr__(self): - output = com.pprint_thing(self.__class__) + '\n' - output += 'freq: %s\n' % self.freq - n = len(self) - if n == 1: - output += '[%s]\n' % (self[0]) - elif n == 2: - output += '[%s, %s]\n' % (self[0], self[-1]) - elif n: - output += '[%s, ..., %s]\n' % (self[0], self[-1]) - output += 'length: %d' % n - return output - - def __unicode__(self): - output = self.__class__.__name__ - output += u('(') - prefix = '' if compat.PY3 else 'u' - mapper = "{0}'{{0}}'".format(prefix) - output += '[{0}]'.format(', '.join(map(mapper.format, self))) - output += ", freq='{0}'".format(self.freq) - output += ')' - return output - - def __bytes__(self): - encoding = com.get_option('display.encoding') - return self.__unicode__().encode(encoding, 'replace') - - def __str__(self): - if compat.PY3: - return self.__unicode__() - return self.__bytes__() + def _format_footer(self): + tagline = 'Length: %d, Freq: %s' + return tagline % (len(self), self.freqstr) def take(self, indices, axis=None): """ diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 84c0c40de369a..c4bac4b9b14f0 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1798,65 +1798,39 @@ def test_asfreq_nat(self): expected = PeriodIndex(['2011Q1', '2011Q1', 
'NaT', '2011Q2'], freq='Q') self.assertTrue(result.equals(expected)) - def test_ts_repr(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010') - ts = Series(np.random.randn(len(index)), index=index) - repr(ts) # ?? - - val = period_range('2013Q1', periods=1, freq="Q") - expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1]\nlength: 1" - assert_equal(repr(val), expected) - - val = period_range('2013Q1', periods=2, freq="Q") - expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1, 2013Q2]\nlength: 2" - assert_equal(repr(val), expected) - - val = period_range('2013Q1', periods=3, freq="Q") - expected = "<class 'pandas.tseries.period.PeriodIndex'>\nfreq: Q-DEC\n[2013Q1, ..., 2013Q3]\nlength: 3" - assert_equal(repr(val), expected) - - def test_period_index_unicode(self): + def test_period_index_length(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') assert_equal(len(pi), 9) - assert_equal(pi, eval(compat.text_type(pi))) pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009') assert_equal(len(pi), 4 * 9) - assert_equal(pi, eval(compat.text_type(pi))) pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') assert_equal(len(pi), 12 * 9) - assert_equal(pi, eval(compat.text_type(pi))) start = Period('02-Apr-2005', 'B') i1 = PeriodIndex(start=start, periods=20) assert_equal(len(i1), 20) assert_equal(i1.freq, start.freq) assert_equal(i1[0], start) - assert_equal(i1, eval(compat.text_type(i1))) end_intv = Period('2006-12-31', 'W') i1 = PeriodIndex(end=end_intv, periods=10) assert_equal(len(i1), 10) assert_equal(i1.freq, end_intv.freq) assert_equal(i1[-1], end_intv) - assert_equal(i1, eval(compat.text_type(i1))) end_intv = Period('2006-12-31', '1w') i2 = PeriodIndex(end=end_intv, periods=10) assert_equal(len(i1), len(i2)) self.assertTrue((i1 == i2).all()) assert_equal(i1.freq, i2.freq) - assert_equal(i1, eval(compat.text_type(i1))) - assert_equal(i2, eval(compat.text_type(i2))) 
end_intv = Period('2006-12-31', ('w', 1)) i2 = PeriodIndex(end=end_intv, periods=10) assert_equal(len(i1), len(i2)) self.assertTrue((i1 == i2).all()) assert_equal(i1.freq, i2.freq) - assert_equal(i1, eval(compat.text_type(i1))) - assert_equal(i2, eval(compat.text_type(i2))) try: PeriodIndex(start=start, end=end_intv) @@ -1866,7 +1840,6 @@ def test_period_index_unicode(self): end_intv = Period('2005-05-01', 'B') i1 = PeriodIndex(start=start, end=end_intv) - assert_equal(i1, eval(compat.text_type(i1))) try: PeriodIndex(start=start) @@ -1879,12 +1852,10 @@ def test_period_index_unicode(self): i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) assert_equal(len(i2), 2) assert_equal(i2[0], end_intv) - assert_equal(i2, eval(compat.text_type(i2))) i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')])) assert_equal(len(i2), 2) assert_equal(i2[0], end_intv) - assert_equal(i2, eval(compat.text_type(i2))) # Mixed freq should fail vals = [end_intv, Period('2006-12-31', 'w')]
Closes #7601. Made `PeriodIndex` to have dummy timezone representation to be consistent with `DatetimeIndex`, but this should be ommited? ### Output after the fix ``` # DatetimeIndex (No change) pd.date_range(start='2011-01-01', periods=10, freq='M') # <class 'pandas.tseries.index.DatetimeIndex'> # [2011-01-31, ..., 2011-10-31] # Length: 10, Freq: M, Timezone: None # PeriodIndex pd.period_range(start='2011-01-01', periods=10, freq='M') # <class 'pandas.tseries.period.PeriodIndex'> # [2011-01, ..., 2011-10] # Length: 10, Freq: M ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7602
2014-06-28T14:48:14Z
2014-06-29T20:15:53Z
2014-06-29T20:15:53Z
2014-09-02T13:21:29Z
BUG: GroupBy.size created by TimeGrouper raises AttributeError
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 97e6ee51511bc..8a3ab4c83ef4b 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -260,6 +260,7 @@ Bug Fixes ``Index`` (:issue:`7464`). - Bug in ``DataFrame.reset_index`` loses ``tz`` (:issue:`3950`) - Bug in ``DatetimeIndex.freqstr`` raises ``AttributeError`` when ``freq`` is ``None`` (:issue:`7606`) +- Bug in ``GroupBy.size`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`7453`) - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c7611d9829308..7e32fc75be8fb 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1643,8 +1643,9 @@ def indices(self): i = 0 for label, bin in zip(self.binlabels, self.bins): - if label is not tslib.NaT and i < bin: - indices[label] = list(range(i, bin)) + if i < bin: + if label is not tslib.NaT: + indices[label] = list(range(i, bin)) i = bin return indices @@ -1665,6 +1666,22 @@ def levels(self): def names(self): return [self.binlabels.name] + def size(self): + """ + Compute group sizes + + """ + base = Series(np.zeros(len(self.result_index), dtype=np.int64), + index=self.result_index) + indices = self.indices + for k, v in compat.iteritems(indices): + indices[k] = len(v) + bin_counts = Series(indices, dtype=np.int64) + result = base.add(bin_counts, fill_value=0) + # addition with fill_value changes dtype to float64 + result = result.astype(np.int64) + return result + #---------------------------------------------------------------------- # cython aggregation diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 7c73933d9b001..ff8b6945a23be 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1326,6 +1326,13 @@ def test_aggregate_normal(self): dt_result = 
getattr(dt_grouped, func)() assert_frame_equal(expected, dt_result) + # GH 7453 + for func in ['size']: + expected = getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_series_equal(expected, dt_result) + """ for func in ['first', 'last']: expected = getattr(normal_grouped, func)() @@ -1339,7 +1346,7 @@ def test_aggregate_normal(self): dt_result = getattr(dt_grouped, func)(3) assert_frame_equal(expected, dt_result) """ - # if TimeGrouper is used included, 'size' 'first','last' and 'nth' doesn't work yet + # if TimeGrouper is used included, 'first','last' and 'nth' doesn't work yet def test_aggregate_with_nat(self): # check TimeGrouper's aggregation is identical as normal groupby @@ -1375,7 +1382,16 @@ def test_aggregate_with_nat(self): dt_result = getattr(dt_grouped, func)() assert_frame_equal(expected, dt_result) - # if NaT is included, 'var', 'std', 'mean', 'size', 'first','last' and 'nth' doesn't work yet + for func in ['size']: + normal_result = getattr(normal_grouped, func)() + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_series_equal(expected, dt_result) + + # if NaT is included, 'var', 'std', 'mean', 'first','last' and 'nth' doesn't work yet if __name__ == '__main__': diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 6a444d0a09af7..d1bea79a639cc 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -108,16 +108,26 @@ def f(): # size() speed setup = common_setup + """ -df = DataFrame({'key1': np.random.randint(0, 500, size=100000), - 'key2': np.random.randint(0, 100, size=100000), - 'value1' : np.random.randn(100000), - 'value2' : np.random.randn(100000), - 'value3' : np.random.randn(100000)}) +n = 100000 +offsets = np.random.randint(n, 
size=n).astype('timedelta64[ns]') +dates = np.datetime64('now') + offsets +df = DataFrame({'key1': np.random.randint(0, 500, size=n), + 'key2': np.random.randint(0, 100, size=n), + 'value1' : np.random.randn(n), + 'value2' : np.random.randn(n), + 'value3' : np.random.randn(n), + 'dates' : dates}) """ groupby_multi_size = Benchmark("df.groupby(['key1', 'key2']).size()", setup, start_date=datetime(2011, 10, 1)) +groupby_dt_size = Benchmark("df.groupby(['dates']).size()", + setup, start_date=datetime(2011, 10, 1)) + +groupby_dt_timegrouper_size = Benchmark("df.groupby(TimeGrouper(key='dates', freq='M')).size()", + setup, start_date=datetime(2011, 10, 1)) + #---------------------------------------------------------------------- # count() speed
Related to #7453 2nd issue: - `GroupBy.size` created by `TimeGrouper` raises `AttributeError`
https://api.github.com/repos/pandas-dev/pandas/pulls/7600
2014-06-28T11:56:31Z
2014-06-30T12:33:49Z
2014-06-30T12:33:49Z
2014-07-05T04:47:11Z
API/BUG: Make consistent datetime string parse function
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index ce1035e91391a..a07991d69d48b 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -71,6 +71,23 @@ Resample: ts.resample('D', how='mean') +.. _timeseries.overview: + +Overview +-------- + +Following table shows the type of time-related classes pandas can handle and +how to create them. + +================= ============================== ================================================== +Class Remarks How to create +================= ============================== ================================================== +``Timestamp`` Represents a single time stamp ``to_datetime``, ``Timestamp`` +``DatetimeIndex`` Index of ``Timestamps`` ``to_datetime``, ``date_range``, ``DatetimeIndex`` +``Period`` Represents a single time span ``Period`` +``PeriodIndex`` Index of ``Period`` ``period_range``, ``PeriodIndex`` +================= ============================== ================================================== + .. _timeseries.representation: Time Stamps vs. Time Spans @@ -78,30 +95,45 @@ Time Stamps vs. Time Spans Time-stamped data is the most basic type of timeseries data that associates values with points in time. For pandas objects it means using the points in -time to create the index +time. .. ipython:: python - dates = [datetime(2012, 5, 1), datetime(2012, 5, 2), datetime(2012, 5, 3)] - ts = Series(np.random.randn(3), dates) - - type(ts.index) - - ts + Timestamp(datetime(2012, 5, 1)) + Timestamp('2012-05-01') However, in many cases it is more natural to associate things like change -variables with a time span instead. +variables with a time span instead. The span represented by ``Period`` can be +specified explicitly, or inferred from datetime string format. For example: .. ipython:: python - periods = PeriodIndex([Period('2012-01'), Period('2012-02'), - Period('2012-03')]) + Period('2011-01') + + Period('2012-05', freq='D') + +``Timestamp`` and ``Period`` can be the index. 
Lists of ``Timestamp`` and +``Period`` are automatically coerce to ``DatetimeIndex`` and ``PeriodIndex`` +respectively. + +.. ipython:: python + + dates = [Timestamp('2012-05-01'), Timestamp('2012-05-02'), Timestamp('2012-05-03')] + ts = Series(np.random.randn(3), dates) + + type(ts.index) + ts.index + + ts + + periods = [Period('2012-01'), Period('2012-02'), Period('2012-03')] ts = Series(np.random.randn(3), periods) type(ts.index) + ts.index ts @@ -150,6 +182,17 @@ you can pass the ``dayfirst`` flag: considerably and on versions later then 0.13.0 explicitly specifying a format string of '%Y%m%d' takes a faster path still. +If you pass a single string to ``to_datetime``, it returns single ``Timestamp``. +Also, ``Timestamp`` can accept the string input. +Note that ``Timestamp`` doesn't accept string parsing option like ``dayfirst`` +or ``format``, use ``to_datetime`` if these are required. + +.. ipython:: python + + to_datetime('2010/11/12') + + Timestamp('2010/11/12') + Invalid Data ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 05b69bae42c28..dbe07d74854e0 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -33,6 +33,45 @@ New features Other enhancements ^^^^^^^^^^^^^^^^^^ +- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`) +- The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent" (:issue:`7599`) + + Prior to v0.17.0, ``Timestamp`` and ``to_datetime`` may parse year-only datetime-string incorrectly using today's date, otherwise ``DatetimeIndex`` uses the beginning of the year. + ``Timestamp`` and ``to_datetime`` may raise ``ValueError`` in some types of datetime-string which ``DatetimeIndex`` can parse, such as quarterly string. + + Previous Behavior + + .. code-block:: python + + In [1]: Timestamp('2012Q2') + Traceback + ... + ValueError: Unable to parse 2012Q2 + + # Results in today's date. 
+ In [2]: Timestamp('2014') + Out [2]: 2014-08-12 00:00:00 + + v0.17.0 can parse them as below. It works on ``DatetimeIndex`` also. + + New Behaviour + + .. ipython:: python + + Timestamp('2012Q2') + Timestamp('2014') + DatetimeIndex(['2012Q2', '2014']) + + .. note:: If you want to perform calculations based on today's date, use ``Timestamp.now()`` and ``pandas.tseries.offsets``. + + .. ipython:: python + + import pandas.tseries.offsets as offsets + Timestamp.now() + Timestamp.now() + offsets.DateOffset(years=1) + +- ``to_datetime`` can now accept ``yearfirst`` keyword (:issue:`7599`) + - ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`) - ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`). diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 7d4c9df64c0bb..f6a487664046c 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2047,8 +2047,9 @@ def _make_date_converter(date_parser=None, dayfirst=False, def converter(*date_cols): if date_parser is None: strs = _concat_date_cols(date_cols) + try: - return tools.to_datetime( + return tools._to_datetime( com._ensure_object(strs), utc=None, box=False, diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 9fb06d2854b11..85de5e083d6d9 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -314,14 +314,12 @@ def _get_freq_str(base, mult=1): } need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS'] -_months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', - 'OCT', 'NOV', 'DEC'] for __prefix in need_suffix: - for _m in _months: + for _m in tslib._MONTHS: _offset_to_period_map['%s-%s' % (__prefix, _m)] = \ _offset_to_period_map[__prefix] for __prefix in ['A', 'Q']: - for _m in _months: + for _m in tslib._MONTHS: _alias = '%s-%s' % (__prefix, _m) 
_offset_to_period_map[_alias] = _alias @@ -1188,12 +1186,7 @@ def is_superperiod(source, target): return target in ['N'] -def _get_rule_month(source, default='DEC'): - source = source.upper() - if '-' not in source: - return default - else: - return source.split('-')[1] +_get_rule_month = tslib._get_rule_month def _is_annual(rule): @@ -1224,15 +1217,10 @@ def _is_weekly(rule): DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] -MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', - 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] - -_month_numbers = dict((k, i) for i, k in enumerate(MONTHS)) - - +MONTHS = tslib._MONTHS +_month_numbers = tslib._MONTH_NUMBERS +_month_aliases = tslib._MONTH_ALIASES _weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS)) -_month_aliases = dict((k + 1, v) for k, v in enumerate(MONTHS)) - def _is_multiple(us, mult): return us % mult == 0 diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index a549c44d119c7..a8b6fb4389459 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -239,8 +239,9 @@ def __new__(cls, data=None, # try a few ways to make it datetime64 if lib.is_string_array(data): - data = _str_to_dt_array(data, freq, dayfirst=dayfirst, - yearfirst=yearfirst) + data = tslib.parse_str_array_to_datetime(data, freq=freq, + dayfirst=dayfirst, + yearfirst=yearfirst) else: data = tools.to_datetime(data, errors='raise') data.offset = freq @@ -254,8 +255,9 @@ def __new__(cls, data=None, return data if issubclass(data.dtype.type, compat.string_types): - data = _str_to_dt_array(data, freq, dayfirst=dayfirst, - yearfirst=yearfirst) + data = tslib.parse_str_array_to_datetime(data, freq=freq, + dayfirst=dayfirst, + yearfirst=yearfirst) if issubclass(data.dtype.type, np.datetime64): if isinstance(data, ABCSeries): @@ -288,8 +290,9 @@ def __new__(cls, data=None, values = data if lib.is_string_array(values): - subarr = _str_to_dt_array(values, freq, dayfirst=dayfirst, - yearfirst=yearfirst) + subarr = 
tslib.parse_str_array_to_datetime(values, freq=freq, dayfirst=dayfirst, + yearfirst=yearfirst) + else: try: subarr = tools.to_datetime(data, box=False) @@ -298,11 +301,11 @@ def __new__(cls, data=None, if isinstance(subarr, ABCSeries): subarr = subarr.values if subarr.dtype == np.object_: - subarr = tools.to_datetime(subarr, box=False) + subarr = tools._to_datetime(subarr, box=False) except ValueError: # tz aware - subarr = tools.to_datetime(data, box=False, utc=True) + subarr = tools._to_datetime(data, box=False, utc=True) if not np.issubdtype(subarr.dtype, np.datetime64): raise ValueError('Unable to convert %s to datetime dtype' @@ -332,7 +335,7 @@ def __new__(cls, data=None, if inferred != freq.freqstr: on_freq = cls._generate(subarr[0], None, len(subarr), None, freq, tz=tz) if not np.array_equal(subarr.asi8, on_freq.asi8): - raise ValueError('Inferred frequency {0} from passed dates does not' + raise ValueError('Inferred frequency {0} from passed dates does not ' 'conform to passed frequency {1}'.format(inferred, freq.freqstr)) if freq_infer: @@ -534,7 +537,7 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None, xdr = generate_range(offset=offset, start=_CACHE_START, end=_CACHE_END) - arr = tools.to_datetime(list(xdr), box=False) + arr = tools._to_datetime(list(xdr), box=False) cachedRange = DatetimeIndex._simple_new(arr) cachedRange.offset = offset @@ -1926,17 +1929,6 @@ def _to_m8(key, tz=None): return np.int64(tslib.pydt_to_i8(key)).view(_NS_DTYPE) -def _str_to_dt_array(arr, offset=None, dayfirst=None, yearfirst=None): - def parser(x): - result = parse_time_string(x, offset, dayfirst=dayfirst, - yearfirst=yearfirst) - return result[0] - - arr = np.asarray(arr, dtype=object) - data = _algos.arrmap_object(arr, parser) - return tools.to_datetime(data) - - _CACHE_START = Timestamp(datetime(1950, 1, 1)) _CACHE_END = Timestamp(datetime(2030, 1, 1)) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 
3a69a13739e5d..941456fa07cfa 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1508,22 +1508,7 @@ def onOffset(self, dt): modMonth = (dt.month - self.startingMonth) % 3 return BMonthEnd().onOffset(dt) and modMonth == 0 - -_int_to_month = { - 1: 'JAN', - 2: 'FEB', - 3: 'MAR', - 4: 'APR', - 5: 'MAY', - 6: 'JUN', - 7: 'JUL', - 8: 'AUG', - 9: 'SEP', - 10: 'OCT', - 11: 'NOV', - 12: 'DEC' -} - +_int_to_month = tslib._MONTH_ALIASES _month_to_int = dict((v, k) for k, v in _int_to_month.items()) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index d167982b5b0bd..c8b96076b26bd 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1432,6 +1432,25 @@ def test_dti_constructor_preserve_dti_freq(self): rng2 = DatetimeIndex(rng) self.assertEqual(rng.freq, rng2.freq) + def test_dti_constructor_years_only(self): + # GH 6961 + for tz in [None, 'UTC', 'Asia/Tokyo', 'dateutil/US/Pacific']: + rng1 = date_range('2014', '2015', freq='M', tz=tz) + expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz) + + rng2 = date_range('2014', '2015', freq='MS', tz=tz) + expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz) + + rng3 = date_range('2014', '2020', freq='A', tz=tz) + expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz) + + rng4 = date_range('2014', '2020', freq='AS', tz=tz) + expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz) + + for rng, expected in [(rng1, expected1), (rng2, expected2), + (rng3, expected3), (rng4, expected4)]: + tm.assert_index_equal(rng, expected) + def test_normalize(self): rng = date_range('1/1/2000 9:30', periods=10, freq='D') @@ -2146,6 +2165,15 @@ def test_constructor_coverage(self): from_ints = DatetimeIndex(expected.asi8) self.assertTrue(from_ints.equals(expected)) + # string with NaT + strings = np.array(['2000-01-01', '2000-01-02', 'NaT']) + result = DatetimeIndex(strings) + 
expected = DatetimeIndex(strings.astype('O')) + self.assertTrue(result.equals(expected)) + + from_ints = DatetimeIndex(expected.asi8) + self.assertTrue(from_ints.equals(expected)) + # non-conforming self.assertRaises(ValueError, DatetimeIndex, ['2000-01-01', '2000-01-02', '2000-01-04'], diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 341450f504e2a..397d3f7d2656f 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -6,13 +6,15 @@ import pandas._period as period import datetime -from pandas.core.api import Timestamp, Series, Timedelta, Period +from pandas.core.api import Timestamp, Series, Timedelta, Period, to_datetime from pandas.tslib import get_timezone from pandas._period import period_asfreq, period_ordinal -from pandas.tseries.index import date_range +from pandas.tseries.index import date_range, DatetimeIndex from pandas.tseries.frequencies import get_freq +import pandas.tseries.tools as tools import pandas.tseries.offsets as offsets import pandas.util.testing as tm +import pandas.compat as compat from pandas.util.testing import assert_series_equal import pandas.compat as compat @@ -416,6 +418,7 @@ def test_nat_fields(self): class TestDatetimeParsingWrappers(tm.TestCase): + def test_does_not_convert_mixed_integer(self): bad_date_strings = ( '-50000', @@ -444,6 +447,179 @@ def test_does_not_convert_mixed_integer(self): tslib._does_string_look_like_datetime(good_date_string) ) + def test_parsers(self): + cases = {'2011-01-01': datetime.datetime(2011, 1, 1), + '2Q2005': datetime.datetime(2005, 4, 1), + '2Q05': datetime.datetime(2005, 4, 1), + '2005Q1': datetime.datetime(2005, 1, 1), + '05Q1': datetime.datetime(2005, 1, 1), + '2011Q3': datetime.datetime(2011, 7, 1), + '11Q3': datetime.datetime(2011, 7, 1), + '3Q2011': datetime.datetime(2011, 7, 1), + '3Q11': datetime.datetime(2011, 7, 1), + + # quarterly without space + '2000Q4': datetime.datetime(2000, 10, 1), + '00Q4': 
datetime.datetime(2000, 10, 1), + '4Q2000': datetime.datetime(2000, 10, 1), + '4Q00': datetime.datetime(2000, 10, 1), + '2000q4': datetime.datetime(2000, 10, 1), + + '2000-Q4': datetime.datetime(2000, 10, 1), + '00-Q4': datetime.datetime(2000, 10, 1), + '4Q-2000': datetime.datetime(2000, 10, 1), + '4Q-00': datetime.datetime(2000, 10, 1), + + '2000q4': datetime.datetime(2000, 10, 1), + '00q4': datetime.datetime(2000, 10, 1), + + '2005': datetime.datetime(2005, 1, 1), + '2005-11': datetime.datetime(2005, 11, 1), + '2005 11': datetime.datetime(2005, 11, 1), + '11-2005': datetime.datetime(2005, 11, 1), + '11 2005': datetime.datetime(2005, 11, 1), + '200511': datetime.datetime(2020, 5, 11), + '20051109': datetime.datetime(2005, 11, 9), + + '20051109 10:15': datetime.datetime(2005, 11, 9, 10, 15), + '20051109 08H': datetime.datetime(2005, 11, 9, 8, 0), + + '2005-11-09 10:15': datetime.datetime(2005, 11, 9, 10, 15), + '2005-11-09 08H': datetime.datetime(2005, 11, 9, 8, 0), + '2005/11/09 10:15': datetime.datetime(2005, 11, 9, 10, 15), + '2005/11/09 08H': datetime.datetime(2005, 11, 9, 8, 0), + + "Thu Sep 25 10:36:28 2003": datetime.datetime(2003, 9, 25, 10, 36, 28), + "Thu Sep 25 2003": datetime.datetime(2003, 9, 25), + "Sep 25 2003": datetime.datetime(2003, 9, 25), + "January 1 2014": datetime.datetime(2014, 1, 1), + + # GH 10537 + '2014-06': datetime.datetime(2014, 6, 1), + '06-2014': datetime.datetime(2014, 6, 1), + '2014-6': datetime.datetime(2014, 6, 1), + '6-2014': datetime.datetime(2014, 6, 1), + } + + for date_str, expected in compat.iteritems(cases): + result1, _, _ = tools.parse_time_string(date_str) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) + result4 = to_datetime(np.array([date_str], dtype=object)) + result5 = Timestamp(date_str) + result6 = DatetimeIndex([date_str])[0] + result7 = date_range(date_str, freq='S', periods=1) + self.assertEqual(result1, expected) + self.assertEqual(result2, expected) + self.assertEqual(result3, 
expected) + self.assertEqual(result4, expected) + self.assertEqual(result5, expected) + self.assertEqual(result6, expected) + self.assertEqual(result7, expected) + + # NaT + result1, _, _ = tools.parse_time_string('NaT') + result2 = to_datetime('NaT') + result3 = Timestamp('NaT') + result4 = DatetimeIndex(['NaT'])[0] + self.assertTrue(result1 is tslib.NaT) + self.assertTrue(result1 is tslib.NaT) + self.assertTrue(result1 is tslib.NaT) + self.assertTrue(result1 is tslib.NaT) + + def test_parsers_quarter_invalid(self): + + cases = ['2Q 2005', '2Q-200A', '2Q-200', + '22Q2005', '6Q-20', '2Q200.'] + for case in cases: + self.assertRaises(ValueError, tools.parse_time_string, case) + + def test_parsers_dayfirst_yearfirst(self): + # str : dayfirst, yearfirst, expected + cases = {'10-11-12': [(False, False, datetime.datetime(2012, 10, 11)), + (True, False, datetime.datetime(2012, 11, 10)), + (False, True, datetime.datetime(2010, 11, 12)), + (True, True, datetime.datetime(2010, 11, 12))], + '20/12/21': [(False, False, datetime.datetime(2021, 12, 20)), + (True, False, datetime.datetime(2021, 12, 20)), + (False, True, datetime.datetime(2020, 12, 21)), + (True, True, datetime.datetime(2020, 12, 21))]} + + tm._skip_if_no_dateutil() + from dateutil.parser import parse + for date_str, values in compat.iteritems(cases): + for dayfirst, yearfirst ,expected in values: + result1, _, _ = tools.parse_time_string(date_str, dayfirst=dayfirst, + yearfirst=yearfirst) + + result2 = to_datetime(date_str, dayfirst=dayfirst, + yearfirst=yearfirst) + + result3 = DatetimeIndex([date_str], dayfirst=dayfirst, + yearfirst=yearfirst)[0] + + # Timestamp doesn't support dayfirst and yearfirst + + self.assertEqual(result1, expected) + self.assertEqual(result2, expected) + self.assertEqual(result3, expected) + + # compare with dateutil result + dateutil_result = parse(date_str, dayfirst=dayfirst, yearfirst=yearfirst) + self.assertEqual(dateutil_result, expected) + + def test_parsers_timestring(self): + 
tm._skip_if_no_dateutil() + from dateutil.parser import parse + + # must be the same as dateutil result + cases = {'10:15': (parse('10:15'), datetime.datetime(1, 1, 1, 10, 15)), + '9:05': (parse('9:05'), datetime.datetime(1, 1, 1, 9, 5)) } + + for date_str, (exp_now, exp_def) in compat.iteritems(cases): + result1, _, _ = tools.parse_time_string(date_str) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) + result4 = Timestamp(date_str) + result5 = DatetimeIndex([date_str])[0] + # parse time string return time string based on default date + # others are not, and can't be changed because it is used in + # time series plot + self.assertEqual(result1, exp_def) + self.assertEqual(result2, exp_now) + self.assertEqual(result3, exp_now) + self.assertEqual(result4, exp_now) + self.assertEqual(result5, exp_now) + + def test_parsers_monthfreq(self): + cases = {'201101': datetime.datetime(2011, 1, 1, 0, 0), + '200005': datetime.datetime(2000, 5, 1, 0, 0)} + + for date_str, expected in compat.iteritems(cases): + result1, _, _ = tools.parse_time_string(date_str, freq='M') + result2 = tools._to_datetime(date_str, freq='M') + self.assertEqual(result1, expected) + self.assertEqual(result2, expected) + + def test_parsers_quarterly_with_freq(self): + + msg = 'Incorrect quarterly string is given, quarter must be between 1 and 4: 2013Q5' + with tm.assertRaisesRegexp(tslib.DateParseError, msg): + tools.parse_time_string('2013Q5') + + # GH 5418 + msg = 'Unable to retrieve month information from given freq: INVLD-L-DEC-SAT' + with tm.assertRaisesRegexp(tslib.DateParseError, msg): + tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT') + + cases = {('2013Q2', None): datetime.datetime(2013, 4, 1), + ('2013Q2', 'A-APR'): datetime.datetime(2012, 8, 1), + ('2013-Q2', 'A-DEC'): datetime.datetime(2013, 4, 1)} + + for (date_str, freq), exp in compat.iteritems(cases): + result, _, _ = tools.parse_time_string(date_str, freq=freq) + self.assertEqual(result, exp) + class 
TestArrayToDatetime(tm.TestCase): def test_parsing_valid_dates(self): diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 65fe3420f670c..5ff6a48981ceb 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -12,9 +12,6 @@ try: import dateutil - from dateutil.parser import parse, DEFAULTPARSER - from dateutil.relativedelta import relativedelta - # raise exception if dateutil 2.0 install on 2.x platform if (sys.version_info[0] == 2 and dateutil.__version__ == '2.0'): # pragma: no cover @@ -173,9 +170,10 @@ def _guess_datetime_format_for_array(arr, **kwargs): if len(non_nan_elements): return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs) -def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, - format=None, exact=True, coerce=False, unit='ns', - infer_datetime_format=False): + +def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False, + utc=None, box=True, format=None, exact=True, coerce=False, + unit='ns', infer_datetime_format=False): """ Convert argument to datetime. @@ -183,19 +181,26 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, ---------- arg : string, datetime, array of strings (with possible NAs) errors : {'ignore', 'raise'}, default 'ignore' - Errors are ignored by default (values left untouched) + Errors are ignored by default (values left untouched). dayfirst : boolean, default False - If True parses dates with the day first, eg 20/01/2005 + Specify a date parse order if `arg` is str or its list-likes. + If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse - with day first (this is a known bug). + with day first (this is a known bug, based on dateutil behavior). + yearfirst : boolean, default False + Specify a date parse order if `arg` is str or its list-likes. + If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. 
+ If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). + Warning: yearfirst=True is not strict, but will prefer to parse + with year first (this is a known bug, based on dateutil behavior). utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware - datetime.datetime objects as well) + datetime.datetime objects as well). box : boolean, default True - If True returns a DatetimeIndex, if False returns ndarray of values + If True returns a DatetimeIndex, if False returns ndarray of values. format : string, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse - all the way up to nanoseconds + all the way up to nanoseconds. exact : boolean, True by default If True, require an exact format match. If False, allow the format to match anywhere in the target string. @@ -203,7 +208,7 @@ Timestamps outside the interval between Timestamp.min and Timestamp.max (approximately 1677-09-22 to 2262-04-11) will be also forced to NaT. unit : unit of the arg (D,s,ms,us,ns) denote the unit in epoch - (e.g. a unix timestamp), which is an integer/float number + (e.g. a unix timestamp), which is an integer/float number. infer_datetime_format : boolean, default False If no `format` is given, try to infer the format based on the first datetime string. Provides a large speed-up in many cases. 
@@ -254,7 +259,18 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, >>> pd.to_datetime('13000101', format='%Y%m%d', coerce=True) NaT """ - from pandas import Timestamp + return _to_datetime(arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, + utc=utc, box=box, format=format, exact=exact, coerce=coerce, + unit=unit, infer_datetime_format=infer_datetime_format) + + +def _to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False, + utc=None, box=True, format=None, exact=True, coerce=False, + unit='ns', freq=None, infer_datetime_format=False): + """ + Same as to_datetime, but accept freq for + DatetimeIndex internal construction + """ from pandas.core.series import Series from pandas.tseries.index import DatetimeIndex @@ -326,6 +342,7 @@ def _convert_listlike(arg, box, format): if result is None and (format is None or infer_datetime_format): result = tslib.array_to_datetime(arg, raise_=errors == 'raise', utc=utc, dayfirst=dayfirst, + yearfirst=yearfirst, freq=freq, coerce=coerce, unit=unit) if com.is_datetime64_dtype(result) and box: @@ -341,7 +358,7 @@ def _convert_listlike(arg, box, format): if arg is None: return arg - elif isinstance(arg, Timestamp): + elif isinstance(arg, tslib.Timestamp): return arg elif isinstance(arg, Series): values = _convert_listlike(arg.values, False, format) @@ -351,8 +368,6 @@ def _convert_listlike(arg, box, format): return _convert_listlike(np.array([ arg ]), box, format)[0] -class DateParseError(ValueError): - pass def _attempt_YYYYMMDD(arg, coerce): """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, @@ -392,14 +407,6 @@ def calc_with_mask(carg,mask): return None -# patterns for quarters like '4Q2005', '05Q1' -qpat1full = re.compile(r'(\d)Q-?(\d\d\d\d)') -qpat2full = re.compile(r'(\d\d\d\d)-?Q(\d)') -qpat1 = re.compile(r'(\d)Q-?(\d\d)') -qpat2 = re.compile(r'(\d\d)-?Q(\d)') -ypat = re.compile(r'(\d\d\d\d)$') -has_time = re.compile('(.+)([\s]|T)+(.+)') - def 
parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): """ @@ -421,183 +428,19 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): datetime, datetime/dateutil.parser._result, str """ from pandas.core.config import get_option - from pandas.tseries.offsets import DateOffset - from pandas.tseries.frequencies import (_get_rule_month, _month_numbers, - _get_freq_str) - if not isinstance(arg, compat.string_types): return arg - arg = arg.upper() - - default = datetime(1, 1, 1).replace(hour=0, minute=0, - second=0, microsecond=0) - - # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 - if len(arg) in [4, 5, 6, 7]: - m = ypat.match(arg) - if m: - ret = default.replace(year=int(m.group(1))) - return ret, ret, 'year' - - add_century = False - if len(arg) > 5: - qpats = [(qpat1full, 1), (qpat2full, 0)] - else: - add_century = True - qpats = [(qpat1, 1), (qpat2, 0)] - - for pat, yfirst in qpats: - qparse = pat.match(arg) - if qparse is not None: - if yfirst: - yi, qi = 1, 2 - else: - yi, qi = 2, 1 - q = int(qparse.group(yi)) - y_str = qparse.group(qi) - y = int(y_str) - if add_century: - y += 2000 - - if freq is not None: - # hack attack, #1228 - mnum = _month_numbers[_get_rule_month(freq)] + 1 - month = (mnum + (q - 1) * 3) % 12 + 1 - if month > mnum: - y -= 1 - else: - month = (q - 1) * 3 + 1 - - ret = default.replace(year=y, month=month) - return ret, ret, 'quarter' - - is_mo_str = freq is not None and freq == 'M' - is_mo_off = getattr(freq, 'rule_code', None) == 'M' - is_monthly = is_mo_str or is_mo_off - if len(arg) == 6 and is_monthly: - try: - ret = _try_parse_monthly(arg) - if ret is not None: - return ret, ret, 'month' - except Exception: - pass - - # montly f7u12 - mresult = _attempt_monthly(arg) - if mresult: - return mresult - if dayfirst is None: dayfirst = get_option("display.date_dayfirst") if yearfirst is None: yearfirst = get_option("display.date_yearfirst") - try: - parsed, reso = dateutil_parse(arg, default, 
dayfirst=dayfirst, - yearfirst=yearfirst) - except Exception as e: - # TODO: allow raise of errors within instead - raise DateParseError(e) - - if parsed is None: - raise DateParseError("Could not parse %s" % arg) - - return parsed, parsed, reso # datetime, resolution - - -def dateutil_parse(timestr, default, - ignoretz=False, tzinfos=None, - **kwargs): - """ lifted from dateutil to get resolution""" - from dateutil import tz - import time - fobj = StringIO(str(timestr)) - - res = DEFAULTPARSER._parse(fobj, **kwargs) - - # dateutil 2.2 compat - if isinstance(res, tuple): - res, _ = res - - if res is None: - raise ValueError("unknown string format") - - repl = {} - reso = None - for attr in ["year", "month", "day", "hour", - "minute", "second", "microsecond"]: - value = getattr(res, attr) - if value is not None: - repl[attr] = value - reso = attr - - if reso is None: - raise ValueError("Cannot parse date.") - - if reso == 'microsecond': - if repl['microsecond'] == 0: - reso = 'second' - elif repl['microsecond'] % 1000 == 0: - reso = 'millisecond' - - ret = default.replace(**repl) - if res.weekday is not None and not res.day: - ret = ret + relativedelta.relativedelta(weekday=res.weekday) - if not ignoretz: - if callable(tzinfos) or tzinfos and res.tzname in tzinfos: - if callable(tzinfos): - tzdata = tzinfos(res.tzname, res.tzoffset) - else: - tzdata = tzinfos.get(res.tzname) - if isinstance(tzdata, datetime.tzinfo): - tzinfo = tzdata - elif isinstance(tzdata, compat.string_types): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, int): - tzinfo = tz.tzoffset(res.tzname, tzdata) - else: - raise ValueError("offset must be tzinfo subclass, " - "tz string, or int offset") - ret = ret.replace(tzinfo=tzinfo) - elif res.tzname and res.tzname in time.tzname: - ret = ret.replace(tzinfo=tz.tzlocal()) - elif res.tzoffset == 0: - ret = ret.replace(tzinfo=tz.tzutc()) - elif res.tzoffset: - ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - return ret, reso - - 
-def _attempt_monthly(val): - pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y'] - for pat in pats: - try: - ret = datetime.strptime(val, pat) - return ret, ret, 'month' - except Exception: - pass - - -def _try_parse_monthly(arg): - base = 2000 - add_base = False - default = datetime(1, 1, 1).replace(hour=0, minute=0, second=0, - microsecond=0) - - if len(arg) == 4: - add_base = True - y = int(arg[:2]) - m = int(arg[2:4]) - elif len(arg) >= 6: # 201201 - y = int(arg[:4]) - m = int(arg[4:6]) - if add_base: - y += base - ret = default.replace(year=y, month=m) - return ret + return tslib.parse_datetime_string_with_reso(arg, freq=freq, dayfirst=dayfirst, + yearfirst=yearfirst) +DateParseError = tslib.DateParseError normalize_date = tslib.normalize_date diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 27cd5e89220a9..8dc7fe824247b 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -43,17 +43,22 @@ cimport cython from datetime import timedelta, datetime from datetime import time as datetime_time +import re + # dateutil compat from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile, - tzutc as _dateutil_tzutc) + tzutc as _dateutil_tzutc, tzstr as _dateutil_tzstr) + from pandas.compat import is_platform_windows if is_platform_windows(): from dateutil.zoneinfo import gettz as _dateutil_gettz else: from dateutil.tz import gettz as _dateutil_gettz +from dateutil.relativedelta import relativedelta +from dateutil.parser import DEFAULTPARSER from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo -from pandas.compat import parse_date, string_types, iteritems +from pandas.compat import parse_date, string_types, iteritems, StringIO import operator import collections @@ -219,8 +224,22 @@ class Timestamp(_Timestamp): and is interchangable with it in most cases. It's the type used for the entries that make up a DatetimeIndex, and other timeseries oriented data structures in pandas. 
+ + Parameters + ---------- + ts_input : datetime-like, str, int, float + Value to be converted to Timestamp + offset : str, DateOffset + Offset which Timestamp will have + tz : string, pytz.timezone, dateutil.tz.tzfile or None + Time zone for time which Timestamp will have. + unit : string + numpy unit used for conversion, if ts_input is int or float """ + # Do not add ``dayfirst`` and ``yearfirst`` to Timestamp based on the discussion + # https://github.com/pydata/pandas/pull/7599 + @classmethod def fromordinal(cls, ordinal, offset=None, tz=None): """ passed an ordinal, translate and convert to a ts @@ -1079,40 +1098,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit): obj = _TSObject() if util.is_string_object(ts): - if ts in _nat_strings: - ts = NaT - elif ts == 'now': - # Issue 9000, we short-circuit rather than going - # into np_datetime_strings which returns utc - ts = Timestamp.now(tz) - elif ts == 'today': - # Issue 9000, we short-circuit rather than going - # into np_datetime_strings which returns a normalized datetime - ts = Timestamp.today(tz) - else: - try: - _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) - obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts) - _check_dts_bounds(&obj.dts) - if out_local == 1: - obj.tzinfo = pytz.FixedOffset(out_tzoffset) - obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC') - if tz is None: - _check_dts_bounds(&obj.dts) - return obj - else: - # Keep the converter same as PyDateTime's - ts = Timestamp(obj.value, tz=obj.tzinfo) - else: - ts = obj.value - if tz is not None: - # shift for _localize_tso - ts = tz_convert_single(ts, tz, 'UTC') - except ValueError: - try: - ts = parse_datetime_string(ts) - except Exception: - raise ValueError + return convert_str_to_tsobject(ts, tz, unit) if ts is None or ts is NaT or ts is np_NaT: obj.value = NPY_NAT @@ -1196,6 +1182,56 @@ return obj + +cpdef 
convert_str_to_tsobject(object ts, object tz, object unit, + dayfirst=False, yearfirst=False): + cdef: + _TSObject obj + int out_local = 0, out_tzoffset = 0 + + if tz is not None: + tz = maybe_get_tz(tz) + + obj = _TSObject() + + if ts in _nat_strings: + ts = NaT + elif ts == 'now': + # Issue 9000, we short-circuit rather than going + # into np_datetime_strings which returns utc + ts = Timestamp.now(tz) + elif ts == 'today': + # Issue 9000, we short-circuit rather than going + # into np_datetime_strings which returns a normalized datetime + ts = Timestamp.today(tz) + else: + try: + _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) + obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts) + _check_dts_bounds(&obj.dts) + if out_local == 1: + obj.tzinfo = pytz.FixedOffset(out_tzoffset) + obj.value = tz_convert_single(obj.value, obj.tzinfo, 'UTC') + if tz is None: + _check_dts_bounds(&obj.dts) + return obj + else: + # Keep the converter same as PyDateTime's + ts = Timestamp(obj.value, tz=obj.tzinfo) + else: + ts = obj.value + if tz is not None: + # shift for _localize_tso + ts = tz_convert_single(ts, tz, 'UTC') + except ValueError: + try: + ts = parse_datetime_string(ts, dayfirst=dayfirst, yearfirst=yearfirst) + except Exception: + raise ValueError + + return convert_to_tsobject(ts, tz, unit) + + cdef inline void _localize_tso(_TSObject obj, object tz): ''' Take a TSObject in UTC and localizes to timezone tz. 
@@ -1377,9 +1413,10 @@ def datetime_to_datetime64(ndarray[object] values): return result, inferred_tz -_not_datelike_strings = set(['a','A','m','M','p','P','t','T']) +cdef: + set _not_datelike_strings = set(['a','A','m','M','p','P','t','T']) -def _does_string_look_like_datetime(date_string): +cpdef object _does_string_look_like_datetime(object date_string): if date_string.startswith('0'): # Strings starting with 0 are more consistent with a # date-like string than a number @@ -1396,14 +1433,9 @@ def _does_string_look_like_datetime(date_string): return True -def parse_datetime_string(date_string, **kwargs): - if not _does_string_look_like_datetime(date_string): - raise ValueError('Given date string not likely a datetime.') - dt = parse_date(date_string, **kwargs) - return dt - -def format_array_from_datetime(ndarray[int64_t] values, object tz=None, object format=None, object na_rep=None): +def format_array_from_datetime(ndarray[int64_t] values, object tz=None, + object format=None, object na_rep=None): """ return a np object array of the string formatted values @@ -1484,8 +1516,260 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, object f return result -def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, - format=None, utc=None, coerce=False, unit=None): + +class DateParseError(ValueError): + pass + + +cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') + + +def parse_datetime_string(object date_string, object freq=None, + dayfirst=False, yearfirst=False, **kwargs): + + """parse datetime string, only returns datetime. + Also cares special handling matching time patterns. 
+ + Returns + ------- + datetime + """ + + cdef: + object dt + + if not _does_string_look_like_datetime(date_string): + raise ValueError('Given date string not likely a datetime.') + + if _TIMEPAT.match(date_string): + # use current datetime as default, not pass _DEFAULT_DATETIME + dt = parse_date(date_string, dayfirst=dayfirst, + yearfirst=yearfirst, **kwargs) + return dt + try: + dt, _, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq) + return dt + except DateParseError: + raise + except ValueError: + pass + + dt = parse_date(date_string, default=_DEFAULT_DATETIME, + dayfirst=dayfirst, yearfirst=yearfirst, **kwargs) + return dt + + +def parse_datetime_string_with_reso(object date_string, object freq=None, + dayfirst=False, yearfirst=False, **kwargs): + """parse datetime string, only returns datetime + + Returns + ------- + datetime + """ + + cdef: + object parsed, reso + + if not _does_string_look_like_datetime(date_string): + raise ValueError('Given date string not likely a datetime.') + + try: + return _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq) + except DateParseError: + raise + except ValueError: + pass + + try: + parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME, + dayfirst=dayfirst, yearfirst=yearfirst) + except Exception as e: + # TODO: allow raise of errors within instead + raise DateParseError(e) + if parsed is None: + raise DateParseError("Could not parse %s" % date_string) + return parsed, parsed, reso + + +cdef inline object _parse_dateabbr_string(object date_string, object default, + object freq): + cdef: + object ret + int year, quarter, month, mnum, date_len + + # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 + + if date_string in _nat_strings: + return NaT, NaT, '' + + date_string = date_string.upper() + date_len = len(date_string) + + if date_len == 4: + # parse year only like 2000 + try: + ret = default.replace(year=int(date_string)) + return ret, ret, 'year' + except ValueError: + 
pass + + try: + if 4 <= date_len <= 7: + i = date_string.index('Q', 1, 6) + if i == 1: + quarter = int(date_string[0]) + if date_len == 4 or (date_len == 5 and date_string[i + 1] == '-'): + # r'(\d)Q-?(\d\d)') + year = 2000 + int(date_string[-2:]) + elif date_len == 6 or (date_len == 7 and date_string[i + 1] == '-'): + # r'(\d)Q-?(\d\d\d\d)') + year = int(date_string[-4:]) + else: + raise ValueError + elif i == 2 or i == 3: + # r'(\d\d)-?Q(\d)' + if date_len == 4 or (date_len == 5 and date_string[i - 1] == '-'): + quarter = int(date_string[-1]) + year = 2000 + int(date_string[:2]) + else: + raise ValueError + elif i == 4 or i == 5: + if date_len == 6 or (date_len == 7 and date_string[i - 1] == '-'): + # r'(\d\d\d\d)-?Q(\d)' + quarter = int(date_string[-1]) + year = int(date_string[:4]) + else: + raise ValueError + + if not (1 <= quarter <= 4): + msg = 'Incorrect quarterly string is given, quarter must be between 1 and 4: {0}' + raise DateParseError(msg.format(date_string)) + + if freq is not None: + # hack attack, #1228 + try: + mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1 + except (KeyError, ValueError): + msg = 'Unable to retrieve month information from given freq: {0}'.format(freq) + raise DateParseError(msg) + + month = (mnum + (quarter - 1) * 3) % 12 + 1 + if month > mnum: + year -= 1 + else: + month = (quarter - 1) * 3 + 1 + + ret = default.replace(year=year, month=month) + return ret, ret, 'quarter' + + except DateParseError: + raise + except ValueError: + pass + + if date_len == 6 and (freq == 'M' or getattr(freq, 'rule_code', None) == 'M'): + year = int(date_string[:4]) + month = int(date_string[4:6]) + try: + ret = default.replace(year=year, month=month) + return ret, ret, 'month' + except ValueError: + pass + + for pat in ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']: + try: + ret = datetime.strptime(date_string, pat) + return ret, ret, 'month' + except ValueError: + pass + + raise ValueError('Unable to parse {0}'.format(date_string)) + + +def 
dateutil_parse(object timestr, object default, ignoretz=False, + tzinfos=None, **kwargs): + """ lifted from dateutil to get resolution""" + + cdef: + object fobj, res, attr, ret, tzdata + object reso = None + dict repl = {} + + fobj = StringIO(str(timestr)) + res = DEFAULTPARSER._parse(fobj, **kwargs) + + # dateutil 2.2 compat + if isinstance(res, tuple): + res, _ = res + + if res is None: + raise ValueError("unknown string format") + + for attr in ["year", "month", "day", "hour", + "minute", "second", "microsecond"]: + value = getattr(res, attr) + if value is not None: + repl[attr] = value + reso = attr + + if reso is None: + raise ValueError("Cannot parse date.") + + if reso == 'microsecond': + if repl['microsecond'] == 0: + reso = 'second' + elif repl['microsecond'] % 1000 == 0: + reso = 'millisecond' + + ret = default.replace(**repl) + if res.weekday is not None and not res.day: + ret = ret + relativedelta.relativedelta(weekday=res.weekday) + if not ignoretz: + if callable(tzinfos) or tzinfos and res.tzname in tzinfos: + if callable(tzinfos): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, string_types): + tzinfo = _dateutil_tzstr(tzdata) + elif isinstance(tzdata, int): + tzinfo = tzoffset(res.tzname, tzdata) + else: + raise ValueError("offset must be tzinfo subclass, " + "tz string, or int offset") + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=_dateutil_tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=_dateutil_tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tzoffset(res.tzname, res.tzoffset)) + return ret, reso + + +# const for parsers + +_DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0, second=0, microsecond=0) +_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', + 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] +_MONTH_NUMBERS = dict((k, 
i) for i, k in enumerate(_MONTHS)) +_MONTH_ALIASES = dict((k + 1, v) for k, v in enumerate(_MONTHS)) + + +cpdef object _get_rule_month(object source, object default='DEC'): + source = source.upper() + if '-' not in source: + return default + else: + return source.split('-')[1] + + +cpdef array_to_datetime(ndarray[object] values, raise_=False, + dayfirst=False, yearfirst=False, freq=None, + format=None, utc=None, coerce=False, unit=None): cdef: Py_ssize_t i, n = len(values) object val, py_dt @@ -1577,7 +1861,6 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, elif val in _nat_strings: iresult[i] = iNaT continue - _string_to_dts(val, &dts, &out_local, &out_tzoffset) value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts) if out_local == 1: @@ -1587,7 +1870,8 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, _check_dts_bounds(&dts) except ValueError: try: - py_dt = parse_datetime_string(val, dayfirst=dayfirst) + py_dt = parse_datetime_string(val, dayfirst=dayfirst, + yearfirst=yearfirst, freq=freq) except Exception: if coerce: iresult[i] = iNaT @@ -1647,7 +1931,8 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, oresult[i] = 'NaT' continue try: - oresult[i] = parse_datetime_string(val, dayfirst=dayfirst) + oresult[i] = parse_datetime_string(val, dayfirst=dayfirst, + yearfirst=yearfirst, freq=freq) _pydatetime_to_dts(oresult[i], &dts) _check_dts_bounds(&dts) except Exception: @@ -1662,6 +1947,29 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, return oresult +def parse_str_array_to_datetime(ndarray values, dayfirst=False, + yearfirst=False, object freq=None): + """Shortcut to parse str array for quicker DatetimeIndex construction""" + cdef: + Py_ssize_t i, n = len(values) + object val, py_dt + ndarray[int64_t] iresult + _TSObject _ts + + iresult = np.empty(n, dtype='i8') + + for i in range(n): + val = values[i] + try: + py_dt = 
parse_datetime_string(val, dayfirst=dayfirst, + yearfirst=yearfirst, freq=freq) + except Exception: + raise ValueError + _ts = convert_to_tsobject(py_dt, None, None) + iresult[i] = _ts.value + + return iresult + # Similar to Timestamp/datetime, this is a construction requirement for timedeltas # we need to do object instantiation in python # This will serve as a C extension type that
### Original Description Allows `DatetimeIndex.__init__` to accept `NaT` string representation. ### Updated Description The original problem is caused by the inconsistencies in parsing function. Make following results to be consistent. ``` date_str = '2011Q3' pd.tseries.tools.parse_time_string(date_str)[0] #2011-07-01 00:00:00 pd.DatetimeIndex([date_str])[0] #2011-07-01 00:00:00 pd.tseries.tools.to_datetime(date_str) # 2011Q3 <type str>, not parsed pd.Timestamp(date_str) # ValueError: Unable to parse 2011Q3 ``` Summary: - Added `yearfirst` to `to_datetime`. - Defined private func `_to_datetime`, because `DatetimeIndex` uses `freq` kw to parse 6 character monthly strings (and do not want `to_datetime` to accept `freq`) - `Timestamp` is left as it is (without `dayfirst` and `yearfirst`) Also, closes #5418. closes #10537
https://api.github.com/repos/pandas-dev/pandas/pulls/7599
2014-06-28T11:48:04Z
2015-07-14T15:52:23Z
2015-07-14T15:52:23Z
2015-07-20T03:46:52Z
TST: Change nose.SkipTest to raise nose.SkipTest for io.data.Options.
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index d2080fe5e1262..5d2a8ef08c95b 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -265,7 +265,7 @@ def test_get_options_data(self): try: options = self.aapl.get_options_data(expiry=self.expiry) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) self.assertTrue(len(options) > 1) @network @@ -274,7 +274,7 @@ def test_get_near_stock_price(self): options = self.aapl.get_near_stock_price(call=True, put=True, expiry=self.expiry) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) self.assertTrue(len(options) > 1) @network @@ -282,7 +282,7 @@ def test_get_call_data(self): try: calls = self.aapl.get_call_data(expiry=self.expiry) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) self.assertTrue(len(calls) > 1) @network @@ -290,7 +290,7 @@ def test_get_put_data(self): try: puts = self.aapl.get_put_data(expiry=self.expiry) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) self.assertTrue(len(puts) > 1) @network @@ -367,7 +367,7 @@ def test_get_options_data_warning(self): try: self.aapl.get_options_data(month=self.month, year=self.year) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) @network def test_get_near_stock_price_warning(self): @@ -378,7 +378,7 @@ def test_get_near_stock_price_warning(self): month=self.month, year=self.year) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) @network def test_get_call_data_warning(self): @@ -386,7 +386,7 @@ def test_get_call_data_warning(self): try: self.aapl.get_call_data(month=self.month, year=self.year) except RemoteDataError as e: - nose.SkipTest(e) + raise nose.SkipTest(e) @network def test_get_put_data_warning(self): @@ -394,7 +394,7 @@ def test_get_put_data_warning(self): try: self.aapl.get_put_data(month=self.month, year=self.year) except RemoteDataError as e: - nose.SkipTest(e) + raise 
nose.SkipTest(e) class TestDataReader(tm.TestCase):
Fixes #7596
https://api.github.com/repos/pandas-dev/pandas/pulls/7598
2014-06-28T02:34:17Z
2014-06-28T13:59:16Z
2014-06-28T13:59:16Z
2014-06-28T13:59:21Z
COMPAT: comparisons vs numpy nat not working on windows
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 679359f1b4d33..c957884b3cebb 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -67,7 +67,7 @@ cdef int64_t NPY_NAT = util.get_nat() compat_NaT = np.array([NPY_NAT]).astype('m8[ns]').item() # numpy actual nat object -np_NaT = np.datetime64('NaT',dtype='M8') +np_NaT = np.datetime64('NaT') try: basestring @@ -892,8 +892,11 @@ cdef convert_to_tsobject(object ts, object tz, object unit): if ts is None or ts is NaT or ts is np_NaT: obj.value = NPY_NAT elif is_datetime64_object(ts): - obj.value = _get_datetime64_nanos(ts) - pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts) + if ts == np_NaT: + obj.value = NPY_NAT + else: + obj.value = _get_datetime64_nanos(ts) + pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts) elif is_integer_object(ts): if ts == NPY_NAT: obj.value = NPY_NAT @@ -1218,7 +1221,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, continue raise elif util.is_datetime64_object(val): - if val is np_NaT or val.view('i8') == iNaT: + if val == np_NaT or val.view('i8') == iNaT: iresult[i] = iNaT else: try: @@ -1296,10 +1299,13 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, val = values[i] # set as nan if is even a datetime NaT - if _checknull_with_nat(val) or val is np_NaT: + if _checknull_with_nat(val): oresult[i] = np.nan elif util.is_datetime64_object(val): - oresult[i] = val.item() + if val == np_NaT: + oresult[i] = np.nan + else: + oresult[i] = val.item() else: oresult[i] = val return oresult
https://api.github.com/repos/pandas-dev/pandas/pulls/7597
2014-06-28T01:44:01Z
2014-06-28T02:24:58Z
2014-06-28T02:24:58Z
2014-07-22T18:40:54Z
TST: tests for GH7594, ensure conversion on tz-aware datetimes in the constuctors
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index ef57256c7ee06..7368fcf8dac26 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3587,6 +3587,19 @@ def test_constructor_with_datetimes(self): expected.sort_index() assert_series_equal(result, expected) + # GH 7594 + # don't coerce tz-aware + import pytz + tz = pytz.timezone('US/Eastern') + dt = tz.localize(datetime(2012, 1, 1)) + df = DataFrame({'End Date': dt}, index=[0]) + self.assertEqual(df.iat[0,0],dt) + assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') })) + + df = DataFrame([{'End Date': dt}]) + self.assertEqual(df.iat[0,0],dt) + assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') })) + def test_constructor_for_list_with_dtypes(self): intname = np.dtype(np.int_).name floatname = np.dtype(np.float_).name
closes #7594
https://api.github.com/repos/pandas-dev/pandas/pulls/7595
2014-06-28T01:09:23Z
2014-06-28T01:38:16Z
2014-06-28T01:38:16Z
2014-06-28T01:38:17Z
BUG: Bug in timedelta inference when assigning an incomplete Series (GH7592)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 45a5d55ca047d..cf14e3696b90f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -172,7 +172,7 @@ Bug Fixes - Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`) - Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:) - Bug in timeops with non-aligned Series (:issue:`7500`) - +- Bug in timedelta inference when assigning an incomplete Series (:issue:`7592`) - Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) diff --git a/pandas/core/common.py b/pandas/core/common.py index c0aab2ca1428a..c0432b53e346a 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2603,7 +2603,7 @@ def check_main(): def in_qtconsole(): """ check if we're inside an IPython qtconsole - + DEPRECATED: This is no longer needed, or working, in IPython 3 and above. """ try: @@ -2622,7 +2622,7 @@ def in_qtconsole(): def in_ipnb(): """ check if we're inside an IPython Notebook - + DEPRECATED: This is no longer used in pandas, and won't work in IPython 3 and above. 
""" diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 8100b98d6e42d..accaf4ea5cd29 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1037,9 +1037,11 @@ class FloatBlock(FloatOrComplexBlock): def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) - return issubclass(element.dtype.type, (np.floating, np.integer)) - return (isinstance(element, (float, int, np.float_, np.int_)) and - not isinstance(bool, np.bool_)) + tipo = element.dtype.type + return issubclass(tipo, (np.floating, np.integer)) and not issubclass( + tipo, (np.datetime64, np.timedelta64)) + return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance( + element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64)) def _try_cast(self, element): try: @@ -1099,7 +1101,8 @@ class IntBlock(NumericBlock): def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) - return issubclass(element.dtype.type, np.integer) + tipo = element.dtype.type + return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64)) return com.is_integer(element) def _try_cast(self, element): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4360e2b9066df..ef57256c7ee06 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -32,7 +32,8 @@ import pandas.core.format as fmt import pandas.core.datetools as datetools from pandas import (DataFrame, Index, Series, notnull, isnull, - MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv) + MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv, + _np_version_under1p7) import pandas as pd from pandas.parser import CParserError from pandas.util.misc import is_little_endian @@ -2180,11 +2181,11 @@ def test_set_index_cast_datetimeindex(self): # reset_index with single level for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']: idx = pd.date_range('1/1/2011', periods=5, 
freq='D', tz=tz, name='idx') - df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) + df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2), datetime(2011, 1, 3), datetime(2011, 1, 4), - datetime(2011, 1, 5)], + datetime(2011, 1, 5)], 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, columns=['idx', 'a', 'b']) expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz)) @@ -3757,6 +3758,28 @@ def test_operators_timedelta64(self): self.assertTrue(df['off1'].dtype == 'timedelta64[ns]') self.assertTrue(df['off2'].dtype == 'timedelta64[ns]') + def test_datetimelike_setitem_with_inference(self): + if _np_version_under1p7: + raise nose.SkipTest("numpy < 1.7") + + # GH 7592 + # assignment of timedeltas with NaT + + one_hour = timedelta(hours=1) + df = DataFrame(index=date_range('20130101',periods=4)) + df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]') + df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]') + df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]') + df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]') + df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]') + df['F'] = np.timedelta64('NaT') + df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]') + df.ix[-3:,'G'] = date_range('20130101',periods=3) + df['H'] = np.datetime64('NaT') + result = df.dtypes + expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH')) + assert_series_equal(result,expected) + def test_new_empty_index(self): df1 = DataFrame(randn(0, 3)) df2 = DataFrame(randn(0, 3))
closes #7592
https://api.github.com/repos/pandas-dev/pandas/pulls/7593
2014-06-27T19:52:29Z
2014-06-27T20:44:03Z
2014-06-27T20:44:03Z
2014-06-27T20:44:03Z
ENH: read_{csv,table} look for index columns in row after header with C engine
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3160b35386fa2..7a9b9ddaf19f4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -136,6 +136,9 @@ Enhancements - ``Period`` and ``PeriodIndex`` can contain ``NaT`` in its values (:issue:`7485`) +- ``read_csv`` and ``read_table`` can now read index columns from the first + line after the header when using the C engine (:issue:`6893`) + .. _whatsnew_0141.performance: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 22fe3ef16e34d..12e5820953b2e 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1079,6 +1079,10 @@ def __init__(self, src, **kwds): self.orig_names = self.names + # index_col may be specified on line after the header + if self.index_col is None: + self.index_col = self._reader.index_col + if not self._has_complex_date_col: if (self._reader.leading_cols == 0 and _is_index_col(self.index_col)): diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index c02a3172f4adc..cc7d844cedd32 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1568,21 +1568,22 @@ def test_converter_return_string_bug(self): self.assertEqual(df2['Number1'].dtype, float) def test_read_table_buglet_4x_multiindex(self): - # GH 6607 - # Parsing multi-level index currently causes an error in the C parser. - # Temporarily copied to TestPythonParser. - # Here test that CParserError is raised: - - with tm.assertRaises(CParserError): - text = """ A B C D E + text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - # it works! - df = self.read_table(StringIO(text), sep='\s+') - self.assertEqual(df.index.names, ('one', 'two', 'three', 'four')) + # it works! 
+ df = self.read_table(StringIO(text), sep='\s+') + self.assertEqual(df.index.names, ('one', 'two', 'three', 'four')) + + # GH 6893 + data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9' + expected = DataFrame.from_records([(1,3,7,0,3,6), (3,1,4,1,5,9)], + columns=list('abcABC'), index=list('abc')) + actual = self.read_table(StringIO(data), sep='\s+') + tm.assert_frame_equal(actual, expected) def test_read_csv_parse_simple_list(self): text = """foo @@ -2713,28 +2714,6 @@ def test_decompression_regex_sep(self): self.assertRaises(ValueError, self.read_csv, path, compression='bz3') - def test_read_table_buglet_4x_multiindex(self): - # GH 6607 - # This is a copy which should eventually be merged into ParserTests - # when the issue with multi-level index is fixed in the C parser. - - text = """ A B C D E -one two three four -a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 -a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 -x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - - # it works! - df = self.read_table(StringIO(text), sep='\s+') - self.assertEqual(df.index.names, ('one', 'two', 'three', 'four')) - - # GH 6893 - data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9' - expected = DataFrame.from_records([(1,3,7,0,3,6), (3,1,4,1,5,9)], - columns=list('abcABC'), index=list('abc')) - actual = self.read_table(StringIO(data), sep='\s+') - tm.assert_frame_equal(actual, expected) - class TestFwfColspaceSniffing(tm.TestCase): def test_full_file(self): # File with all values diff --git a/pandas/parser.pyx b/pandas/parser.pyx index f303298e88273..8e80862cb19fb 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -574,6 +574,17 @@ cdef class TextReader: raise IOError('Expected file path name or file-like object,' ' got %s type' % type(source)) + cdef _word2name(self, word, char *errors): + if self.c_encoding == NULL and not PY3: + name = PyBytes_FromString(word) + else: + if self.c_encoding == NULL or self.c_encoding == b'utf-8': + name = PyUnicode_FromString(word) + else: + name 
= PyUnicode_Decode(word, strlen(word), + self.c_encoding, errors) + return name + cdef _get_header(self): # header is now a list of lists, so field_count should use header[0] @@ -612,16 +623,7 @@ cdef class TextReader: counts = {} unnamed_count = 0 for i in range(field_count): - word = self.parser.words[start + i] - - if self.c_encoding == NULL and not PY3: - name = PyBytes_FromString(word) - else: - if self.c_encoding == NULL or self.c_encoding == b'utf-8': - name = PyUnicode_FromString(word) - else: - name = PyUnicode_Decode(word, strlen(word), - self.c_encoding, errors) + name = self._word2name(self.parser.words[start + i], errors) if name == '': if self.has_mi_columns: @@ -685,13 +687,56 @@ cdef class TextReader: else: # not self.has_usecols: field_count = self.parser.line_fields[data_line] + passed_count = len(header[0]) + + # #6893: look for index columns on first line after header + + # hack: temporarily set expected_fields to prevent parser from + # raising if it sees extra columns + ex_fields = self.parser.expected_fields + self.parser.expected_fields = field_count + + datapos = self.parser.datapos # save position + self._tokenize_rows(1) + self.parser.expected_fields = ex_fields # restore expected_fields + + if self.parser.lines == data_line + 2: + field_count_next = self.parser.line_fields[data_line + 1] + + if field_count_next > field_count: + # found extra columns in the second row after the header + # check whether previous row contains index columns + start = self.parser.line_start[data_line] + + line = [self._word2name(self.parser.words[start + i], errors) + for i in range(self.parser.line_fields[data_line])] + + # remove trailing empty fields + while not line[-1]: + line.pop() + + if passed_count + len(line) == field_count_next: + for h in header: + for c in reversed(line): + h.insert(0, c) + + field_count = field_count_next + passed_count = field_count + self.index_col = line + self.parser_start += 1 + + else: + # hack: didn't find index columns, 
back up a line and + # let the parser code hande this... + self.parser.datapos = datapos + self.parser.lines -= 1 + self.parser.file_lines -= 1 + self.parser.line_fields[self.parser.lines] = 0 # #2981 if self.names is not None: field_count = max(field_count, len(self.names)) - passed_count = len(header[0]) - # if passed_count > field_count: # raise CParserError('Column names have %d fields, ' # 'data has %d fields'
Closes #6893. Currently the Python parser can read data with the index columns specified on the first line after the header, e.g. ``` python In [3]: pd.__version__ Out[3]: '0.14.0-271-gf8b101c' In [4]: text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" In [5]: pd.read_table(StringIO(text), sep='\s+', engine='python') Out[5]: A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 q 20.0000 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30.0000 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838 ``` but the C parser fails: ``` python In [6]: pd.read_table(StringIO(text), sep='\s+', engine='c') --------------------------------------------------------------------------- CParserError Traceback (most recent call last) . . . CParserError: Error tokenizing data. C error: Expected 5 fields in line 3, saw 9 ``` This PR patches the C parser to enable this feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/7591
2014-06-27T18:25:31Z
2014-06-30T19:26:49Z
2014-06-30T19:26:49Z
2014-06-30T21:05:10Z
BUG: bug in float64index assignment with a non scalar indexer
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 45a5d55ca047d..e7eaf49265fe5 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -262,3 +262,5 @@ Bug Fixes - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) + +- Bug in ``Float64Index`` assignment with a non scalar indexer (:issue:`7586`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 030f902eb13ce..2138ecfa5281f 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2074,7 +2074,7 @@ def __contains__(self, other): def get_loc(self, key): try: - if np.isnan(key): + if np.all(np.isnan(key)): try: return self._nan_idxs.item() except ValueError: diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 9b72d2f92182f..1a4da63a135a2 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3684,6 +3684,18 @@ def test_duplicate_ix_returns_series(self): e = df.loc[0.2, 'a'] tm.assert_series_equal(r, e) + def test_float_index_non_scalar_assignment(self): + df = DataFrame({'a': [1,2,3], 'b': [3,4,5]},index=[1.,2.,3.]) + df.loc[df.index[:2]] = 1 + expected = DataFrame({'a':[1,1,3],'b':[1,1,5]},index=df.index) + tm.assert_frame_equal(expected, df) + + df = DataFrame({'a': [1,2,3], 'b': [3,4,5]},index=[1.,2.,3.]) + df2 = df.copy() + df.loc[df.index] = df.loc[df.index] + tm.assert_frame_equal(df,df2) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #7586
https://api.github.com/repos/pandas-dev/pandas/pulls/7587
2014-06-27T13:30:21Z
2014-06-27T19:35:09Z
2014-06-27T19:35:09Z
2014-06-27T19:35:10Z
Ignore comment lines in read_csv parsing
diff --git a/doc/source/io.rst b/doc/source/io.rst index bc58b04de4473..0f698306a6517 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -98,8 +98,10 @@ They can take a number of arguments: data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns - E.g. [0,1,3]. Intervening rows that are not specified will be skipped. - (E.g. 2 in this example are skipped) + E.g. [0,1,3]. Intervening rows that are not specified will be + skipped (e.g. 2 in this example are skipped). Note that this parameter + ignores commented lines, so header=0 denotes the first line of + data rather than the first line of the file. - ``skiprows``: A collection of numbers for rows in the file to skip. Can also be an integer to skip the first ``n`` rows - ``index_col``: column number, column name, or list of column numbers/names, @@ -145,8 +147,12 @@ They can take a number of arguments: Acceptable values are 0, 1, 2, and 3 for QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONE, and QUOTE_NONNUMERIC, respectively. - ``skipinitialspace`` : boolean, default ``False``, Skip spaces after delimiter - ``escapechar`` : string, to specify how to escape quoted data - - ``comment``: denotes the start of a comment and ignores the rest of the line. - Currently line commenting is not supported. + - ``comment``: Indicates remainder of line should not be parsed. If found at the + beginning of a line, the line will be ignored altogether. This parameter + must be a single character. Also, fully commented lines + are ignored by the parameter `header` but not by `skiprows`. For example, + if comment='#', parsing '#empty\n1,2,3\na,b,c' with `header=0` will + result in '1,2,3' being treated as the header. - ``nrows``: Number of rows to read out of the file. 
Useful to only read a small portion of a large file - ``iterator``: If True, return a ``TextFileReader`` to enable reading a file @@ -252,6 +258,27 @@ after a delimiter: data = 'a, b, c\n1, 2, 3\n4, 5, 6' print(data) pd.read_csv(StringIO(data), skipinitialspace=True) + +Moreover, ``read_csv`` ignores any completely commented lines: + +.. ipython:: python + + data = 'a,b,c\n# commented line\n1,2,3\n#another comment\n4,5,6' + print(data) + pd.read_csv(StringIO(data), comment='#') + +.. note:: + + The presence of ignored lines might create ambiguities involving line numbers; + the parameter ``header`` uses row numbers (ignoring commented + lines), while ``skiprows`` uses line numbers (including commented lines): + + .. ipython:: python + + data = '#comment\na,b,c\nA,B,C\n1,2,3' + pd.read_csv(StringIO(data), comment='#', header=1) + data = 'A,B,C\n#comment\na,b,c\n1,2,3' + pd.read_csv(StringIO(data), comment='#', skiprows=2) The parsers make every attempt to "do the right thing" and not be very fragile. Type inference is a pretty big deal. So if a column can be coerced to diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 45a5d55ca047d..abe2505b5adf7 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -102,9 +102,9 @@ Enhancements - - - +- The file parsers ``read_csv`` and ``read_table`` now ignore line comments provided by + the parameter `comment`, which accepts only a single character for the C reader. + In particular, they allow for comments before file data begins (:issue:`2685`) - Tests for basic reading of public S3 buckets now exist (:issue:`7281`). - ``read_html`` now sports an ``encoding`` argument that is passed to the underlying parser library. 
You can use this to read non-ascii encoded web diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 22fe3ef16e34d..3e4155491fc9c 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -64,9 +64,11 @@ class ParserWarning(Warning): pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns E.g. [0,1,3]. Intervening rows that are not specified will be - skipped. (E.g. 2 in this example are skipped) + skipped (e.g. 2 in this example are skipped). Note that this parameter + ignores commented lines, so header=0 denotes the first line of + data rather than the first line of the file. skiprows : list-like or integer - Row numbers to skip (0-indexed) or number of rows to skip (int) + Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file index_col : int or sequence or False, default None Column to use as the row labels of the DataFrame. If a sequence is given, a @@ -106,8 +108,12 @@ class ParserWarning(Warning): thousands : str, default None Thousands separator comment : str, default None - Indicates remainder of line should not be parsed - Does not support line commenting (will return empty line) + Indicates remainder of line should not be parsed. If found at the + beginning of a line, the line will be ignored altogether. This parameter + must be a single character. Also, fully commented lines + are ignored by the parameter `header` but not by `skiprows`. For example, + if comment='#', parsing '#empty\n1,2,3\na,b,c' with `header=0` will + result in '1,2,3' being treated as the header. decimal : str, default '.' Character to recognize as decimal point. E.g. 
use ',' for European data nrows : int, default None @@ -1313,6 +1319,7 @@ def __init__(self, f, **kwds): self.data = None self.buf = [] self.pos = 0 + self.line_pos = 0 self.encoding = kwds['encoding'] self.compression = kwds['compression'] @@ -1459,6 +1466,7 @@ class MyDialect(csv.Dialect): line = self._check_comments([line])[0] self.pos += 1 + self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter if self.encoding is not None: @@ -1566,7 +1574,7 @@ def _infer_columns(self): if self.header is not None: header = self.header - # we have a mi columns, so read and extra line + # we have a mi columns, so read an extra line if isinstance(header, (list, tuple, np.ndarray)): have_mi_columns = True header = list(header) + [header[-1] + 1] @@ -1578,9 +1586,8 @@ def _infer_columns(self): for level, hr in enumerate(header): line = self._buffered_line() - while self.pos <= hr: + while self.line_pos <= hr: line = self._next_line() - unnamed_count = 0 this_columns = [] for i, c in enumerate(line): @@ -1705,25 +1712,36 @@ def _buffered_line(self): else: return self._next_line() + def _empty(self, line): + return not line or all(not x for x in line) + def _next_line(self): if isinstance(self.data, list): while self.pos in self.skiprows: self.pos += 1 - try: - line = self.data[self.pos] - except IndexError: - raise StopIteration + while True: + try: + line = self._check_comments([self.data[self.pos]])[0] + self.pos += 1 + # either uncommented or blank to begin with + if self._empty(self.data[self.pos - 1]) or line: + break + except IndexError: + raise StopIteration else: while self.pos in self.skiprows: next(self.data) self.pos += 1 - line = next(self.data) - - line = self._check_comments([line])[0] + while True: + orig_line = next(self.data) + line = self._check_comments([orig_line])[0] + self.pos += 1 + if self._empty(orig_line) or line: + break - self.pos += 1 + self.line_pos += 1 self.buf.append(line) return line diff --git 
a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index c02a3172f4adc..5f219d86ecff3 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1584,6 +1584,65 @@ def test_read_table_buglet_4x_multiindex(self): df = self.read_table(StringIO(text), sep='\s+') self.assertEqual(df.index.names, ('one', 'two', 'three', 'four')) + def test_line_comment(self): + data = """# empty +A,B,C +1,2.,4.#hello world +#ignore this line +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + df = self.read_csv(StringIO(data), comment='#') + tm.assert_almost_equal(df.values, expected) + + def test_comment_skiprows(self): + data = """# empty +random line +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # this should ignore the first four lines (including comments) + df = self.read_csv(StringIO(data), comment='#', skiprows=4) + tm.assert_almost_equal(df.values, expected) + + def test_comment_header(self): + data = """# empty +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # header should begin at the second non-comment line + df = self.read_csv(StringIO(data), comment='#', header=1) + tm.assert_almost_equal(df.values, expected) + + def test_comment_skiprows_header(self): + data = """# empty +# second empty line +# third empty line +X,Y,Z +1,2,3 +A,B,C +1,2.,4. 
+5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # skiprows should skip the first 4 lines (including comments), while + # header should start from the second non-commented line starting + # with line 5 + df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1) + tm.assert_almost_equal(df.values, expected) + def test_read_csv_parse_simple_list(self): text = """foo bar baz @@ -2874,6 +2933,65 @@ def test_parse_dates_empty_string(self): def test_usecols(self): raise nose.SkipTest("Usecols is not supported in C High Memory engine.") + def test_line_comment(self): + data = """# empty +A,B,C +1,2.,4.#hello world +#ignore this line +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + df = self.read_csv(StringIO(data), comment='#') + tm.assert_almost_equal(df.values, expected) + + def test_comment_skiprows(self): + data = """# empty +random line +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # this should ignore the first four lines (including comments) + df = self.read_csv(StringIO(data), comment='#', skiprows=4) + tm.assert_almost_equal(df.values, expected) + + def test_comment_header(self): + data = """# empty +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # header should begin at the second non-comment line + df = self.read_csv(StringIO(data), comment='#', header=1) + tm.assert_almost_equal(df.values, expected) + + def test_comment_skiprows_header(self): + data = """# empty +# second empty line +# third empty line +X,Y,Z +1,2,3 +A,B,C +1,2.,4. 
+5.,NaN,10.0 +""" + expected = [[1., 2., 4.], + [5., np.nan, 10.]] + # skiprows should skip the first 4 lines (including comments), while + # header should start from the second non-commented line starting + # with line 5 + df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1) + tm.assert_almost_equal(df.values, expected) + def test_passing_dtype(self): # GH 6607 # This is a copy which should eventually be merged into ParserTests diff --git a/pandas/parser.pyx b/pandas/parser.pyx index f303298e88273..199d4ab44abfa 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -78,8 +78,10 @@ cdef extern from "parser/tokenizer.h": ESCAPE_IN_QUOTED_FIELD QUOTE_IN_QUOTED_FIELD EAT_CRNL + EAT_CRNL_NOP EAT_WHITESPACE EAT_COMMENT + EAT_LINE_COMMENT FINISHED enum: ERROR_OVERFLOW diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index f3da2175092e7..1e9576487b9ed 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -698,6 +698,9 @@ int tokenize_delimited(parser_t *self, size_t line_limit) } else if (c == '\r') { self->state = EAT_CRNL; break; + } else if (c == self->commentchar) { + self->state = EAT_LINE_COMMENT; + break; } /* normal character - handle as START_FIELD */ @@ -752,6 +755,16 @@ int tokenize_delimited(parser_t *self, size_t line_limit) self->state = IN_FIELD; break; + case EAT_LINE_COMMENT: + if (c == '\n') { + self->file_lines++; + self->state = START_RECORD; + } else if (c == '\r') { + self->file_lines++; + self->state = EAT_CRNL_NOP; + } + break; + case IN_FIELD: /* in unquoted field */ if (c == '\n') { @@ -883,6 +896,15 @@ int tokenize_delimited(parser_t *self, size_t line_limit) } break; + case EAT_CRNL_NOP: /* inside an ignored comment line */ + self->state = START_RECORD; + /* \r line terminator -- parse this character again */ + if (c != '\n' && c != self->delimiter) { + --i; + --buf; + } + break; + default: break; diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h 
index 4e40d892a8b4a..6af63c07f1104 100644 --- a/pandas/src/parser/tokenizer.h +++ b/pandas/src/parser/tokenizer.h @@ -121,8 +121,10 @@ typedef enum { ESCAPE_IN_QUOTED_FIELD, QUOTE_IN_QUOTED_FIELD, EAT_CRNL, + EAT_CRNL_NOP, EAT_WHITESPACE, EAT_COMMENT, + EAT_LINE_COMMENT, FINISHED } ParserState;
This is the first part of #7470. closes #2685
https://api.github.com/repos/pandas-dev/pandas/pulls/7582
2014-06-26T20:32:33Z
2014-06-30T19:26:24Z
2014-06-30T19:26:24Z
2014-08-27T15:01:32Z
SQL: don't reflect full database GH7396
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index bb6f9cee5766e..3fdfa687abd78 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -76,6 +76,17 @@ def _parse_date_columns(data_frame, parse_dates): return data_frame +def _is_sqlalchemy_engine(con): + try: + import sqlalchemy + if isinstance(con, sqlalchemy.engine.Engine): + return True + else: + return False + except ImportError: + return False + + def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. @@ -262,7 +273,15 @@ def read_sql_table(table_name, con, index_col=None, coerce_float=True, """ - pandas_sql = PandasSQLAlchemy(con) + import sqlalchemy + from sqlalchemy.schema import MetaData + meta = MetaData(con) + try: + meta.reflect(only=[table_name]) + except sqlalchemy.exc.InvalidRequestError: + raise ValueError("Table %s not found" % table_name) + + pandas_sql = PandasSQLAlchemy(con, meta=meta) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) @@ -380,6 +399,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): + pandas_sql.meta.reflect(only=[sql]) return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) @@ -471,17 +491,9 @@ def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. 
- try: - import sqlalchemy - - if isinstance(con, sqlalchemy.engine.Engine): - return PandasSQLAlchemy(con, meta=meta) - else: - if flavor == 'mysql': - warnings.warn(_MYSQL_WARNING, FutureWarning) - return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) - - except ImportError: + if _is_sqlalchemy_engine(con): + return PandasSQLAlchemy(con, meta=meta) + else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) @@ -767,7 +779,6 @@ def __init__(self, engine, meta=None): if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.engine) - meta.reflect(self.engine) self.meta = meta @@ -812,19 +823,16 @@ def tables(self): return self.meta.tables def has_table(self, name): - if self.meta.tables.get(name) is not None: - return True - else: - return False + return self.engine.has_table(name) def get_table(self, table_name): return self.meta.tables.get(table_name) def drop_table(self, table_name): if self.engine.has_table(table_name): + self.meta.reflect(only=[table_name]) self.get_table(table_name).drop() self.meta.clear() - self.meta.reflect() def _create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index a34f278fc5a96..94e24929e2d50 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -623,6 +623,22 @@ def test_read_sql_delegate(self): iris_frame2 = sql.read_sql('iris', self.conn) tm.assert_frame_equal(iris_frame1, iris_frame2) + def test_not_reflect_all_tables(self): + # create invalid table + qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);""" + self.conn.execute(qry) + qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);""" + self.conn.execute(qry) + + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. + warnings.simplefilter("always") + # Trigger a warning. 
+ sql.read_sql_table('other_table', self.conn) + sql.read_sql_query('SELECT * FROM other_table', self.conn) + # Verify some things + self.assertEqual(len(w), 0, "Warning triggered for other table") + class TestSQLLegacyApi(_TestSQLApi): """ @@ -736,6 +752,8 @@ def setup_connect(self): try: self.conn = self.connect() self.pandasSQL = sql.PandasSQLAlchemy(self.conn) + # to test if connection can be made: + self.conn.connect() except sqlalchemy.exc.OperationalError: raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
Closes #7396 For now, let PandasSQLAlchemy not reflect anything by default, and `read_sql_table` provides its own reflected meta. Had to change some of the other functions to get this working. @danielballan @mangecoeur Can someone review? (it feeled a bit messy)
https://api.github.com/repos/pandas-dev/pandas/pulls/7581
2014-06-26T20:01:57Z
2014-07-05T13:49:29Z
2014-07-05T13:49:29Z
2014-07-05T13:49:31Z
BUG/TST: fix tests for groupby nth on Series (GH7559)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3160b35386fa2..1289f8a386f9f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -173,6 +173,7 @@ Bug Fixes - Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:) - Bug in timeops with non-aligned Series (:issue:`7500`) - Bug in timedelta inference when assigning an incomplete Series (:issue:`7592`) +- Bug in groupby ``.nth`` with a Series and integer-like column name (:issue:`7559`) - Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c7611d9829308..1a10ad912211b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -467,7 +467,7 @@ def _selected_obj(self): def _set_selection_from_grouper(self): """ we may need create a selection if we have non-level groupers """ grp = self.grouper - if self.as_index and getattr(grp,'groupings',None) is not None: + if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1: ax = self.obj._info_axis groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ] if len(groupers): @@ -759,7 +759,7 @@ def nth(self, n, dropna=None): Examples -------- - >>> DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) + >>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) >>> g = df.groupby('A') >>> g.nth(0) A B @@ -804,7 +804,10 @@ def nth(self, n, dropna=None): if self.as_index: ax = self.obj._info_axis names = self.grouper.names - if all([ n in ax for n in names ]): + if self.obj.ndim == 1: + # this is a pass-thru + pass + elif all([ n in ax for n in names ]): result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names) elif self._group_selection is not None: result.index = self.obj._get_axis(self.axis)[is_nth] @@ -821,17 +824,29 @@ def nth(self, n, dropna=None): "(was passed %s)." 
% (dropna),) # old behaviour, but with all and any support for DataFrames. - + # modified in GH 7559 to have better perf max_len = n if n >= 0 else - 1 - n + dropped = self.obj.dropna(how=dropna, axis=self.axis) - def picker(x): - x = x.dropna(how=dropna) # Note: how is ignored if Series - if len(x) <= max_len: - return np.nan - else: - return x.iloc[n] + # get a new grouper for our dropped obj + grouper, exclusions, obj = _get_grouper(dropped, key=self.keys, axis=self.axis, + level=self.level, sort=self.sort) + + sizes = obj.groupby(grouper).size() + result = obj.groupby(grouper).nth(n) + mask = (sizes<max_len).values + + # set the results which don't meet the criteria + if len(result) and mask.any(): + result.loc[mask] = np.nan - return self.agg(picker) + # reset/reindex to the original groups + if len(self.obj) == len(dropped): + result.index = self.grouper.result_index + else: + result = result.reindex(self.grouper.result_index) + + return result def cumcount(self, **kwargs): """ @@ -942,6 +957,9 @@ def tail(self, n=5): def _cumcount_array(self, arr=None, **kwargs): """ arr is where cumcount gets it's values from + + note: this is currently implementing sort=False (though the default is sort=True) + for groupby in general """ ascending = kwargs.pop('ascending', True) @@ -949,14 +967,23 @@ def _cumcount_array(self, arr=None, **kwargs): arr = np.arange(self.grouper._max_groupsize, dtype='int64') len_index = len(self._selected_obj.index) - cumcounts = np.empty(len_index, dtype=arr.dtype) + cumcounts = np.zeros(len_index, dtype=arr.dtype) + if not len_index: + return cumcounts + + indices, values = [], [] + for v in self.indices.values(): + indices.append(v) + + if ascending: + values.append(arr[:len(v)]) + else: + values.append(arr[len(v)-1::-1]) + + indices = np.concatenate(indices) + values = np.concatenate(values) + cumcounts[indices] = values - if ascending: - for v in self.indices.values(): - cumcounts[v] = arr[:len(v)] - else: - for v in 
self.indices.values(): - cumcounts[v] = arr[len(v)-1::-1] return cumcounts def _index_with_as_index(self, b): @@ -1270,6 +1297,7 @@ def group_info(self): comp_ids = com._ensure_int64(comp_ids) return comp_ids, obs_group_ids, ngroups + def _get_compressed_labels(self): all_labels = [ping.labels for ping in self.groupings] if self._overflow_possible: @@ -1892,7 +1920,6 @@ def groups(self): self._groups = self.index.groupby(self.grouper) return self._groups - def _get_grouper(obj, key=None, axis=0, level=None, sort=True): """ create and return a BaseGrouper, which is an internal @@ -2141,7 +2168,10 @@ def _wrap_aggregated_output(self, output, names=None): if names is not None: return DataFrame(output, index=index, columns=names) else: - return Series(output, index=index, name=self.name) + name = self.name + if name is None: + name = self._selected_obj.name + return Series(output, index=index, name=name) def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 14380c83de79e..f60ffd006bcf1 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -282,6 +282,28 @@ def test_nth(self): expected = df.loc[[]] assert_frame_equal(result,expected) + # GH 7559 + # from the vbench + df = DataFrame(np.random.randint(1, 10, (100, 2))) + s = df[1] + g = df[0] + expected = s.groupby(g).first() + expected2 = s.groupby(g).apply(lambda x: x.iloc[0]) + assert_series_equal(expected2,expected) + + # validate first + v = s[g==1].iloc[0] + self.assertEqual(expected.iloc[0],v) + self.assertEqual(expected2.iloc[0],v) + + # this is NOT the same as .first (as sorted is default!) 
+ # as it keeps the order in the series (and not the group order) + # related GH 7287 + expected = s.groupby(g,sort=False).first() + expected.index = range(1,10) + result = s.groupby(g).nth(0,dropna='all') + assert_series_equal(result,expected) + def test_grouper_index_types(self): # related GH5375 # groupby misbehaving when using a Floatlike index diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 6a444d0a09af7..c6ddefbdae451 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -244,11 +244,14 @@ def f(): groupby_last_float32 = Benchmark('data2.groupby(labels).last()', setup, start_date=datetime(2013, 1, 1)) -groupby_nth_float64 = Benchmark('data.groupby(labels).nth(0)', setup, - start_date=datetime(2012, 5, 1)) - -groupby_nth_float32 = Benchmark('data2.groupby(labels).nth(0)', setup, - start_date=datetime(2013, 1, 1)) +groupby_nth_float64_none = Benchmark('data.groupby(labels).nth(0)', setup, + start_date=datetime(2012, 5, 1)) +groupby_nth_float32_none = Benchmark('data2.groupby(labels).nth(0)', setup, + start_date=datetime(2013, 1, 1)) +groupby_nth_float64_any = Benchmark('data.groupby(labels).nth(0,dropna="all")', setup, + start_date=datetime(2012, 5, 1)) +groupby_nth_float32_any = Benchmark('data2.groupby(labels).nth(0,dropna="all")', setup, + start_date=datetime(2013, 1, 1)) # with datetimes (GH7555) setup = common_setup + """ @@ -259,8 +262,10 @@ def f(): start_date=datetime(2013, 5, 1)) groupby_last_datetimes = Benchmark('df.groupby("b").last()', setup, start_date=datetime(2013, 5, 1)) -groupby_nth_datetimes = Benchmark('df.groupby("b").nth(0)', setup, - start_date=datetime(2013, 5, 1)) +groupby_nth_datetimes_none = Benchmark('df.groupby("b").nth(0)', setup, + start_date=datetime(2013, 5, 1)) +groupby_nth_datetimes_any = Benchmark('df.groupby("b").nth(0,dropna="all")', setup, + start_date=datetime(2013, 5, 1)) # with object setup = common_setup + """ @@ -271,8 +276,10 @@ def f(): start_date=datetime(2013, 5, 1)) groupby_last_object = 
Benchmark('df.groupby("b").last()', setup, start_date=datetime(2013, 5, 1)) -groupby_nth_object = Benchmark('df.groupby("b").nth(0)', setup, - start_date=datetime(2013, 5, 1)) +groupby_nth_object_none = Benchmark('df.groupby("b").nth(0)', setup, + start_date=datetime(2013, 5, 1)) +groupby_nth_object_any = Benchmark('df.groupby("b").nth(0,dropna="any")', setup, + start_date=datetime(2013, 5, 1)) #---------------------------------------------------------------------- # groupby_indices replacement, chop up Series @@ -351,11 +358,16 @@ def f(g): """ # Not really a fair test as behaviour has changed! -groupby_frame_nth = Benchmark("df.groupby(0).nth(0)", setup, - start_date=datetime(2014, 3, 1)) +groupby_frame_nth_none = Benchmark("df.groupby(0).nth(0)", setup, + start_date=datetime(2014, 3, 1)) + +groupby_series_nth_none = Benchmark("df[1].groupby(df[0]).nth(0)", setup, + start_date=datetime(2014, 3, 1)) +groupby_frame_nth_any= Benchmark("df.groupby(0).nth(0,dropna='any')", setup, + start_date=datetime(2014, 3, 1)) -groupby_series_nth = Benchmark("df[1].groupby(df[0]).nth(0)", setup, - start_date=datetime(2014, 3, 1)) +groupby_series_nth_any = Benchmark("df[1].groupby(df[0]).nth(0,dropna='any')", setup, + start_date=datetime(2014, 3, 1)) #----------------------------------------------------------------------
closes #7559 related #7287 And some improvements when using any/all for nth (still the nth are 10x slower than the cythonized first/last, but that's another issue) ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_nth_object_any | 503.6680 | 17163.4649 | 0.0293 | groupby_nth_datetimes_any | 553.3033 | 13455.8423 | 0.0411 | groupby_series_nth_any | 2.7003 | 26.1161 | 0.1034 | groupby_frame_nth_any | 5.4483 | 15.4017 | 0.3537 | groupby_multi_different_functions | 11.3816 | 12.2033 | 0.9327 | groupby_multi_different_numpy_functions | 10.4934 | 11.1040 | 0.9450 | groupby_object_nth | 397.3277 | 416.7441 | 0.9534 | groupby_pivot_table | 16.2361 | 16.9867 | 0.9558 | groupby_frame_apply_overhead | 8.8013 | 9.1234 | 0.9647 | groupby_multi_python | 129.7946 | 133.2393 | 0.9741 | groupby_apply_dict_return | 37.9413 | 38.7277 | 0.9797 | groupby_frame_apply | 41.9403 | 42.6593 | 0.9831 | groupby_object_first | 15.0506 | 15.2097 | 0.9895 | groupby_frame_cython_many_columns | 3.3430 | 3.3733 | 0.9910 | groupby_nth_float64 | 51.4960 | 51.9506 | 0.9912 | groupby_frame_nth | 2.7520 | 2.7746 | 0.9918 | groupby_multi_size | 21.6714 | 21.8496 | 0.9918 | groupby_transform2 | 159.0919 | 160.3904 | 0.9919 | groupby_indices | 6.2350 | 6.2837 | 0.9923 | groupby_multi_cython | 13.9600 | 14.0460 | 0.9939 | groupby_frame_median | 6.1007 | 6.1367 | 0.9941 | groupby_sum_booleans | 1.2400 | 1.2473 | 0.9941 | groupby_datetimes_last | 10.7520 | 10.8067 | 0.9949 | groupby_series_simple_cython | 178.8233 | 179.7036 | 0.9951 | groupby_nth_float32 | 52.6213 | 52.8786 | 0.9951 | groupby_multi_series_op | 12.5150 | 12.5673 | 0.9958 | groupby_transform | 166.5533 | 167.0943 | 0.9968 | groupby_frame_singlekey_integer | 2.3321 | 2.3394 | 0.9969 | groupby_multi_count | 8.7133 | 8.7360 | 0.9974 | groupby_last_float32 | 3.5094 | 3.5140 | 0.9987 | 
groupby_transform_ufunc | 6.1913 | 6.1987 | 0.9988 | groupby_last | 3.5419 | 3.5423 | 0.9999 | groupby_first | 3.3600 | 3.3563 | 1.0011 | groupby_object_last | 14.8129 | 14.7574 | 1.0038 | groupby_int_count | 4.4247 | 4.4080 | 1.0038 | groupby_first_float32 | 3.3464 | 3.3300 | 1.0049 | groupby_mixed_first | 10.9203 | 10.8653 | 1.0051 | groupby_datetimes_nth | 445.6376 | 423.4857 | 1.0523 | groupby_simple_compress_timing | 33.8430 | 27.2744 | 1.2408 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [def0155] : BUG/TST: fix tests for groupby nth on Series (GH7559) Base [4082c1a] : Merge pull request #7593 from jreback/timedelta_nat BUG: Bug in timedelta inference when assigning an incomplete Series (GH7592) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7580
2014-06-26T19:37:22Z
2014-06-30T11:05:58Z
2014-06-30T11:05:58Z
2014-06-30T11:05:59Z
DOC: closes gh6838. Breakout options.rst from basics.rst
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index f614e1b7edcf4..1979b180b71b9 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1552,152 +1552,3 @@ While float dtypes are unchanged. casted = dfa[df2>0] casted casted.dtypes - - -Working with package options ----------------------------- - -.. _basics.working_with_options: -.. versionadded:: 0.10.1 - -pandas has an options system that let's you customize some aspects of it's behaviour, -display-related options being those the user is must likely to adjust. - -Options have a full "dotted-style", case-insensitive name (e.g. ``display.max_rows``), -You can get/set options directly as attributes of the top-level ``options`` attribute: - -.. ipython:: python - - import pandas as pd - pd.options.display.max_rows - pd.options.display.max_rows = 999 - pd.options.display.max_rows - - -There is also an API composed of 4 relevant functions, available directly from the ``pandas`` -namespace, and they are: - -- ``get_option`` / ``set_option`` - get/set the value of a single option. -- ``reset_option`` - reset one or more options to their default value. -- ``describe_option`` - print the descriptions of one or more options. - -**Note:** developers can check out pandas/core/config.py for more info. - -All of the functions above accept a regexp pattern (``re.search`` style) as an argument, -and so passing in a substring will work - as long as it is unambiguous : - -.. ipython:: python - - get_option("display.max_rows") - set_option("display.max_rows",101) - get_option("display.max_rows") - set_option("max_r",102) - get_option("display.max_rows") - - -The following will **not work** because it matches multiple option names, e.g. ``display.max_colwidth``, ``display.max_rows``, ``display.max_columns``: - -.. 
ipython:: python - :okexcept: - - try: - get_option("display.max_") - except KeyError as e: - print(e) - - -**Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions. - - -You can get a list of available options and their descriptions with ``describe_option``. When called -with no argument ``describe_option`` will print out the descriptions for all available options. - -.. ipython:: python - :suppress: - - reset_option("all") - - -.. ipython:: python - - describe_option() - - -or you can get the description for just the options that match the regexp you pass in: - -.. ipython:: python - - describe_option("date") - - -All options also have a default value, and you can use the ``reset_option`` to do just that: - -.. ipython:: python - :suppress: - - reset_option("display.max_rows") - - -.. ipython:: python - - get_option("display.max_rows") - set_option("display.max_rows",999) - get_option("display.max_rows") - reset_option("display.max_rows") - get_option("display.max_rows") - - -It's also possible to reset multiple options at once (using a regex): - -.. ipython:: python - - reset_option("^display") - - -.. versionadded:: 0.13.1 - - Beginning with v0.13.1 the `option_context` context manager has been exposed through - the top-level API, allowing you to execute code with given option values. Option values - are restored automatically when you exit the `with` block: - -.. ipython:: python - - with option_context("display.max_rows",10,"display.max_columns", 5): - print get_option("display.max_rows") - print get_option("display.max_columns") - - print get_option("display.max_rows") - print get_option("display.max_columns") - - -Console Output Formatting -------------------------- - -.. _basics.console_output: - -Use the ``set_eng_float_format`` function in the ``pandas.core.common`` module -to alter the floating-point formatting of pandas objects to produce a particular -format. 
- -For instance: - -.. ipython:: python - - set_eng_float_format(accuracy=3, use_eng_prefix=True) - s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e']) - s/1.e3 - s/1.e6 - -.. ipython:: python - :suppress: - - reset_option('^display\.') - - -The ``set_printoptions`` function has a number of options for controlling how -floating point numbers are formatted (using the ``precision`` argument) in the -console and . The ``max_rows`` and ``max_columns`` control how many rows and -columns of DataFrame objects are shown by default. If ``max_columns`` is set to -0 (the default, in fact), the library will attempt to fit the DataFrame's -string representation into the current terminal width, and defaulting to the -summary view otherwise. diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 6154d21e12336..81bebab46dac9 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -24,44 +24,6 @@ Frequently Asked Questions (FAQ) options.display.mpl_style='default' from pandas.compat import lrange - -.. _ref-repr-control: - -How do I control the way my DataFrame is displayed? ---------------------------------------------------- - -pandas users rely on a variety of environments for using pandas: scripts, terminal, -IPython qtconsole/ notebook, (IDLE, spyder, etc'). -Each environment has it's own capabilities and limitations: HTML support, -horizontal scrolling, auto-detection of width/height. -To appropriately address all these environments, the display behavior is controlled -by several options, which you're encouraged to tweak to suit your setup. - -As of 0.13, these are the relevant options, all under the `display` namespace, -(e.g. ``display.width``, etc.): - -- notebook_repr_html: if True, IPython frontends with HTML support will display - dataframes as HTML tables when possible. 
-- large_repr (default 'truncate'): when a :class:`~pandas.DataFrame` - exceeds max_columns or max_rows, it can be displayed either as a - truncated table or, with this set to 'info', as a short summary view. -- max_columns (default 20): max dataframe columns to display. -- max_rows (default 60): max dataframe rows display. -- show_dimensions (default True): controls the display of the row/col counts footer. - -Two additional options only apply to displaying DataFrames in terminals, -not to the HTML view: - -- expand_repr (default True): when the frame width cannot fit within - the screen, the output will be broken into multiple pages. -- width: width of display screen in characters, used to determine the - width of lines when expand_repr is active. Setting this to None will - trigger auto-detection of terminal width. - -IPython users can use the IPython startup file to import pandas and set these -options automatically when starting up. - - .. _ref-monkey-patching: Adding Features to your pandas Installation diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 905e76aee88eb..f5352bc1031bc 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -122,6 +122,7 @@ See the package overview for more detail about what's in the library. cookbook dsintro basics + options indexing computation missing_data diff --git a/doc/source/options.rst b/doc/source/options.rst new file mode 100644 index 0000000000000..bae6eac8c5e41 --- /dev/null +++ b/doc/source/options.rst @@ -0,0 +1,410 @@ +.. _options: + +.. currentmodule:: pandas + +.. ipython:: python + :suppress: + + import pandas as pd + import numpy as np + np.random.seed(123456) + +******************** +Options and Settings +******************** +Overview +-------- +pandas has an options system that lets you customize some aspects of it's behaviour, +display-related options being those the user is most likely to adjust. 
+ +Options have a full "dotted-style", case-insensitive name (e.g. ``display.max_rows``), +You can get/set options directly as attributes of the top-level ``options`` attribute: + +.. ipython:: python + + import pandas as pd + pd.options.display.max_rows + pd.options.display.max_rows = 999 + pd.options.display.max_rows + +There is also an API composed of 5 relevant functions, available directly from the ``pandas`` +namespace, and they are: + +- :func:`~pandas.get_option` / :func:`~pandas.set_option` - get/set the value of a single option. +- :func:`~pandas.reset_option` - reset one or more options to their default value. +- :func:`~pandas.describe_option` - print the descriptions of one or more options. +- :func:`~pandas.option_context` - execute a codeblock with a set of options + that revert to prior settings after execution. + +**Note:** developers can check out pandas/core/config.py for more info. + +All of the functions above accept a regexp pattern (``re.search`` style) as an argument, +and so passing in a substring will work - as long as it is unambiguous : + +.. ipython:: python + + pd.get_option("display.max_rows") + pd.set_option("display.max_rows",101) + pd.get_option("display.max_rows") + pd.set_option("max_r",102) + pd.get_option("display.max_rows") + + +The following will **not work** because it matches multiple option names, e.g. +``display.max_colwidth``, ``display.max_rows``, ``display.max_columns``: + +.. ipython:: python + :okexcept: + + try: + pd.get_option("column") + except KeyError as e: + print(e) + + +**Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions. + + +You can get a list of available options and their descriptions with ``describe_option``. When called +with no argument ``describe_option`` will print out the descriptions for all available options. + +.. 
ipython:: python + :suppress: + + pd.reset_option("all") + +Getting and Setting Options +--------------------------- + +As described above, ``get_option()`` and ``set_option()`` are available from the +pandas namespace. To change an option, call ``set_option('option regex', new_value)`` + +.. ipython:: python + + pd.get_option('mode.sim_interactive') + pd.set_option('mode.sim_interactive', True) + pd.get_option('mode.sim_interactive') + +All options also have a default value, and you can use ``reset_option`` to do just that: + +.. ipython:: python + :suppress: + + pd.reset_option("display.max_rows") + +.. ipython:: python + + pd.get_option("display.max_rows") + pd.set_option("display.max_rows",999) + pd.get_option("display.max_rows") + pd.reset_option("display.max_rows") + pd.get_option("display.max_rows") + + +It's also possible to reset multiple options at once (using a regex): + +.. ipython:: python + + pd.reset_option("^display") + + +``option_context`` context manager has been exposed through +the top-level API, allowing you to execute code with given option values. Option values +are restored automatically when you exit the `with` block: + +.. ipython:: python + + with pd.option_context("display.max_rows",10,"display.max_columns", 5): + print(pd.get_option("display.max_rows")) + print(pd.get_option("display.max_columns")) + print(pd.get_option("display.max_rows")) + print(pd.get_option("display.max_columns")) + + +Frequently Used Options +----------------------- +The following is a walkthrough of the more frequently used display options. + +``display.max_rows`` and ``display.max_columns`` sets the maximum number +of rows and columns displayed when a frame is pretty-printed. Truncated +lines are replaced by an ellipsis. + +.. 
ipython:: python + + df=pd.DataFrame(np.random.randn(7,2)) + pd.set_option('max_rows', 7) + df + pd.set_option('max_rows', 5) + df + pd.reset_option('max_rows') + +``display.expand_frame_repr`` allows for the the representation of +dataframes to stretch across pages, wrapped over the full column vs row-wise. + +.. ipython:: python + + df=pd.DataFrame(np.random.randn(5,10)) + pd.set_option('expand_frame_repr', True) + df + pd.set_option('expand_frame_repr', False) + df + pd.reset_option('expand_frame_repr') + +``display.large_repr`` lets you select whether to display dataframes that exceed +``max_columns`` or ``max_rows`` as a truncated frame, or as a summary. + +.. ipython:: python + + df=pd.DataFrame(np.random.randn(10,10)) + pd.set_option('max_rows', 5) + pd.set_option('large_repr', 'truncate') + df + pd.set_option('large_repr', 'info') + df + pd.reset_option('large_repr') + pd.reset_option('max_rows') + +``display.max_columnwidth`` sets the maximum width of columns. Cells +of this length or longer will be truncated with an elipsis. + +.. ipython:: python + + df=pd.DataFrame(np.array([['foo', 'bar', 'bim', 'uncomfortably long string'], + ['horse', 'cow', 'banana', 'apple']])) + pd.set_option('max_colwidth',40) + df + pd.set_option('max_colwidth', 6) + df + pd.reset_option('max_colwidth') + +``display.max_info_columns`` sets a threshold for when by-column info +will be given. + +.. ipython:: python + + df=pd.DataFrame(np.random.randn(10,10)) + pd.set_option('max_info_columns', 11) + df.info() + pd.set_option('max_info_columns', 5) + df.info() + pd.reset_option('max_info_columns') + +``display.max_info_rows``: ``df.info()`` will usually show null-counts for each column. +For large frames this can be quite slow. ``max_info_rows`` and ``max_info_cols`` +limit this null check only to frames with smaller dimensions then specified. + +.. 
ipython:: python + + df=pd.DataFrame(np.random.choice([0,1,np.nan],size=(10,10))) + df + pd.set_option('max_info_rows', 11) + df.info() + pd.set_option('max_info_rows', 5) + df.info() + pd.reset_option('max_info_rows') + +``display.precision`` sets the output display precision. This is only a +suggestion. + +.. ipython:: python + + df=pd.DataFrame(np.random.randn(5,5)) + pd.set_option('precision',7) + df + pd.set_option('precision',4) + df + +``display.chop_threshold`` sets at what level pandas rounds to zero when +it displays a Series of DataFrame. Note, this does not effect the +precision at which the number is stored. + +.. ipython:: python + + df=pd.DataFrame(np.random.randn(6,6)) + pd.set_option('chop_threshold', 0) + df + pd.set_option('chop_threshold', .5) + df + pd.reset_option('chop_threshold') + +``display.colheader_justify`` controls the justification of the headers. +Options are 'right', and 'left'. + +.. ipython:: python + + df=pd.DataFrame(np.array([np.random.randn(6), np.random.randint(1,9,6)*.1, np.zeros(6)]).T, columns=['A', 'B', 'C'], dtype='float') + pd.set_option('colheader_justify', 'right') + df + pd.set_option('colheader_justify', 'left') + df + pd.reset_option('colheader_justify') + + + +List of Options +--------------- + +========================== ============ ================================== +Option Default Function +========================== ============ ================================== +display.chop_threshold None If set to a float value, all float + values smaller then the given + threshold will be displayed as + exactly 0 by repr and friends. +display.colheader_justify right Controls the justification of + column headers. used by DataFrameFormatter. +display.column_space 12 No description available. 
+display.date_dayfirst False When True, prints and parses dates + with the day first, eg 20/01/2005 +display.date_yearfirst False When True, prints and parses dates + with the year first, eg 2005/01/20 +display.encoding UTF-8 Defaults to the detected encoding + of the console. Specifies the encoding + to be used for strings returned by + to_string, these are generally strings + meant to be displayed on the console. +display.expand_frame_repr True Whether to print out the full DataFrame + repr for wide DataFrames across + multiple lines, `max_columns` is + still respected, but the output will + wrap-around across multiple "pages" + if it's width exceeds `display.width`. +display.float_format None The callable should accept a floating + point number and return a string with + the desired format of the number. + This is used in some places like + SeriesFormatter. + See core.format.EngFormatter for an example. +display.height 60 Deprecated. Use `display.max_rows` instead. +display.large_repr truncate For DataFrames exceeding max_rows/max_cols, + the repr (and HTML repr) can show + a truncated table (the default from 0.13), + or switch to the view from df.info() + (the behaviour in earlier versions of pandas). + allowable settings, ['truncate', 'info'] +display.line_width 80 Deprecated. Use `display.width` instead. +display.max_columns 20 max_rows and max_columns are used + in __repr__() methods to decide if + to_string() or info() is used to + render an object to a string. In + case python/IPython is running in + a terminal this can be set to 0 and + pandas will correctly auto-detect + the width the terminal and swap to + a smaller format in case all columns + would not fit vertically. The IPython + notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence + it is not possible to do correct + auto-detection. 'None' value means + unlimited. +display.max_colwidth 50 The maximum width in characters of + a column in the repr of a pandas + data structure. 
When the column overflows, + a "..." placeholder is embedded in + the output. +display.max_info_columns 100 max_info_columns is used in DataFrame.info + method to decide if per column information + will be printed. +display.max_info_rows 1690785 df.info() will usually show null-counts + for each column. For large frames + this can be quite slow. max_info_rows + and max_info_cols limit this null + check only to frames with smaller + dimensions then specified. +display.max_rows 60 This sets the maximum number of rows + pandas should output when printing + out various output. For example, + this value determines whether the + repr() for a dataframe prints out + fully or just a summary repr. + 'None' value means unlimited. +display.max_seq_items 100 when pretty-printing a long sequence, + no more then `max_seq_items` will + be printed. If items are omitted, + they will be denoted by the addition + of "..." to the resulting string. + If set to None, the number of items + to be printed is unlimited. +display.mpl_style None Setting this to 'default' will modify + the rcParams used by matplotlib + to give plots a more pleasing visual + style by default. Setting this to + None/False restores the values to + their initial value. +display.multi_sparse True "Sparsify" MultiIndex display (don't + display repeated elements in outer + levels within groups) +display.notebook_repr_html True When True, IPython notebook will + use html representation for + pandas objects (if it is available). +display.pprint_nest_depth 3 Controls the number of nested levels + to process when pretty-printing +display.precision 7 Floating point output precision + (number of significant digits). This is + only a suggestion +display.show_dimensions truncate Whether to print out dimensions + at the end of DataFrame repr. + If 'truncate' is specified, only + print out the dimensions if the + frame is truncated (e.g. 
not display + all rows and/or columns) +display.width 80 Width of the display in characters. + In case python/IPython is running in + a terminal this can be set to None + and pandas will correctly auto-detect + the width. Note that the IPython notebook, + IPython qtconsole, or IDLE do not run in a + terminal and hence it is not possible + to correctly detect the width. +io.excel.xls.writer xlwt The default Excel writer engine for + 'xls' files. +io.excel.xlsm.writer openpyxl The default Excel writer engine for + 'xlsm' files. Available options: + 'openpyxl' (the default). +io.excel.xlsx.writer openpyxl The default Excel writer engine for + 'xlsx' files. +io.hdf.default_format None default format writing format, if + None, then put will default to + 'fixed' and append will default to + 'table' +io.hdf.dropna_table True drop ALL nan rows when appending + to a table +mode.chained_assignment warn Raise an exception, warn, or no + action if trying to use chained + assignment, The default is warn +mode.sim_interactive False Whether to simulate interactive mode + for purposes of testing +mode.use_inf_as_null False True means treat None, NaN, -INF, + INF as null (old way), False means + None and NaN are null, but INF, -INF + are not null (new way). +========================== ============ ================================== + +.. _basics.console_output: + +Number Formatting +------------------ + +pandas also allow you to set how numbers are displayed in the console. +This option is not set through the ``set_options`` API. + +Use the ``set_eng_float_format`` function +to alter the floating-point formatting of pandas objects to produce a particular +format. + +For instance: + +.. ipython:: python + + import numpy as np + + pd.set_eng_float_format(accuracy=3, use_eng_prefix=True) + s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) + s/1.e3 + s/1.e6 + +.. ipython:: python + :suppress: + + pd.reset_option('^display\.')
closes #6838. closes #3104 Breakout "options" section for docs, heavily using info already in `basics.rst`. Expands examples, and turns `describe_options` into a table.
https://api.github.com/repos/pandas-dev/pandas/pulls/7578
2014-06-26T18:54:54Z
2014-07-07T07:08:31Z
2014-07-07T07:08:31Z
2015-01-17T05:24:58Z
BUG: Error in rolling_var if window is larger than array, fixes #7297
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 571e2dc2692b0..63517f532f7e5 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -271,6 +271,8 @@ Bug Fixes - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) +- Bug in ``rolling_var`` where a window larger than the array would raise an error(:issue:`7297`) + - Bug with last plotted timeseries dictating ``xlim`` (:issue:`2960`) - Bug with ``secondary_y`` axis not being considered for timeseries ``xlim`` (:issue:`3490`) diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 431ef97debae6..2a07272acd0e8 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -1173,6 +1173,10 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1): minp = _check_minp(win, minp, N) + # Check for windows larger than array, addresses #7297 + win = min(win, N) + + # Over the first window, observations can only be added, never removed for i from 0 <= i < win: val = input[i] @@ -1196,23 +1200,27 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1): output[i] = val + # After the first window, observations can both be added and removed for i from win <= i < N: val = input[i] prev = input[i - win] if val == val: if prev == prev: + # Adding one observation and removing another one delta = val - prev prev -= mean_x mean_x += delta / nobs val -= mean_x ssqdm_x += (val + prev) * delta else: + # Adding one observation and not removing any nobs += 1 delta = (val - mean_x) mean_x += delta / nobs ssqdm_x += delta * (val - mean_x) elif prev == prev: + # Adding no new observation, but removing one nobs -= 1 if nobs: delta = (prev - mean_x) @@ -1221,6 +1229,7 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1): else: mean_x = 0 ssqdm_x = 0 + # Variance is unchanged if no observation is added or removed if nobs >= minp: #pathological case diff --git 
a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 6cd187ddf8981..8f20a4d421045 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -366,7 +366,8 @@ def _check_ndarray(self, func, static_comp, window=50, preserve_nan=True, has_center=True, fill_value=None, - test_stable=False): + test_stable=False, + test_window=True): result = func(self.arr, window) assert_almost_equal(result[-1], @@ -429,6 +430,27 @@ def _check_ndarray(self, func, static_comp, window=50, assert_almost_equal(result[-1], static_comp(self.arr[-50:] + 1e9)) + # Test window larger than array, #7297 + if test_window: + if has_min_periods: + for minp in (0, len(self.arr)-1, len(self.arr)): + result = func(self.arr, len(self.arr)+1, min_periods=minp) + expected = func(self.arr, len(self.arr), min_periods=minp) + nan_mask = np.isnan(result) + self.assertTrue(np.array_equal(nan_mask, + np.isnan(expected))) + nan_mask = ~nan_mask + assert_almost_equal(result[nan_mask], expected[nan_mask]) + else: + result = func(self.arr, len(self.arr)+1) + expected = func(self.arr, len(self.arr)) + nan_mask = np.isnan(result) + self.assertTrue(np.array_equal(nan_mask, np.isnan(expected))) + nan_mask = ~nan_mask + assert_almost_equal(result[nan_mask], expected[nan_mask]) + + + def _check_structures(self, func, static_comp, has_min_periods=True, has_time_rule=True,
fixes #7297 Other rolling window functions rely on the logic in `_check_minp`, and split the iteration into the ranges `[0, minp-1)` and `(minp-1, N)`. Because of the way `rolling_var` handles things, splitting the iteration into `[0, win)` and `(win, N)` makes more sense. Added also some comments as to what is going on elsewhere in the code. No tests have been added, as @kdiether seems to have that very advanced.
https://api.github.com/repos/pandas-dev/pandas/pulls/7572
2014-06-26T04:10:41Z
2014-07-02T11:46:29Z
2014-07-02T11:46:29Z
2014-07-02T14:53:03Z
BUG/PERF: perf issues in object groupby aggregations (GH7555)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c2debb9bfe1c0..c7611d9829308 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1414,10 +1414,11 @@ def aggregate(self, values, how, axis=0): else: is_numeric = issubclass(values.dtype.type, (np.datetime64, np.timedelta64)) - out_dtype = 'float64' if is_numeric: + out_dtype = 'float64' values = values.view('int64') else: + out_dtype = 'object' values = values.astype(object) # will be filled in Cython function diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index b432ddd03d17f..4098ac06c2da2 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -2234,7 +2234,7 @@ def generate_put_template(template, use_ints=True, use_floats=True, date_like_list = [ ('int64', 'int64_t', 'float64_t', 'np.float64'), ] - object_list = [('object', 'object', 'float64_t', 'np.float64')] + object_list = [('object', 'object', 'object', 'np.object_')] function_list = [] if use_floats: function_list.extend(floats_list) diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 42ae043847ba1..97a34582d2ef2 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -6697,7 +6697,7 @@ def group_count_float32(ndarray[float32_t, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_object(ndarray[float64_t, ndim=2] out, +def group_count_object(ndarray[object, ndim=2] out, ndarray[int64_t] counts, ndarray[object, ndim=2] values, ndarray[int64_t] labels): @@ -6838,7 +6838,7 @@ def group_count_bin_float32(ndarray[float32_t, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_bin_object(ndarray[float64_t, ndim=2] out, +def group_count_bin_object(ndarray[object, ndim=2] out, ndarray[int64_t] counts, ndarray[object, ndim=2] values, ndarray[int64_t] bins): diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 2b6c6f55e5776..6a444d0a09af7 100644 --- a/vb_suite/groupby.py +++ 
b/vb_suite/groupby.py @@ -232,24 +232,46 @@ def f(): labels = labels.take(np.random.permutation(len(labels))) """ -groupby_first = Benchmark('data.groupby(labels).first()', setup, +groupby_first_float64 = Benchmark('data.groupby(labels).first()', setup, start_date=datetime(2012, 5, 1)) groupby_first_float32 = Benchmark('data2.groupby(labels).first()', setup, start_date=datetime(2013, 1, 1)) -groupby_last = Benchmark('data.groupby(labels).last()', setup, +groupby_last_float64 = Benchmark('data.groupby(labels).last()', setup, start_date=datetime(2012, 5, 1)) groupby_last_float32 = Benchmark('data2.groupby(labels).last()', setup, start_date=datetime(2013, 1, 1)) +groupby_nth_float64 = Benchmark('data.groupby(labels).nth(0)', setup, + start_date=datetime(2012, 5, 1)) + +groupby_nth_float32 = Benchmark('data2.groupby(labels).nth(0)', setup, + start_date=datetime(2013, 1, 1)) + # with datetimes (GH7555) setup = common_setup + """ df = DataFrame({'a' : date_range('1/1/2011',periods=100000,freq='s'),'b' : range(100000)}) """ -groupby_mixed_first = Benchmark('df.groupby("b").first()', setup, +groupby_first_datetimes = Benchmark('df.groupby("b").first()', setup, + start_date=datetime(2013, 5, 1)) +groupby_last_datetimes = Benchmark('df.groupby("b").last()', setup, + start_date=datetime(2013, 5, 1)) +groupby_nth_datetimes = Benchmark('df.groupby("b").nth(0)', setup, + start_date=datetime(2013, 5, 1)) + +# with object +setup = common_setup + """ +df = DataFrame({'a' : ['foo']*100000,'b' : range(100000)}) +""" + +groupby_first_object = Benchmark('df.groupby("b").first()', setup, + start_date=datetime(2013, 5, 1)) +groupby_last_object = Benchmark('df.groupby("b").last()', setup, + start_date=datetime(2013, 5, 1)) +groupby_nth_object = Benchmark('df.groupby("b").nth(0)', setup, start_date=datetime(2013, 5, 1)) #----------------------------------------------------------------------
related #7555 ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_object_last | 15.2340 | 4175.6846 | 0.0036 | groupby_object_first | 15.6957 | 4166.6920 | 0.0038 | groupby_transform_ufunc | 5.4990 | 5.7740 | 0.9524 | groupby_indices | 6.7443 | 7.0153 | 0.9614 | groupby_multi_different_functions | 12.6866 | 12.9040 | 0.9832 | groupby_multi_series_op | 13.2560 | 13.4466 | 0.9858 | groupby_multi_cython | 15.0924 | 15.2720 | 0.9882 | groupby_multi_different_numpy_functions | 11.1883 | 11.2376 | 0.9956 | groupby_first_float32 | 3.4907 | 3.4963 | 0.9984 | groupby_last_float32 | 3.6677 | 3.6586 | 1.0025 | groupby_frame_apply_overhead | 9.2754 | 9.2440 | 1.0034 | groupby_first | 3.5706 | 3.5577 | 1.0036 | groupby_pivot_table | 17.2007 | 17.1343 | 1.0039 | groupby_transform | 174.6823 | 173.8520 | 1.0048 | groupby_frame_singlekey_integer | 2.4533 | 2.4397 | 1.0056 | groupby_frame_nth | 2.7863 | 2.7630 | 1.0085 | groupby_simple_compress_timing | 35.6836 | 35.3360 | 1.0098 | groupby_sum_booleans | 1.2880 | 1.2727 | 1.0121 | groupby_transform2 | 162.5650 | 160.2323 | 1.0146 | groupby_apply_dict_return | 39.7460 | 39.1597 | 1.0150 | groupby_multi_size | 23.2656 | 22.9087 | 1.0156 | groupby_datetimes_last | 12.0660 | 11.8797 | 1.0157 | groupby_series_simple_cython | 192.2614 | 188.9360 | 1.0176 | groupby_last | 3.8017 | 3.7254 | 1.0205 | groupby_int_count | 4.6777 | 4.5680 | 1.0240 | groupby_mixed_first | 12.3363 | 12.0014 | 1.0279 | groupby_frame_median | 7.6403 | 7.4034 | 1.0320 | groupby_frame_apply | 45.0197 | 43.6123 | 1.0323 | groupby_object_nth | 430.5693 | 416.2673 | 1.0344 | groupby_datetimes_nth | 428.6557 | 414.3833 | 1.0344 | groupby_frame_cython_many_columns | 4.3843 | 4.2194 | 1.0391 | groupby_multi_python | 142.0707 | 135.8123 | 1.0461 | groupby_nth_float32 | 63.5343 | 59.9463 | 1.0599 | 
groupby_nth_float64 | 64.0316 | 59.8179 | 1.0704 | groupby_multi_count | 9.0540 | 8.2114 | 1.1026 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [e28ec0d] : BUG/PERF: perf issues in object groupby aggregations (GH7555) Base [69bb0e8] : Merge pull request #7556 from onesandzeroes/expand-grid DOC: Cookbook recipe for emulating R's expand.grid() (#7426) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7568
2014-06-25T12:52:08Z
2014-06-25T13:48:23Z
2014-06-25T13:48:23Z
2014-06-25T13:48:23Z
DOC: Fix parameter in reindex_axis docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 756de479a471a..4c963622acf00 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1663,7 +1663,7 @@ def _reindex_multi(self, axes, copy, fill_value): Parameters ---------- - index : array-like, optional + labels : array-like New labels / index to conform to. Preferably an Index object to avoid duplicating data axis : %(axes_single_arg)s
https://api.github.com/repos/pandas-dev/pandas/pulls/7566
2014-06-25T05:05:51Z
2014-07-06T19:10:16Z
2014-07-06T19:10:16Z
2014-07-08T02:12:58Z
ENH: Implement _Openpyxl2Writer for pandas.io.excel
diff --git a/doc/source/io.rst b/doc/source/io.rst index 273cbd5daae7d..06600208758bd 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2009,7 +2009,12 @@ files if `Xlsxwriter`_ is not available. .. _xlwt: http://www.python-excel.org To specify which writer you want to use, you can pass an engine keyword -argument to ``to_excel`` and to ``ExcelWriter``. +argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: + +- `'openpyxl`': This includes stable support for OpenPyxl 1.6.1 up to but + not including 2.0.0, and experimental support for OpenPyxl 2.0.0 and later. +- `'xlsxwriter'` +- `'xlwt'` .. code-block:: python diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 6db3fcaa832c0..6ae767f90208d 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -673,6 +673,12 @@ Enhancements +- Added experimental compatibility with openpyxl v2. The ``DataFrame.to_excel`` + method ``engine`` keyword now recognizes ``openpyxl1`` and ``openpyxl2`` + which will explicitly require openpyxl v1 and v2 respectively, failing if + the requested version is not available. The ``openpyxl`` engine is a now a + meta-engine that automatically uses whichever version of openpyxl is + installed. (:issue:`7177`) diff --git a/pandas/compat/openpyxl_compat.py b/pandas/compat/openpyxl_compat.py index d0c2a807e14db..266aded2071b6 100644 --- a/pandas/compat/openpyxl_compat.py +++ b/pandas/compat/openpyxl_compat.py @@ -10,15 +10,26 @@ stop_ver = '2.0.0' -def is_compat(): - """Detect whether the installed version of openpyxl is supported. +def is_compat(major_ver=1): + """Detect whether the installed version of openpyxl is supported + Parameters + ---------- + ver : int + 1 requests compatibility status among the 1.x.y series + 2 requests compatibility status of 2.0.0 and later Returns ------- compat : bool - ``True`` if openpyxl is installed and is between versions 1.6.1 and - 2.0.0, ``False`` otherwise. 
+ ``True`` if openpyxl is installed and is a compatible version. + ``False`` otherwise. """ import openpyxl ver = LooseVersion(openpyxl.__version__) - return LooseVersion(start_ver) <= ver < LooseVersion(stop_ver) + if major_ver == 1: + return LooseVersion(start_ver) <= ver < LooseVersion(stop_ver) + elif major_ver == 2: + return LooseVersion(stop_ver) <= ver + else: + raise ValueError('cannot test for openpyxl compatibility with ver {0}' + .format(major_ver)) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index f81cf6502a0e6..84f04188b7906 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -46,6 +46,20 @@ def register_writer(klass): def get_writer(engine_name): + if engine_name == 'openpyxl': + try: + import openpyxl + + # with version-less openpyxl engine + # make sure we make the intelligent choice for the user + if LooseVersion(openpyxl.__version__) < '2.0.0': + return _writers['openpyxl1'] + else: + return _writers['openpyxl2'] + except ImportError: + # fall through to normal exception handling below + pass + try: return _writers[engine_name] except KeyError: @@ -527,20 +541,20 @@ def close(self): return self.save() -class _OpenpyxlWriter(ExcelWriter): - engine = 'openpyxl' +class _Openpyxl1Writer(ExcelWriter): + engine = 'openpyxl1' supported_extensions = ('.xlsx', '.xlsm') + openpyxl_majorver = 1 def __init__(self, path, engine=None, **engine_kwargs): - if not openpyxl_compat.is_compat(): + if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver): raise ValueError('Installed openpyxl is not supported at this ' - 'time. Use >={0} and ' - '<{1}.'.format(openpyxl_compat.start_ver, - openpyxl_compat.stop_ver)) + 'time. Use {0}.x.y.' + .format(self.openpyxl_majorver)) # Use the openpyxl module as the Excel writer. from openpyxl.workbook import Workbook - super(_OpenpyxlWriter, self).__init__(path, **engine_kwargs) + super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs) # Create workbook object with default optimized_write=True. 
self.book = Workbook() @@ -632,9 +646,427 @@ def _convert_to_style(cls, style_dict): return xls_style +register_writer(_Openpyxl1Writer) + + +class _OpenpyxlWriter(_Openpyxl1Writer): + engine = 'openpyxl' + register_writer(_OpenpyxlWriter) +class _Openpyxl2Writer(_Openpyxl1Writer): + """ + Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565). + """ + engine = 'openpyxl2' + openpyxl_majorver = 2 + + def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + # Write the frame cells using openpyxl. + from openpyxl.cell import get_column_letter + + sheet_name = self._get_sheet_name(sheet_name) + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = self.book.create_sheet() + wks.title = sheet_name + self.sheets[sheet_name] = wks + + for cell in cells: + colletter = get_column_letter(startcol + cell.col + 1) + xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1)) + xcell.value = _conv_value(cell.val) + style_kwargs = {} + + # Apply format codes before cell.style to allow override + if isinstance(cell.val, datetime.datetime): + style_kwargs.update(self._convert_to_style_kwargs({ + 'number_format':{'format_code': self.datetime_format}})) + elif isinstance(cell.val, datetime.date): + style_kwargs.update(self._convert_to_style_kwargs({ + 'number_format':{'format_code': self.date_format}})) + + if cell.style: + style_kwargs.update(self._convert_to_style_kwargs(cell.style)) + + if style_kwargs: + xcell.style = xcell.style.copy(**style_kwargs) + + if cell.mergestart is not None and cell.mergeend is not None: + cletterstart = get_column_letter(startcol + cell.col + 1) + cletterend = get_column_letter(startcol + cell.mergeend + 1) + + wks.merge_cells('%s%s:%s%s' % (cletterstart, + startrow + cell.row + 1, + cletterend, + startrow + cell.mergestart + 1)) + + # Excel requires that the format of the first cell in a merged + # range is repeated in the rest of the merged range. 
+ if style: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. It is already handled. + continue + colletter = get_column_letter(col) + xcell = wks.cell("%s%s" % (colletter, row)) + xcell.style = xcell.style.copy(**style_kwargs) + + @classmethod + def _convert_to_style_kwargs(cls, style_dict): + """ + Convert a style_dict to a set of kwargs suitable for initializing + or updating-on-copy an openpyxl v2 style object + Parameters + ---------- + style_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'font' + 'fill' + 'border' ('borders') + 'alignment' + 'number_format' + 'protection' + Returns + ------- + style_kwargs : dict + A dict with the same, normalized keys as ``style_dict`` but each + value has been replaced with a native openpyxl style object of the + appropriate class. + """ + + _style_key_map = { + 'borders': 'border', + } + + style_kwargs = {} + for k, v in style_dict.items(): + if k in _style_key_map: + k = _style_key_map[k] + _conv_to_x = getattr(cls, '_convert_to_{0}'.format(k), + lambda x: None) + new_v = _conv_to_x(v) + if new_v: + style_kwargs[k] = new_v + + return style_kwargs + + + @classmethod + def _convert_to_color(cls, color_spec): + """ + Convert ``color_spec`` to an openpyxl v2 Color object + Parameters + ---------- + color_spec : str, dict + A 32-bit ARGB hex string, or a dict with zero or more of the + following keys. 
+ 'rgb' + 'indexed' + 'auto' + 'theme' + 'tint' + 'index' + 'type' + Returns + ------- + color : openpyxl.styles.Color + """ + + from openpyxl.styles import Color + + if isinstance(color_spec, str): + return Color(color_spec) + else: + return Color(**color_spec) + + + @classmethod + def _convert_to_font(cls, font_dict): + """ + Convert ``font_dict`` to an openpyxl v2 Font object + Parameters + ---------- + font_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'name' + 'size' ('sz') + 'bold' ('b') + 'italic' ('i') + 'underline' ('u') + 'strikethrough' ('strike') + 'color' + 'vertAlign' ('vertalign') + 'charset' + 'scheme' + 'family' + 'outline' + 'shadow' + 'condense' + Returns + ------- + font : openpyxl.styles.Font + """ + + from openpyxl.styles import Font + + _font_key_map = { + 'sz': 'size', + 'b': 'bold', + 'i': 'italic', + 'u': 'underline', + 'strike': 'strikethrough', + 'vertalign': 'vertAlign', + } + + font_kwargs = {} + for k, v in font_dict.items(): + if k in _font_key_map: + k = _font_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + font_kwargs[k] = v + + return Font(**font_kwargs) + + + @classmethod + def _convert_to_stop(cls, stop_seq): + """ + Convert ``stop_seq`` to a list of openpyxl v2 Color objects, + suitable for initializing the ``GradientFill`` ``stop`` parameter. + Parameters + ---------- + stop_seq : iterable + An iterable that yields objects suitable for consumption by + ``_convert_to_color``. 
+ Returns + ------- + stop : list of openpyxl.styles.Color + """ + + return map(cls._convert_to_color, stop_seq) + + + @classmethod + def _convert_to_fill(cls, fill_dict): + """ + Convert ``fill_dict`` to an openpyxl v2 Fill object + Parameters + ---------- + fill_dict : dict + A dict with one or more of the following keys (or their synonyms), + 'fill_type' ('patternType', 'patterntype') + 'start_color' ('fgColor', 'fgcolor') + 'end_color' ('bgColor', 'bgcolor') + or one or more of the following keys (or their synonyms). + 'type' ('fill_type') + 'degree' + 'left' + 'right' + 'top' + 'bottom' + 'stop' + Returns + ------- + fill : openpyxl.styles.Fill + """ + + from openpyxl.styles import PatternFill, GradientFill + + _pattern_fill_key_map = { + 'patternType': 'fill_type', + 'patterntype': 'fill_type', + 'fgColor': 'start_color', + 'fgcolor': 'start_color', + 'bgColor': 'end_color', + 'bgcolor': 'end_color', + } + + _gradient_fill_key_map = { + 'fill_type': 'type', + } + + pfill_kwargs = {} + gfill_kwargs = {} + for k, v in fill_dict.items(): + pk = gk = None + if k in _pattern_fill_key_map: + pk = _pattern_fill_key_map[k] + if k in _gradient_fill_key_map: + gk = _gradient_fill_key_map[k] + if pk in ['start_color', 'end_color']: + v = cls._convert_to_color(v) + if gk == 'stop': + v = cls._convert_to_stop(v) + if pk: + pfill_kwargs[pk] = v + elif gk: + gfill_kwargs[gk] = v + else: + pfill_kwargs[k] = v + gfill_kwargs[k] = v + + try: + return PatternFill(**pfill_kwargs) + except TypeError: + return GradientFill(**gfill_kwargs) + + + @classmethod + def _convert_to_side(cls, side_spec): + """ + Convert ``side_spec`` to an openpyxl v2 Side object + Parameters + ---------- + side_spec : str, dict + A string specifying the border style, or a dict with zero or more + of the following keys (or their synonyms). 
+ 'style' ('border_style') + 'color' + Returns + ------- + side : openpyxl.styles.Side + """ + + from openpyxl.styles import Side + + _side_key_map = { + 'border_style': 'style', + } + + if isinstance(side_spec, str): + return Side(style=side_spec) + + side_kwargs = {} + for k, v in side_spec.items(): + if k in _side_key_map: + k = _side_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + side_kwargs[k] = v + + return Side(**side_kwargs) + + + @classmethod + def _convert_to_border(cls, border_dict): + """ + Convert ``border_dict`` to an openpyxl v2 Border object + Parameters + ---------- + border_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'left' + 'right' + 'top' + 'bottom' + 'diagonal' + 'diagonal_direction' + 'vertical' + 'horizontal' + 'diagonalUp' ('diagonalup') + 'diagonalDown' ('diagonaldown') + 'outline' + Returns + ------- + border : openpyxl.styles.Border + """ + + from openpyxl.styles import Border + + _border_key_map = { + 'diagonalup': 'diagonalUp', + 'diagonaldown': 'diagonalDown', + } + + border_kwargs = {} + for k, v in border_dict.items(): + if k in _border_key_map: + k = _border_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + if k in ['left', 'right', 'top', 'bottom', 'diagonal']: + v = cls._convert_to_side(v) + border_kwargs[k] = v + + return Border(**border_kwargs) + + + @classmethod + def _convert_to_alignment(cls, alignment_dict): + """ + Convert ``alignment_dict`` to an openpyxl v2 Alignment object + Parameters + ---------- + alignment_dict : dict + A dict with zero or more of the following keys (or their synonyms). 
+ 'horizontal' + 'vertical' + 'text_rotation' + 'wrap_text' + 'shrink_to_fit' + 'indent' + Returns + ------- + alignment : openpyxl.styles.Alignment + """ + + from openpyxl.styles import Alignment + + return Alignment(**alignment_dict) + + + @classmethod + def _convert_to_number_format(cls, number_format_dict): + """ + Convert ``number_format_dict`` to an openpyxl v2 NumberFormat object. + Parameters + ---------- + number_format_dict : dict + A dict with zero or more of the following keys. + 'format_code' + Returns + ------- + number_format : openpyxl.styles.NumberFormat + """ + + from openpyxl.styles import NumberFormat + + return NumberFormat(**number_format_dict) + + + @classmethod + def _convert_to_protection(cls, protection_dict): + """ + Convert ``protection_dict`` to an openpyxl v2 Protection object. + Parameters + ---------- + protection_dict : dict + A dict with zero or more of the following keys. + 'locked' + 'hidden' + Returns + ------- + """ + + from openpyxl.styles import Protection + + return Protection(**protection_dict) + + +register_writer(_Openpyxl2Writer) + + class _XlwtWriter(ExcelWriter): engine = 'xlwt' supported_extensions = ('.xls',) diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 96db535347921..17407e3a864e2 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -17,8 +17,8 @@ from pandas import DataFrame, Index, MultiIndex from pandas.io.parsers import read_csv from pandas.io.excel import ( - ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter, - register_writer, _XlsxWriter + ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer, + _Openpyxl2Writer, register_writer, _XlsxWriter ) from pandas.io.common import URLError from pandas.util.testing import ensure_clean @@ -1127,35 +1127,39 @@ def test_swapped_columns(self): tm.assert_series_equal(write_frame['B'], read_frame['B']) -def raise_wrapper(orig_method): - @functools.wraps(orig_method) - def 
wrapped(self, *args, **kwargs): - _skip_if_no_openpyxl() - if openpyxl_compat.is_compat(): - orig_method(self, *args, **kwargs) - else: - msg = 'Installed openpyxl is not supported at this time\. Use.+' - with tm.assertRaisesRegexp(ValueError, msg): +def raise_wrapper(major_ver): + def versioned_raise_wrapper(orig_method): + @functools.wraps(orig_method) + def wrapped(self, *args, **kwargs): + _skip_if_no_openpyxl() + if openpyxl_compat.is_compat(major_ver=major_ver): orig_method(self, *args, **kwargs) - return wrapped + else: + msg = 'Installed openpyxl is not supported at this time\. Use.+' + with tm.assertRaisesRegexp(ValueError, msg): + orig_method(self, *args, **kwargs) + return wrapped + return versioned_raise_wrapper -def raise_on_incompat_version(cls): - methods = filter(operator.methodcaller('startswith', 'test_'), dir(cls)) - for method in methods: - setattr(cls, method, raise_wrapper(getattr(cls, method))) - return cls +def raise_on_incompat_version(major_ver): + def versioned_raise_on_incompat_version(cls): + methods = filter(operator.methodcaller('startswith', 'test_'), dir(cls)) + for method in methods: + setattr(cls, method, raise_wrapper(major_ver)(getattr(cls, method))) + return cls + return versioned_raise_on_incompat_version -@raise_on_incompat_version +@raise_on_incompat_version(1) class OpenpyxlTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' - engine_name = 'openpyxl' + engine_name = 'openpyxl1' check_skip = staticmethod(lambda *args, **kwargs: None) def test_to_excel_styleconverter(self): _skip_if_no_openpyxl() - if not openpyxl_compat.is_compat(): + if not openpyxl_compat.is_compat(major_ver=1): raise nose.SkipTest('incompatiable openpyxl version') import openpyxl @@ -1167,7 +1171,7 @@ def test_to_excel_styleconverter(self): "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} - xlsx_style = _OpenpyxlWriter._convert_to_style(hstyle) + xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle) 
self.assertTrue(xlsx_style.font.bold) self.assertEqual(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.top.border_style) @@ -1183,6 +1187,70 @@ def test_to_excel_styleconverter(self): xlsx_style.alignment.vertical) +@raise_on_incompat_version(2) +class Openpyxl2Tests(ExcelWriterBase, tm.TestCase): + ext = '.xlsx' + engine_name = 'openpyxl2' + check_skip = staticmethod(lambda *args, **kwargs: None) + + def test_to_excel_styleconverter(self): + _skip_if_no_openpyxl() + if not openpyxl_compat.is_compat(major_ver=2): + raise nose.SkipTest('incompatiable openpyxl version') + + from openpyxl import styles + + hstyle = { + "font": { + "color": '00FF0000', + "bold": True, + }, + "borders": { + "top": "thin", + "right": "thin", + "bottom": "thin", + "left": "thin", + }, + "alignment": { + "horizontal": "center", + "vertical": "top", + }, + "fill": { + "patternType": 'solid', + 'fgColor': { + 'rgb': '006666FF', + 'tint': 0.3, + }, + }, + "number_format": { + "format_code": "0.00" + }, + "protection": { + "locked": True, + "hidden": False, + }, + } + + font_color = styles.Color('00FF0000') + font = styles.Font(bold=True, color=font_color) + side = styles.Side(style=styles.borders.BORDER_THIN) + border = styles.Border(top=side, right=side, bottom=side, left=side) + alignment = styles.Alignment(horizontal='center', vertical='top') + fill_color = styles.Color(rgb='006666FF', tint=0.3) + fill = styles.PatternFill(patternType='solid', fgColor=fill_color) + number_format = styles.NumberFormat(format_code='0.00') + protection = styles.Protection(locked=True, hidden=False) + + kw = _Openpyxl2Writer._convert_to_style_kwargs(hstyle) + self.assertEqual(kw['font'], font) + self.assertEqual(kw['border'], border) + self.assertEqual(kw['alignment'], alignment) + self.assertEqual(kw['fill'], fill) + self.assertEqual(kw['number_format'], number_format) + self.assertEqual(kw['protection'], protection) + + + class XlwtTests(ExcelWriterBase, tm.TestCase): ext = '.xls' engine_name = 'xlwt' 
@@ -1216,7 +1284,6 @@ class XlsxWriterTests(ExcelWriterBase, tm.TestCase): check_skip = staticmethod(_skip_if_no_xlsxwriter) -@raise_on_incompat_version class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' @@ -1254,9 +1321,9 @@ def test_ExcelWriter_dispatch(self): writer_klass = _XlsxWriter except ImportError: _skip_if_no_openpyxl() - if not openpyxl_compat.is_compat(): + if not openpyxl_compat.is_compat(major_ver=1): raise nose.SkipTest('incompatible openpyxl version') - writer_klass = _OpenpyxlWriter + writer_klass = _Openpyxl1Writer with ensure_clean('.xlsx') as path: writer = ExcelWriter(path)
This patch is an implementation of a `pandas.io.excel.ExcelWriter` subclass that is compatible with the style interface of `openpyxl` 2.0.0 or later. closes #7177
https://api.github.com/repos/pandas-dev/pandas/pulls/7565
2014-06-25T04:06:18Z
2014-09-20T08:53:31Z
2014-09-20T08:53:31Z
2014-09-22T01:40:54Z
TST: Remove else after except in tests for io.data.Options.
diff --git a/pandas/io/data.py b/pandas/io/data.py index dab9862bae2e2..67a841a27f992 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -680,6 +680,10 @@ def _get_option_tables(self, month, year, expiry): root = self._parse_url(url) tables = root.xpath('.//table') + ntables = len(tables) + if ntables == 0: + raise RemoteDataError("No tables found at {0!r}".format(url)) + table_name = '_tables' + m1 + str(year)[-2:] setattr(self, table_name, tables) @@ -723,9 +727,7 @@ def _get_option_data(self, month, year, expiry, name): ntables = len(tables) table_loc = self._TABLE_LOC[name] - if ntables == 0: - raise RemoteDataError("No tables found at {0!r}".format(url)) - elif table_loc - 1 > ntables: + if table_loc - 1 > ntables: raise RemoteDataError("Table location {0} invalid, {1} tables" " found".format(table_loc, ntables)) diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 2f3da23612449..d2080fe5e1262 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -266,8 +266,7 @@ def test_get_options_data(self): options = self.aapl.get_options_data(expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) - else: - assert len(options) > 1 + self.assertTrue(len(options) > 1) @network def test_get_near_stock_price(self): @@ -276,9 +275,6 @@ def test_get_near_stock_price(self): expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) - else: - assert len(options) > 1 - self.assertTrue(len(options) > 1) @network @@ -287,8 +283,7 @@ def test_get_call_data(self): calls = self.aapl.get_call_data(expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) - else: - assert len(calls) > 1 + self.assertTrue(len(calls) > 1) @network def test_get_put_data(self): @@ -296,33 +291,30 @@ def test_get_put_data(self): puts = self.aapl.get_put_data(expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) - else: - assert len(puts) > 1 + self.assertTrue(len(puts) > 1) @network def 
test_get_expiry_months(self): try: dates = self.aapl._get_expiry_months() - except RemoteDataError: - raise nose.SkipTest("RemoteDataError thrown no dates found") + except RemoteDataError as e: + raise nose.SkipTest(e) self.assertTrue(len(dates) > 1) @network def test_get_all_data(self): try: data = self.aapl.get_all_data(put=True) - except RemoteDataError: - raise nose.SkipTest("RemoteDataError thrown") - + except RemoteDataError as e: + raise nose.SkipTest(e) self.assertTrue(len(data) > 1) @network def test_get_all_data_calls_only(self): try: data = self.aapl.get_all_data(call=True, put=False) - except RemoteDataError: - raise nose.SkipTest("RemoteDataError thrown") - + except RemoteDataError as e: + raise nose.SkipTest(e) self.assertTrue(len(data) > 1) @network
Also moved no tables found error to get_options_tables method (url was not defined in the get_option_data method). Fixes #7561
https://api.github.com/repos/pandas-dev/pandas/pulls/7564
2014-06-25T02:48:04Z
2014-06-25T14:50:17Z
2014-06-25T14:50:17Z
2014-06-25T14:50:24Z
PERF: vbench for mixed groupby with datetime (GH7555)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 44541033fbc43..30617b76b91eb 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -143,7 +143,7 @@ Performance - Improvements in dtype inference for numeric operations involving yielding performance gains for dtypes: ``int64``, ``timedelta64``, ``datetime64`` (:issue:`7223`) - Improvements in Series.transform for significant performance gains (:issue:`6496`) - Improvements in DataFrame.transform with ufuncs and built-in grouper functions for signifcant performance gains (:issue:`7383`) - +- Regression in groupby aggregation of datetime64 dtypes (:issue:`7555`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 4d3927428cef2..c2debb9bfe1c0 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2332,17 +2332,16 @@ def _cython_agg_blocks(self, how, numeric_only=True): data = data.get_numeric_data(copy=False) for block in data.blocks: - values = block.values - is_numeric = is_numeric_dtype(values.dtype) + values = block._try_operate(block.values) - if is_numeric: + if block.is_numeric: values = com.ensure_float(values) result, _ = self.grouper.aggregate(values, how, axis=agg_axis) # see if we can cast the block back to the original dtype - result = block._try_cast_result(result) + result = block._try_coerce_and_cast_result(result) newb = make_block(result, placement=block.mgr_locs) new_blocks.append(newb) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 6b2d6bcfe3c80..8100b98d6e42d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -412,6 +412,11 @@ def _try_coerce_result(self, result): """ reverse of try_coerce_args """ return result + def _try_coerce_and_cast_result(self, result, dtype=None): + result = self._try_coerce_result(result) + result = self._try_cast_result(result, dtype=dtype) + return result + def _try_fill(self, value): return value @@ -513,8 +518,7 @@ def setitem(self, indexer, value): dtype, _ = 
_infer_dtype_from_scalar(value) else: dtype = 'infer' - values = self._try_coerce_result(values) - values = self._try_cast_result(values, dtype) + values = self._try_coerce_and_cast_result(values, dtype) return [make_block(transf(values), ndim=self.ndim, placement=self.mgr_locs, fastpath=True)] diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index eac313481aca7..2b6c6f55e5776 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -244,6 +244,13 @@ def f(): groupby_last_float32 = Benchmark('data2.groupby(labels).last()', setup, start_date=datetime(2013, 1, 1)) +# with datetimes (GH7555) +setup = common_setup + """ +df = DataFrame({'a' : date_range('1/1/2011',periods=100000,freq='s'),'b' : range(100000)}) +""" + +groupby_mixed_first = Benchmark('df.groupby("b").first()', setup, + start_date=datetime(2013, 5, 1)) #---------------------------------------------------------------------- # groupby_indices replacement, chop up Series
closes #7555 ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_mixed_first | 10.8150 | 3900.5923 | 0.0028 | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [bc5599a] : PERF: vbench for mixed groupby with datetime (GH7555) PERF: perform coercing and casting on groupby agg by blocks Base [2bd4517] : Merge pull request #7544 from sinhrks/dtmixin ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7560
2014-06-24T13:13:05Z
2014-06-24T13:32:37Z
2014-06-24T13:32:37Z
2014-06-24T13:32:37Z
DOC: Cookbook recipe for emulating R's expand.grid() (#7426)
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index b16c71bffd64d..283cf0438857a 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -663,3 +663,25 @@ To globally provide aliases for axis names, one can define these 2 functions: df2 = DataFrame(randn(3,2),columns=['c1','c2'],index=['i1','i2','i3']) df2.sum(axis='myaxis2') clear_axis_alias(DataFrame,'columns', 'myaxis2') + +Creating Example Data +--------------------- + +To create a dataframe from every combination of some given values, like R's ``expand.grid()`` +function, we can create a dict where the keys are column names and the values are lists +of the data values: + +.. ipython:: python + + import itertools + + def expand_grid(data_dict): + rows = itertools.product(*data_dict.values()) + return pd.DataFrame.from_records(rows, columns=data_dict.keys()) + + df = expand_grid( + {'height': [60, 70], + 'weight': [100, 140, 180], + 'sex': ['Male', 'Female']} + ) + df
closes #7426 As suggested by @jreback in #7426, I've added a quick cookbook recipe for emulating R's `expand.grid()`, which creates a dataframe from every combination of the values you give it. None of the existing sections really seemed to apply, so I've added it in a new section. Example usage: ``` python import itertools import pandas as pd def expand_grid(data_dict): rows = itertools.product(*data_dict.values()) return pd.DataFrame.from_records(rows, columns=data_dict.keys()) df = expand_grid( {'height': [60, 70], 'weight': [100, 140, 180], 'sex': ['Male', 'Female']} ) df Out[2]: sex weight height 0 Male 100 60 1 Male 100 70 2 Male 140 60 3 Male 140 70 4 Male 180 60 5 Male 180 70 6 Female 100 60 7 Female 100 70 8 Female 140 60 9 Female 140 70 10 Female 180 60 11 Female 180 70 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7556
2014-06-24T08:49:09Z
2014-06-25T05:01:27Z
2014-06-25T05:01:27Z
2014-06-25T05:01:27Z
DOC: fix docstring for DataFrame.interpolate
diff --git a/pandas/core/common.py b/pandas/core/common.py index c0432b53e346a..8791dcc124a6e 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1392,7 +1392,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): def _clean_interp_method(method, order=None, **kwargs): - valid = ['linear', 'time', 'values', 'nearest', 'zero', 'slinear', + valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh', 'piecewise_polynomial', 'pchip', 'spline'] @@ -1457,7 +1457,7 @@ def _interp_limit(invalid, limit): result.fill(np.nan) return result - if method in ['linear', 'time', 'values']: + if method in ['linear', 'time', 'index', 'values']: if method in ('values', 'index'): inds = np.asarray(xvalues) # hack for DatetimeIndex, #1646 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9b1ddc1fcf6f2..59a457229d512 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2532,7 +2532,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, Parameters ---------- - method : {'linear', 'time', 'values', 'index' 'nearest', 'zero', + method : {'linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'polynomial', 'spline' 'piecewise_polynomial', 'pchip'} @@ -2540,7 +2540,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, spaced. 
default * 'time': interpolation works on daily and higher resolution data to interpolate given length of interval - * 'index': use the actual numerical values of the index + * 'index', 'values': use the actual numerical values of the index * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial' is passed to `scipy.interpolate.interp1d` with the order given both diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 1a123eda601a2..82447635473a3 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -512,7 +512,7 @@ def test_interpolate_index_values(self): vals = s.index.values.astype(float) - result = s.interpolate(method='values') + result = s.interpolate(method='index') expected = s.copy() bad = isnull(expected.values) @@ -522,6 +522,12 @@ def test_interpolate_index_values(self): assert_series_equal(result[bad], expected) + # 'values' is synonymous with 'index' for the method kwarg + other_result = s.interpolate(method='values') + + assert_series_equal(other_result, result) + assert_series_equal(other_result[bad], expected) + def test_interpolate_non_ts(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) with tm.assertRaises(ValueError):
In the old docstring, `'index'` is listed as a valid value for the `method` kwarg for `DataFrame.interpolate`. It looks like a typo, because there was no comma after `'index'`, and the explanation for `'index'` corresponded to `'values'`. Furthermore, it appears `'index'` is not a valid value for `method`: ``` python In [67]: df = pd.DataFrame([[1], [2], [np.nan], [3]], index=[0, 0.7, 1.1, 4]) In [68]: df Out[68]: 0 0.0 1 0.7 2 1.1 NaN 4.0 3 In [69]: df.interpolate(method="index") --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-69-4cef75cee92e> in <module>() ----> 1 df.interpolate(method="index") /home/david/miniconda3/envs/frackoptima/lib/python3.4/site-packages/pandas/core/generic.py in interp olate(self, method, axis, limit, inplace, downcast, **kwargs) 2580 inplace=inplace, 2581 downcast=downcast, -> 2582 **kwargs) 2583 if inplace: 2584 if axis == 1: /home/david/miniconda3/envs/frackoptima/lib/python3.4/site-packages/pandas/core/internals.py in inte rpolate(self, **kwargs) 2195 2196 def interpolate(self, **kwargs): -> 2197 return self.apply('interpolate', **kwargs) 2198 2199 def shift(self, **kwargs): /home/david/miniconda3/envs/frackoptima/lib/python3.4/site-packages/pandas/core/internals.py in appl y(self, f, axes, filter, do_integrity_check, **kwargs) 2162 copy=align_copy) 2163 -> 2164 applied = getattr(b, f)(**kwargs) 2165 2166 if isinstance(applied, list): /home/david/miniconda3/envs/frackoptima/lib/python3.4/site-packages/pandas/core/internals.py in inte rpolate(self, method, axis, index, values, inplace, limit, fill_value, coerce, downcast, **kwargs) 667 **kwargs) 668 --> 669 raise ValueError("invalid method '{0}' to interpolate.".format(method)) 670 671 def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, ValueError: invalid method 'index' to interpolate. 
In [70]: df.interpolate(method="values") Out[70]: 0 0.0 1.000000 0.7 2.000000 1.1 2.121212 4.0 3.000000 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7553
2014-06-23T18:11:36Z
2014-07-07T00:02:26Z
2014-07-07T00:02:26Z
2014-07-07T00:07:17Z
BUG: Bug in setitem with list-of-lists and single vs mixed types (GH7551)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index a5bcdc9ec4aff..cdc35564cce61 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -169,7 +169,7 @@ Bug Fixes - Bug in ``DataFrame.where`` with a symmetric shaped frame and a passed other of a DataFrame (:issue:`7506`) - Bug in Panel indexing with a multi-index axis (:issue:`7516`) - Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`) - +- Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:) - Bug in timeops with non-aligned Series (:issue:`7500`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 0c695a0c2d632..280c4073b0f94 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -419,14 +419,20 @@ def can_do_equal_len(): else: setter(item, np.nan) - # we have an equal len ndarray to our labels - elif isinstance(value, np.ndarray) and value.ndim == 2: + # we have an equal len ndarray/convertible to our labels + elif np.array(value).ndim == 2: + + # note that this coerces the dtype if we are mixed + # GH 7551 + value = np.array(value,dtype=object) if len(labels) != value.shape[1]: raise ValueError('Must have equal len keys and value ' 'when setting with an ndarray') for i, item in enumerate(labels): - setter(item, value[:, i]) + + # setting with a list, recoerces + setter(item, value[:, i].tolist()) # we have an equal len list/ndarray elif can_do_equal_len(): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 4ee6bd1d949a5..9b72d2f92182f 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1272,7 +1272,6 @@ def test_iloc_setitem_series(self): result = df.iloc[:,2:3] assert_frame_equal(result, expected) - def test_iloc_setitem_series(self): s = Series(np.random.randn(10), index=lrange(0,20,2)) s.iloc[1] = 1 @@ -1284,6 +1283,20 @@ def test_iloc_setitem_series(self): result = s.iloc[:4] assert_series_equal(result, 
expected) + def test_iloc_setitem_list_of_lists(self): + + # GH 7551 + # list-of-list is set incorrectly in mixed vs. single dtyped frames + df = DataFrame(dict(A = np.arange(5,dtype='int64'), B = np.arange(5,10,dtype='int64'))) + df.iloc[2:4] = [[10,11],[12,13]] + expected = DataFrame(dict(A = [0,1,10,12,4], B = [5,6,11,13,9])) + assert_frame_equal(df, expected) + + df = DataFrame(dict(A = list('abcde'), B = np.arange(5,10,dtype='int64'))) + df.iloc[2:4] = [['x',11],['y',13]] + expected = DataFrame(dict(A = ['a','b','x','y','e'], B = [5,6,11,13,9])) + assert_frame_equal(df, expected) + def test_iloc_getitem_multiindex(self): mi_labels = DataFrame(np.random.randn(4, 3), columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
closes #7551
https://api.github.com/repos/pandas-dev/pandas/pulls/7552
2014-06-23T16:37:05Z
2014-06-23T17:18:29Z
2014-06-23T17:18:29Z
2014-06-23T17:18:29Z
DOC: correct relase note
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c41bc13b18606..4d0308d377443 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -185,8 +185,8 @@ Bug Fixes - Bug in ``TimeGrouper`` doesn't exclude column specified by ``key`` (:issue:`7227`) - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) -- BUG in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) -- BUG in ``Index.delete`` does not preserve ``name`` and ``freq`` attributes (:issue:`7302`) +- Bug in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) +- Bug in ``Index.delete`` does not preserve ``name`` and ``freq`` attributes (:issue:`7302`) - Bug in ``DataFrame.query()``/``eval`` where local string variables with the @ sign were being treated as temporaries attempting to be deleted (:issue:`7300`). @@ -242,10 +242,10 @@ Bug Fixes - Bug in ``DatetimeIndex.to_period``, ``PeriodIndex.asobject``, ``PeriodIndex.to_timestamp`` doesn't preserve ``name`` (:issue:`7485`) - Bug in ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestanp`` handle ``NaT`` incorrectly (:issue:`7228`) -- BUG in ``offsets.apply``, ''rollforward`` and ``rollback`` may return normal ``datetime`` (:issue:`7502`) +- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may return normal ``datetime`` (:issue:`7502`) -- BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) +- Bug in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) - Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`)
Minor release note corrections.
https://api.github.com/repos/pandas-dev/pandas/pulls/7550
2014-06-23T13:32:37Z
2014-06-23T13:35:23Z
2014-06-23T13:35:23Z
2014-06-24T09:09:50Z
TST: some yahoo options tests missing network decorator
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index c13553d14b861..2f3da23612449 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -3,7 +3,7 @@ import warnings import nose from nose.tools import assert_equal -from datetime import datetime, date +from datetime import datetime import os import numpy as np @@ -21,6 +21,7 @@ else: from urllib2 import HTTPError + def _skip_if_no_lxml(): try: import lxml @@ -125,8 +126,8 @@ def test_yahoo(self): start = datetime(2010, 1, 1) end = datetime(2013, 1, 27) - self.assertEqual( web.DataReader("F", 'yahoo', start, - end)['Close'][-1], 13.68) + self.assertEqual(web.DataReader("F", 'yahoo', start, end)['Close'][-1], + 13.68) @network def test_yahoo_fails(self): @@ -200,17 +201,18 @@ def test_get_data_multiple_symbols(self): @network def test_get_data_multiple_symbols_two_dates(self): - pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') + pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', + 'JAN-31-12') result = pan.Close.ix['01-18-12'] self.assertEqual(len(result), 3) # sanity checking assert np.issubdtype(result.dtype, np.floating) - expected = np.array([[ 18.99, 28.4 , 25.18], - [ 18.58, 28.31, 25.13], - [ 19.03, 28.16, 25.52], - [ 18.81, 28.82, 25.87]]) + expected = np.array([[18.99, 28.4, 25.18], + [18.58, 28.31, 25.13], + [19.03, 28.16, 25.52], + [18.81, 28.82, 25.87]]) result = pan.Open.ix['Jan-15-12':'Jan-20-12'] self.assertEqual(expected.shape, result.shape) @@ -249,7 +251,6 @@ def setUpClass(cls): cls.root1 = cls.aapl._parse_url(cls.html1) cls.root2 = cls.aapl._parse_url(cls.html2) - @classmethod def tearDownClass(cls): super(TestYahooOptions, cls).tearDownClass() @@ -257,33 +258,29 @@ def tearDownClass(cls): @network def test_get_options_data(self): + # regression test GH6105 + self.assertRaises(ValueError, self.aapl.get_options_data, month=3) + self.assertRaises(ValueError, self.aapl.get_options_data, year=1992) + try: options = 
self.aapl.get_options_data(expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) else: - assert len(options)>1 - - - def test_get_options_data(self): - - # regression test GH6105 - self.assertRaises(ValueError, self.aapl.get_options_data, month=3) - self.assertRaises(ValueError, self.aapl.get_options_data, year=1992) + assert len(options) > 1 @network def test_get_near_stock_price(self): try: options = self.aapl.get_near_stock_price(call=True, put=True, - expiry=self.expiry) + expiry=self.expiry) except RemoteDataError as e: nose.SkipTest(e) else: - assert len(options)> 1 + assert len(options) > 1 self.assertTrue(len(options) > 1) - @network def test_get_call_data(self): try: @@ -291,7 +288,7 @@ def test_get_call_data(self): except RemoteDataError as e: nose.SkipTest(e) else: - assert len(calls)>1 + assert len(calls) > 1 @network def test_get_put_data(self): @@ -300,7 +297,7 @@ def test_get_put_data(self): except RemoteDataError as e: nose.SkipTest(e) else: - assert len(puts)>1 + assert len(puts) > 1 @network def test_get_expiry_months(self): @@ -328,18 +325,21 @@ def test_get_all_data_calls_only(self): self.assertTrue(len(data) > 1) + @network def test_sample_page_price_quote_time1(self): #Tests the weekend quote time format price, quote_time = self.aapl._get_underlying_price(self.root1) self.assertIsInstance(price, (int, float, complex)) self.assertIsInstance(quote_time, (datetime, Timestamp)) + @network def test_sample_page_price_quote_time2(self): #Tests the weekday quote time format price, quote_time = self.aapl._get_underlying_price(self.root2) self.assertIsInstance(price, (int, float, complex)) self.assertIsInstance(quote_time, (datetime, Timestamp)) + @network def test_sample_page_chg_float(self): #Tests that numeric columns with comma's are appropriately dealt with tables = self.root1.xpath('.//table') @@ -348,7 +348,6 @@ def test_sample_page_chg_float(self): self.assertEqual(option_data['Chg'].dtype, 'float64') - class 
TestOptionsWarnings(tm.TestCase): @classmethod def setUpClass(cls): @@ -383,9 +382,9 @@ def test_get_near_stock_price_warning(self): with assert_produces_warning(): try: options_near = self.aapl.get_near_stock_price(call=True, - put=True, - month=self.month, - year=self.year) + put=True, + month=self.month, + year=self.year) except RemoteDataError as e: nose.SkipTest(e) @@ -430,7 +429,7 @@ def test_read_fred(self): def test_read_famafrench(self): for name in ("F-F_Research_Data_Factors", "F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3", - "F-F_ST_Reversal_Factor","F-F_Momentum_Factor"): + "F-F_ST_Reversal_Factor", "F-F_Momentum_Factor"): ff = DataReader(name, "famafrench") assert ff assert isinstance(ff, dict) @@ -508,6 +507,7 @@ def test_fred_multi_bad_series(self): with tm.assertRaises(HTTPError): DataReader(names, data_source="fred") + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7549
2014-06-22T21:13:48Z
2014-06-22T21:48:58Z
2014-06-22T21:48:58Z
2014-06-22T21:49:00Z
Minor cleanups for nanops
diff --git a/pandas/core/common.py b/pandas/core/common.py index 92d60ae8d8847..3098fedf0fefc 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2175,94 +2175,98 @@ def is_number(obj): return isinstance(obj, (numbers.Number, np.number)) -def is_integer_dtype(arr_or_dtype): +def _get_dtype(arr_or_dtype): if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - else: - tipo = arr_or_dtype.dtype.type - return (issubclass(tipo, np.integer) and not - (issubclass(tipo, np.datetime64) or - issubclass(tipo, np.timedelta64))) + return arr_or_dtype + if isinstance(arr_or_dtype, type): + return np.dtype(arr_or_dtype) + return arr_or_dtype.dtype -def _is_int_or_datetime_dtype(arr_or_dtype): - # also timedelta64 +def _get_dtype_type(arr_or_dtype): if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - else: - tipo = arr_or_dtype.dtype.type + return arr_or_dtype.type + if isinstance(arr_or_dtype, type): + return np.dtype(arr_or_dtype).type + return arr_or_dtype.dtype.type + + +def _is_any_int_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.integer) +def is_integer_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return (issubclass(tipo, np.integer) and + not issubclass(tipo, (np.datetime64, np.timedelta64))) + + +def _is_int_or_datetime_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return (issubclass(tipo, np.integer) or + issubclass(tipo, (np.datetime64, np.timedelta64))) + + def is_datetime64_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - elif isinstance(arr_or_dtype, type): - tipo = np.dtype(arr_or_dtype).type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.datetime64) def is_datetime64_ns_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype - elif isinstance(arr_or_dtype, type): - tipo = np.dtype(arr_or_dtype) - else: - tipo = 
arr_or_dtype.dtype + tipo = _get_dtype(arr_or_dtype) return tipo == _NS_DTYPE def is_timedelta64_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - elif isinstance(arr_or_dtype, type): - tipo = np.dtype(arr_or_dtype).type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.timedelta64) def is_timedelta64_ns_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - elif isinstance(arr_or_dtype, type): - tipo = np.dtype(arr_or_dtype).type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return tipo == _TD_DTYPE -def needs_i8_conversion(arr_or_dtype): - return (is_datetime64_dtype(arr_or_dtype) or - is_timedelta64_dtype(arr_or_dtype)) +def _is_datetime_or_timedelta_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return issubclass(tipo, (np.datetime64, np.timedelta64)) + + +needs_i8_conversion = _is_datetime_or_timedelta_dtype def is_numeric_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, (np.number, np.bool_)) and not issubclass(tipo, (np.datetime64, np.timedelta64))) + def is_float_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.floating) +def _is_floating_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return isinstance(tipo, np.floating) + + +def is_bool_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return issubclass(tipo, np.bool_) + + def is_complex_dtype(arr_or_dtype): - if isinstance(arr_or_dtype, np.dtype): - tipo = arr_or_dtype.type - else: - tipo = arr_or_dtype.dtype.type + tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.complexfloating) +def 
is_object_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return issubclass(tipo, np.object_) + + def is_re(obj): return isinstance(obj, re._pattern_type) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 431cb1ac451c0..aa6140383a27a 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -14,7 +14,17 @@ import pandas.hashtable as _hash from pandas import compat, lib, algos, tslib from pandas.compat import builtins -from pandas.core.common import isnull, notnull, _values_from_object, is_float +from pandas.core.common import (isnull, notnull, _values_from_object, + _maybe_upcast_putmask, + ensure_float, _ensure_float64, + _ensure_int64, _ensure_object, + is_float, is_integer, is_complex, + is_float_dtype, _is_floating_dtype, + is_complex_dtype, is_integer_dtype, + is_bool_dtype, is_object_dtype, + is_datetime64_dtype, is_timedelta64_dtype, + _is_datetime_or_timedelta_dtype, + _is_int_or_datetime_dtype, _is_any_int_dtype) class disallow(object): @@ -90,8 +100,8 @@ def f(values, axis=None, skipna=True, **kwds): def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 - if dt != np.object_ and not issubclass(dt.type, (np.datetime64, - np.timedelta64)): + if (not is_object_dtype(dt) and + not _is_datetime_or_timedelta_dtype(dt)): # bottleneck does not properly upcast during the sum # so can overflow @@ -166,8 +176,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, # promote if needed else: - values, changed = com._maybe_upcast_putmask(values, mask, - fill_value) + values, changed = _maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() @@ -176,34 +185,29 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, # return a platform independent precision dtype dtype_max = dtype - if dtype.kind == 'i' and not issubclass(dtype.type, (np.bool, - np.datetime64, - np.timedelta64)): + if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 - elif 
dtype.kind in ['b'] or issubclass(dtype.type, np.bool): - dtype_max = np.int64 - elif dtype.kind in ['f']: + elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max def _isfinite(values): - if issubclass(values.dtype.type, (np.timedelta64, np.datetime64)): + if _is_datetime_or_timedelta_dtype(values): return isnull(values) - elif isinstance(values.dtype, object): - return ~np.isfinite(values.astype('float64')) - - return ~np.isfinite(values) + if (is_complex_dtype(values) or is_float_dtype(values) or + is_integer_dtype(values) or is_bool_dtype(values)): + return ~np.isfinite(values) + return ~np.isfinite(values.astype('float64')) def _na_ok_dtype(dtype): - return not issubclass(dtype.type, (np.integer, np.datetime64, - np.timedelta64)) + return not _is_int_or_datetime_dtype(dtype) def _view_if_needed(values): - if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): + if _is_datetime_or_timedelta_dtype(values): return values.view(np.int64) return values @@ -211,12 +215,12 @@ def _view_if_needed(values): def _wrap_results(result, dtype): """ wrap our results if needed """ - if issubclass(dtype.type, np.datetime64): + if is_datetime64_dtype(dtype): if not isinstance(result, np.ndarray): result = lib.Timestamp(result) else: result = result.view(dtype) - elif issubclass(dtype.type, np.timedelta64): + elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): # this is a scalar timedelta result! 
@@ -334,7 +338,7 @@ def _get_counts_nanvar(mask, axis, ddof): @disallow('M8') @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - if not isinstance(values.dtype.type, np.floating): + if not _is_floating_dtype(values): values = values.astype('f8') mask = isnull(values) @@ -353,7 +357,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1): def nansem(values, axis=None, skipna=True, ddof=1): var = nanvar(values, axis, skipna, ddof=ddof) - if not isinstance(values.dtype.type, np.floating): + if not _is_floating_dtype(values): values = values.astype('f8') mask = isnull(values) count, _ = _get_counts_nanvar(mask, axis, ddof) @@ -367,7 +371,7 @@ def nanmin(values, axis=None, skipna=True): fill_value_typ='+inf') # numpy 1.6.1 workaround in Python 3.x - if (values.dtype == np.object_ and compat.PY3): + if is_object_dtype(values) and compat.PY3: if values.ndim > 1: apply_ax = axis if axis is not None else 0 result = np.apply_along_axis(builtins.min, apply_ax, values) @@ -380,7 +384,7 @@ def nanmin(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis, dtype=dtype_max)) + result = ensure_float(values.sum(axis, dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -397,7 +401,7 @@ def nanmax(values, axis=None, skipna=True): fill_value_typ='-inf') # numpy 1.6.1 workaround in Python 3.x - if (values.dtype == np.object_ and compat.PY3): + if is_object_dtype(values) and compat.PY3: if values.ndim > 1: apply_ax = axis if axis is not None else 0 @@ -411,7 +415,7 @@ def nanmax(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis, dtype=dtype_max)) + result = ensure_float(values.sum(axis, dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -446,7 +450,7 @@ def nanargmin(values, axis=None, skipna=True): @disallow('M8') def 
nanskew(values, axis=None, skipna=True): - if not isinstance(values.dtype.type, np.floating): + if not _is_floating_dtype(values): values = values.astype('f8') mask = isnull(values) @@ -480,7 +484,7 @@ def nanskew(values, axis=None, skipna=True): @disallow('M8') def nankurt(values, axis=None, skipna=True): - if not isinstance(values.dtype.type, np.floating): + if not _is_floating_dtype(values): values = values.astype('f8') mask = isnull(values) @@ -515,7 +519,7 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8') def nanprod(values, axis=None, skipna=True): mask = isnull(values) - if skipna and not issubclass(values.dtype.type, np.integer): + if skipna and not _is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) @@ -644,9 +648,9 @@ def nancov(a, b, min_periods=None): def _ensure_numeric(x): if isinstance(x, np.ndarray): - if x.dtype.kind in ['i', 'b']: + if is_integer_dtype(x) or is_bool_dtype(x): x = x.astype(np.float64) - elif x.dtype == np.object_: + elif is_object_dtype(x): try: x = x.astype(np.complex128) except: @@ -654,7 +658,7 @@ def _ensure_numeric(x): else: if not np.any(x.imag): x = x.real - elif not (com.is_float(x) or com.is_integer(x) or com.is_complex(x)): + elif not (is_float(x) or is_integer(x) or is_complex(x)): try: x = float(x) except Exception: @@ -678,7 +682,7 @@ def f(x, y): result = op(x, y) if mask.any(): - if result.dtype == np.bool_: + if is_bool_dtype(result): result = result.astype('O') np.putmask(result, mask, np.nan) @@ -699,16 +703,16 @@ def unique1d(values): """ if np.issubdtype(values.dtype, np.floating): table = _hash.Float64HashTable(len(values)) - uniques = np.array(table.unique(com._ensure_float64(values)), + uniques = np.array(table.unique(_ensure_float64(values)), dtype=np.float64) elif np.issubdtype(values.dtype, np.datetime64): table = _hash.Int64HashTable(len(values)) - uniques = table.unique(com._ensure_int64(values)) + uniques = table.unique(_ensure_int64(values)) 
uniques = uniques.view('M8[ns]') elif np.issubdtype(values.dtype, np.integer): table = _hash.Int64HashTable(len(values)) - uniques = table.unique(com._ensure_int64(values)) + uniques = table.unique(_ensure_int64(values)) else: table = _hash.PyObjectHashTable(len(values)) - uniques = table.unique(com._ensure_object(values)) + uniques = table.unique(_ensure_object(values)) return uniques
This includes the requested fix for `nanops`, using `common.is_*_dtype` functions whenever possible.
https://api.github.com/repos/pandas-dev/pandas/pulls/7547
2014-06-22T15:03:28Z
2014-06-26T10:26:02Z
2014-06-26T10:26:02Z
2014-06-26T10:29:04Z
DOC: Use correct, non-deprecated arg name
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d770f8c8f853a..f702714fdc37e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1166,7 +1166,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', Missing data representation float_format : string, default None Format string for floating point numbers - cols : sequence, optional + columns : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is
Fix a minor typo in the `pandas.frame.to_excel` docstring.
https://api.github.com/repos/pandas-dev/pandas/pulls/7545
2014-06-22T00:57:09Z
2014-06-23T13:41:18Z
2014-06-23T13:41:18Z
2014-06-25T02:17:52Z
BUG: DatetimeIndex.asobject raises ValueError when contains NaT
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c41bc13b18606..91cb18d2accd1 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -248,7 +248,7 @@ Bug Fixes - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) - Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`) - +- Bug in ``DatetimeIndex.asobject`` raises ``ValueError`` when it contains ``NaT`` (:issue:`7539`) - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype diff --git a/pandas/core/base.py b/pandas/core/base.py index b43883885e962..cc676b9682277 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -376,3 +376,29 @@ def _ops_compat(self, name, op_accessor): is_quarter_end = _field_accessor('is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)") is_year_start = _field_accessor('is_year_start', "Logical indicating if first day of year (defined by frequency)") is_year_end = _field_accessor('is_year_end', "Logical indicating if last day of year (defined by frequency)") + + @property + def _box_func(self): + """ + box function to get object from internal representation + """ + raise NotImplementedError + + def _box_values(self, values): + """ + apply box func to passed values + """ + import pandas.lib as lib + return lib.map_infer(values, self._box_func) + + @property + def asobject(self): + from pandas.core.index import Index + return Index(self._box_values(self.asi8), name=self.name, dtype=object) + + def tolist(self): + """ + See ndarray.tolist + """ + return list(self.asobject) + diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 6c8dd3478835f..291b10c70c83c 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -515,6 +515,44 @@ def test_ops_properties_basic(self): self.assertEquals(s.day,10) self.assertRaises(AttributeError, lambda : s.weekday) + def test_asobject_tolist(self): + idx = 
pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx') + expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'), + pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.asobject + self.assertTrue(isinstance(result, Index)) + self.assertEqual(result.dtype, object) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(idx.tolist(), expected_list) + + idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo') + expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'), + pd.Timestamp('2013-02-28', tz='Asia/Tokyo'), + pd.Timestamp('2013-03-31', tz='Asia/Tokyo'), + pd.Timestamp('2013-04-30', tz='Asia/Tokyo')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.asobject + self.assertTrue(isinstance(result, Index)) + self.assertEqual(result.dtype, object) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(idx.tolist(), expected_list) + + idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), + pd.NaT, datetime(2013, 1, 4)], name='idx') + expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), + pd.NaT, pd.Timestamp('2013-01-04')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.asobject + self.assertTrue(isinstance(result, Index)) + self.assertEqual(result.dtype, object) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(idx.tolist(), expected_list) + + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -528,6 +566,38 @@ def test_ops_properties(self): self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter']) self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex)) + def 
test_asobject_tolist(self): + idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx') + expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'), + pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.asobject + self.assertTrue(isinstance(result, Index)) + self.assertEqual(result.dtype, object) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(idx.tolist(), expected_list) + + idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx') + expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'), + pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')] + expected = pd.Index(expected_list, dtype=object, name='idx') + result = idx.asobject + self.assertTrue(isinstance(result, Index)) + self.assertEqual(result.dtype, object) + for i in [0, 1, 3]: + self.assertTrue(result[i], expected[i]) + self.assertTrue(result[2].ordinal, pd.tslib.iNaT) + self.assertTrue(result[2].freq, 'D') + self.assertEqual(result.name, expected.name) + + result_list = idx.tolist() + for i in [0, 1, 3]: + self.assertTrue(result_list[i], expected_list[i]) + self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT) + self.assertTrue(result_list[2].freq, 'D') + + if __name__ == '__main__': import nose diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 8bc6f1a21b68a..c0ca5451ef1d2 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -72,6 +72,7 @@ def test_append(self): tm.assert_series_equal(result, self.frame['A']) def test_append_index(self): + tm._skip_if_no_pytz() idx1 = Index([1.1, 1.2, 1.3]) idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo') @@ -81,17 +82,18 @@ def test_append_index(self): midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3]) 
result = idx1.append(midx_lv2) - expected = Index([1.1, 1.2, 1.3, - (1.1, datetime.datetime(2010, 12, 31, 15, 0)), - (1.2, datetime.datetime(2011, 1, 1, 15, 0)), - (1.3, datetime.datetime(2011, 1, 2, 15, 0))]) + + # GH 7112 + import pytz + tz = pytz.timezone('Asia/Tokyo') + expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)), + (1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)), + (1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))] + expected = Index([1.1, 1.2, 1.3] + expected_tuples) self.assert_(result.equals(expected)) result = midx_lv2.append(idx1) - expected = Index([(1.1, datetime.datetime(2010, 12, 31, 15, 0)), - (1.2, datetime.datetime(2011, 1, 1, 15, 0)), - (1.3, datetime.datetime(2011, 1, 2, 15, 0)), - 1.1, 1.2, 1.3]) + expected = Index(expected_tuples + [1.1, 1.2, 1.3]) self.assert_(result.equals(expected)) result = midx_lv2.append(midx_lv2) @@ -103,12 +105,10 @@ def test_append_index(self): result = midx_lv3.append(midx_lv2) expected = Index._simple_new( - np.array([(1.1, datetime.datetime(2010, 12, 31, 15, 0), 'A'), - (1.2, datetime.datetime(2011, 1, 1, 15, 0), 'B'), - (1.3, datetime.datetime(2011, 1, 2, 15, 0), 'C'), - (1.1, datetime.datetime(2010, 12, 31, 15, 0)), - (1.2, datetime.datetime(2011, 1, 1, 15, 0)), - (1.3, datetime.datetime(2011, 1, 2, 15, 0))]), None) + np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'), + (1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'), + (1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')] + + expected_tuples), None) self.assert_(result.equals(expected)) def test_dataframe_constructor(self): diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 50296a417479e..ff585d80af830 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -6,15 +6,15 @@ import numpy as np -from pandas.core.common import (isnull, _NS_DTYPE, _INT64_DTYPE, - is_list_like,_values_from_object, _maybe_box, - notnull, ABCSeries) -from pandas.core.index import Index, Int64Index, _Identity, 
Float64Index +from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE, + _values_from_object, _maybe_box, + ABCSeries) +from pandas.core.index import Index, Int64Index, Float64Index import pandas.compat as compat from pandas.compat import u from pandas.tseries.frequencies import ( infer_freq, to_offset, get_period_alias, - Resolution, get_reso_string, get_offset) + Resolution, get_reso_string) from pandas.core.base import DatetimeIndexOpsMixin from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay from pandas.tseries.tools import parse_time_string, normalize_date @@ -29,7 +29,6 @@ import pandas.algos as _algos import pandas.index as _index -from pandas.tslib import isleapyear def _utc(): import pytz @@ -452,8 +451,9 @@ def _generate(cls, start, end, periods, name, offset, return index - def _box_values(self, values): - return lib.map_infer(values, lib.Timestamp) + @property + def _box_func(self): + return lambda x: Timestamp(x, offset=self.offset, tz=self.tz) def _local_timestamps(self): utc = _utc() @@ -673,7 +673,7 @@ def _format_with_header(self, header, **kwargs): def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): - data = self._get_object_index() + data = self.asobject from pandas.core.format import Datetime64Formatter return Datetime64Formatter(values=data, nat_rep=na_rep, @@ -778,27 +778,6 @@ def _to_embed(self, keep_tz=False): return self.asobject.values return self.values - @property - def asobject(self): - """ - Convert to Index of datetime objects - """ - if isnull(self).any(): - msg = 'DatetimeIndex with NaT cannot be converted to object' - raise ValueError(msg) - return self._get_object_index() - - def tolist(self): - """ - See ndarray.tolist - """ - return list(self.asobject) - - def _get_object_index(self): - boxfunc = lambda x: Timestamp(x, offset=self.offset, tz=self.tz) - boxed_values = lib.map_infer(self.asi8, boxfunc) - return Index(boxed_values, dtype=object, name=self.name) - def 
to_pydatetime(self): """ Return DatetimeIndex as object ndarray of datetime.datetime objects @@ -1515,7 +1494,7 @@ def normalize(self): tz=self.tz) def __iter__(self): - return iter(self._get_object_index()) + return iter(self.asobject) def searchsorted(self, key, side='left'): if isinstance(key, np.ndarray): diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index b3a29ab4110d7..d41438bbfd208 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -723,9 +723,9 @@ def __contains__(self, key): return False return key.ordinal in self._engine - def _box_values(self, values): - f = lambda x: Period(ordinal=x, freq=self.freq) - return lib.map_infer(values, f) + @property + def _box_func(self): + return lambda x: Period(ordinal=x, freq=self.freq) def asof_locs(self, where, mask): """ @@ -747,10 +747,6 @@ def asof_locs(self, where, mask): return result - @property - def asobject(self): - return Index(self._box_values(self.values), name=self.name, dtype=object) - def _array_values(self): return self.asobject @@ -854,12 +850,6 @@ def equals(self, other): return np.array_equal(self.asi8, other.asi8) - def tolist(self): - """ - Return a list of Period objects - """ - return self._get_object_array().tolist() - def to_timestamp(self, freq=None, how='start'): """ Cast to DatetimeIndex diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index af39bba8e43af..84c0c40de369a 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1448,18 +1448,6 @@ def test_to_timestamp_period_nat(self): self.assertTrue(result2.equals(index)) self.assertEqual(result2.name, 'idx') - def test_asobject_period_nat(self): - index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx') - - result = index.asobject - self.assertTrue(isinstance(result, Index)) - self.assertEqual(result.dtype, object) - self.assertTrue(isinstance(result[0], Period)) - self.assertEqual(result[0].ordinal, 
tslib.iNaT) - self.assertEqual(result[1], Period('2011-01', freq='M')) - self.assertEqual(result[2], Period('2011-02', freq='M')) - self.assertEqual(result.name, 'idx') - def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 9eb8f9b30b957..11161308be279 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1,5 +1,5 @@ # pylint: disable-msg=E1101,W0612 -from datetime import datetime, time, timedelta, date +from datetime import datetime, time, timedelta import sys import operator @@ -2363,16 +2363,6 @@ def test_order(self): self.assertTrue(ordered[::-1].is_monotonic) self.assert_numpy_array_equal(dexer, [0, 2, 1]) - def test_asobject(self): - idx = date_range(start='2013-01-01', periods=4, freq='M', name='idx') - expected = Index([Timestamp('2013-01-31'), Timestamp('2013-02-28'), - Timestamp('2013-03-31'), Timestamp('2013-04-30')], - dtype=object, name='idx') - - result = idx.asobject - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - def test_insert(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'], name='idx')
Closes #7539. Also, this fixed #7112 case 3 and case 4 to preserve tz. I've modified the tests for it. Related to #6469.
https://api.github.com/repos/pandas-dev/pandas/pulls/7544
2014-06-21T23:37:26Z
2014-06-24T11:49:40Z
2014-06-24T11:49:40Z
2014-06-26T10:59:59Z
DOC: Release note correction for GH7534
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 158fa1561eb30..e8c7a6f9ab462 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -248,7 +248,7 @@ Bug Fixes - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) -- Bug in ``Timestamp.tz_convert`` resets ``nanosecond`` info (:issue:`7534`) +- Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`) - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`).
Release note correction for #7534. `tz_convert` has no problem.
https://api.github.com/repos/pandas-dev/pandas/pulls/7538
2014-06-21T14:46:41Z
2014-06-21T14:49:05Z
2014-06-21T14:49:04Z
2014-06-21T22:30:08Z
CLN: Remove executable flag from frame.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py old mode 100755 new mode 100644
Closes #7536.
https://api.github.com/repos/pandas-dev/pandas/pulls/7537
2014-06-21T14:31:32Z
2014-06-21T14:48:38Z
2014-06-21T14:48:38Z
2014-06-21T14:48:38Z
should fix issue #7520, test_nanargmin fails with datetime64 objects
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 417cef92412b1..3e8a5fecbb579 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -237,10 +237,20 @@ def check_funs(self, testfunc, targfunc, self.arr_utf.astype('O')] if allow_date: - self.check_fun(testfunc, targfunc, 'arr_date', **kwargs) - self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs) - objs += [self.arr_date.astype('O'), - self.arr_tdelta.astype('O')] + try: + targfunc(self.arr_date) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, 'arr_date', **kwargs) + objs += [self.arr_date.astype('O')] + try: + targfunc(self.arr_tdelta) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs) + objs += [self.arr_tdelta.astype('O')] if allow_obj: self.arr_obj = np.vstack(objs)
I think this will fix the issue #7520. The issue appears to be that older versions of `numpy`, probably less than `1.7.0`, can't handle `datetime64` dtypes very well. I have added simple check to see if `numpy` can handle a standard operation with `datetime64` dtypes, and if not it skips those tests. I can't test this personally since I don't have the older version of `numpy`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7535
2014-06-21T12:52:19Z
2014-06-21T13:40:17Z
2014-06-21T13:40:17Z
2014-06-26T10:28:55Z
BUG: Timestamp.tz_localize resets nanosecond
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 197bc9bae5c9d..158fa1561eb30 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -248,7 +248,7 @@ Bug Fixes - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) - +- Bug in ``Timestamp.tz_convert`` resets ``nanosecond`` info (:issue:`7534`) - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index b1d8bdd9f81ce..0fec3e48c674a 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -1,5 +1,5 @@ # pylint: disable-msg=E1101,W0612 -from datetime import datetime, time, timedelta, tzinfo, date +from datetime import datetime, timedelta, tzinfo, date import sys import os import unittest @@ -8,10 +8,9 @@ import numpy as np import pytz -from pandas import (Index, Series, TimeSeries, DataFrame, isnull, - date_range, Timestamp) +from pandas import (Index, Series, DataFrame, isnull, Timestamp) -from pandas import DatetimeIndex, Int64Index, to_datetime, NaT +from pandas import DatetimeIndex, to_datetime, NaT from pandas import tslib import pandas.core.datetools as datetools @@ -20,17 +19,10 @@ import pandas.tseries.tools as tools from pytz import NonExistentTimeError -from pandas.util.testing import assert_series_equal, assert_almost_equal, assertRaisesRegexp import pandas.util.testing as tm -import pandas.lib as lib -import pandas.core.datetools as dt -from numpy.random import rand from pandas.util.testing import assert_frame_equal -import pandas.compat as compat -from pandas.compat import range, lrange, zip, cPickle as pickle -from pandas.core.datetools import BDay -import pandas.core.common as com +from pandas.compat import lrange, zip from pandas import _np_version_under1p7 @@ -544,13 +536,13 @@ def test_localized_at_time_between_time(self): result = 
ts_local.at_time(time(10, 0)) expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr('US/Eastern')) - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern'))) t1, t2 = time(10, 0), time(11, 0) result = ts_local.between_time(t1, t2) expected = ts.between_time(t1, t2).tz_localize(self.tzstr('US/Eastern')) - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern'))) def test_string_index_alias_tz_aware(self): @@ -631,7 +623,7 @@ def test_frame_no_datetime64_dtype(self): 'datetimes_with_tz' : datetimes_with_tz }) result = df.get_dtype_counts() expected = Series({ 'datetime64[ns]' : 3, 'object' : 1 }) - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_hongkong_tz_convert(self): # #1673 @@ -863,7 +855,7 @@ def test_series_frame_tz_localize(self): # Can't localize if already tz-aware rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') ts = Series(1, index=rng) - assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern') + tm.assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern') def test_series_frame_tz_convert(self): rng = date_range('1/1/2011', periods=200, freq='D', @@ -887,7 +879,7 @@ def test_series_frame_tz_convert(self): # can't convert tz-naive rng = date_range('1/1/2011', periods=200, freq='D') ts = Series(1, index=rng) - assertRaisesRegexp(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern') + tm.assertRaisesRegexp(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern') def test_join_utc_convert(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') @@ -1033,7 +1025,7 @@ def test_arith_utc_convert(self): expected = uts1 + uts2 self.assertEqual(result.index.tz, pytz.UTC) - assert_series_equal(result, expected) + tm.assert_series_equal(result, 
expected) def test_intersection(self): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index bf1f7879ba774..9499f05a4aa5f 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -9,6 +9,7 @@ from pandas.tslib import period_asfreq, period_ordinal from pandas.tseries.index import date_range from pandas.tseries.frequencies import get_freq +import pandas.tseries.offsets as offsets from pandas import _np_version_under1p7 import pandas.util.testing as tm from pandas.util.testing import assert_series_equal @@ -61,7 +62,7 @@ def test_bounds_with_different_units(self): for unit in time_units: self.assertRaises( ValueError, - tslib.Timestamp, + Timestamp, np.datetime64(date_string, dtype='M8[%s]' % unit) ) @@ -72,27 +73,48 @@ def test_bounds_with_different_units(self): for date_string in in_bounds_dates: for unit in time_units: - tslib.Timestamp( + Timestamp( np.datetime64(date_string, dtype='M8[%s]' % unit) ) + def test_tz(self): + t = '2014-02-01 09:00' + ts = Timestamp(t) + local = ts.tz_localize('Asia/Tokyo') + self.assertEqual(local.hour, 9) + self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo')) + conv = local.tz_convert('US/Eastern') + self.assertEqual(conv, + Timestamp('2014-01-31 19:00', tz='US/Eastern')) + self.assertEqual(conv.hour, 19) + + # preserves nanosecond + ts = Timestamp(t) + offsets.Nano(5) + local = ts.tz_localize('Asia/Tokyo') + self.assertEqual(local.hour, 9) + self.assertEqual(local.nanosecond, 5) + conv = local.tz_convert('US/Eastern') + self.assertEqual(conv.nanosecond, 5) + self.assertEqual(conv.hour, 19) + def test_barely_oob_dts(self): one_us = np.timedelta64(1) # By definition we can't go out of bounds in [ns], so we # convert the datetime64s to [us] so we can go out of bounds - min_ts_us = np.datetime64(tslib.Timestamp.min).astype('M8[us]') - max_ts_us = np.datetime64(tslib.Timestamp.max).astype('M8[us]') + 
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]') + max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]') # No error for the min/max datetimes - tslib.Timestamp(min_ts_us) - tslib.Timestamp(max_ts_us) + Timestamp(min_ts_us) + Timestamp(max_ts_us) # One us less than the minimum is an error - self.assertRaises(ValueError, tslib.Timestamp, min_ts_us - one_us) + self.assertRaises(ValueError, Timestamp, min_ts_us - one_us) # One us more than the maximum is an error - self.assertRaises(ValueError, tslib.Timestamp, max_ts_us + one_us) + self.assertRaises(ValueError, Timestamp, max_ts_us + one_us) + class TestDatetimeParsingWrappers(tm.TestCase): def test_does_not_convert_mixed_integer(self): diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 24b1215b949a3..679359f1b4d33 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -341,13 +341,15 @@ class Timestamp(_Timestamp): def is_year_end(self): return self._get_start_end_field('is_year_end') - def tz_localize(self, tz): + def tz_localize(self, tz, infer_dst=False): """ Convert naive Timestamp to local time zone Parameters ---------- tz : pytz.timezone or dateutil.tz.tzfile + infer_dst : boolean, default False + Attempt to infer fall dst-transition hours based on order Returns ------- @@ -355,7 +357,10 @@ class Timestamp(_Timestamp): """ if self.tzinfo is None: # tz naive, localize - return Timestamp(self.to_pydatetime(), tz=tz) + tz = maybe_get_tz(tz) + value = tz_localize_to_utc(np.array([self.value]), tz, + infer_dst=infer_dst)[0] + return Timestamp(value, tz=tz) else: raise Exception('Cannot localize tz-aware Timestamp, use ' 'tz_convert for conversions')
`Timestamp.tz_localize` resets `nanosecond`. ``` t = pd.Timestamp('2011-01-01') + pd.offsets.Nano(1) t.tz_localize('US/Eastern') # Warning: discarding nonzero nanoseconds #2011-01-01 00:00:00-05:00 ``` Even though `DatetimeIndex.tz_convert` can preserve it. ``` idx = pd.date_range('3/11/2012 04:00', periods=10, freq='N') idx.tz_localize('US/Eastern') # <class 'pandas.tseries.index.DatetimeIndex'> # [2012-03-11 04:00:00-04:00, ..., 2012-03-11 04:00:00.000000009-04:00] # Length: 10, Freq: N, Timezone: US/Eastern ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7534
2014-06-21T08:01:17Z
2014-06-21T13:41:19Z
2014-06-21T13:41:19Z
2014-06-21T14:47:49Z
BUG: df.reset_index loses tz
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index e8c7a6f9ab462..bde685e0e6167 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -252,7 +252,7 @@ Bug Fixes - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). - +- Bug in ``DataFrame.reset_index`` loses ``tz`` (:issue:`3950`) - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9645d09a5fd0d..d770f8c8f853a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2326,19 +2326,24 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0, else: new_obj = self.copy() - def _maybe_cast(values, labels=None): - - if values.dtype == np.object_: - values = lib.maybe_convert_objects(values) - - # if we have the labels, extract the values with a mask - if labels is not None: - mask = labels == -1 - values = values.take(labels) - if mask.any(): - values, changed = com._maybe_upcast_putmask( - values, mask, np.nan) - + def _maybe_casted_values(index, labels=None): + if isinstance(index, PeriodIndex): + values = index.asobject + elif (isinstance(index, DatetimeIndex) and + index.tz is not None): + values = index.asobject + else: + values = index.values + if values.dtype == np.object_: + values = lib.maybe_convert_objects(values) + + # if we have the labels, extract the values with a mask + if labels is not None: + mask = labels == -1 + values = values.take(labels) + if mask.any(): + values, changed = com._maybe_upcast_putmask(values, + mask, np.nan) return values new_index = np.arange(len(new_obj)) @@ -2371,7 +2376,7 @@ def _maybe_cast(values, labels=None): col_name = tuple(name_lst) # to ndarray and maybe infer different dtype - level_values = _maybe_cast(lev.values, lab) + level_values = _maybe_casted_values(lev, lab) if level is None or i in level: new_obj.insert(0, col_name, level_values) @@ -2387,13 +2392,7 @@ def 
_maybe_cast(values, labels=None): lev_num = self.columns._get_level_number(col_level) name_lst[lev_num] = name name = tuple(name_lst) - if isinstance(self.index, PeriodIndex): - values = self.index.asobject - elif (isinstance(self.index, DatetimeIndex) and - self.index.tz is not None): - values = self.index.asobject - else: - values = _maybe_cast(self.index.values) + values = _maybe_casted_values(self.index) new_obj.insert(0, name, values) new_obj.index = new_index diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6848b130dee3a..ed2b19f5f2e19 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2182,6 +2182,20 @@ def test_set_index_cast_datetimeindex(self): df.pop('ts') assert_frame_equal(df, expected) + # GH 3950 + # reset_index with single level + for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']: + idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx') + df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) + + expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2), + datetime(2011, 1, 3), datetime(2011, 1, 4), + datetime(2011, 1, 5)], + 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, + columns=['idx', 'a', 'b']) + expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz)) + assert_frame_equal(df.reset_index(), expected) + def test_set_index_multiindexcolumns(self): columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)]) df = DataFrame(np.random.randn(3, 3), columns=columns) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index b8ccfb3eb151b..54544a87e4038 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2078,6 +2078,46 @@ def test_set_index_datetime(self): self.assertTrue(df.index.get_level_values(1).equals(idx2)) self.assertTrue(df.index.get_level_values(2).equals(idx3)) + def test_reset_index_datetime(self): + # GH 3950 + for tz in ['UTC', 'Asia/Tokyo', 
'US/Eastern']: + idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx1') + idx2 = pd.Index(range(5), name='idx2') + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) + + expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1), + datetime.datetime(2011, 1, 2), + datetime.datetime(2011, 1, 3), + datetime.datetime(2011, 1, 4), + datetime.datetime(2011, 1, 5)], + 'idx2': range(5), + 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, + columns=['idx1', 'idx2', 'a', 'b']) + expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz)) + assert_frame_equal(df.reset_index(), expected) + + idx3 = pd.date_range('1/1/2012', periods=5, freq='MS', tz='Europe/Paris', name='idx3') + idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3]) + df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) + + expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1), + datetime.datetime(2011, 1, 2), + datetime.datetime(2011, 1, 3), + datetime.datetime(2011, 1, 4), + datetime.datetime(2011, 1, 5)], + 'idx2': range(5), + 'idx3': [datetime.datetime(2012, 1, 1), + datetime.datetime(2012, 2, 1), + datetime.datetime(2012, 3, 1), + datetime.datetime(2012, 4, 1), + datetime.datetime(2012, 5, 1)], + 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, + columns=['idx1', 'idx2', 'idx3', 'a', 'b']) + expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz)) + expected['idx3'] = expected['idx3'].apply(lambda d: pd.Timestamp(d, tz='Europe/Paris')) + assert_frame_equal(df.reset_index(), expected) + def test_set_index_period(self): # GH 6631 df = DataFrame(np.random.random(6))
Closes #3950.
https://api.github.com/repos/pandas-dev/pandas/pulls/7533
2014-06-21T07:51:17Z
2014-06-21T20:10:21Z
2014-06-21T20:10:21Z
2014-06-22T00:56:59Z
BUG: single column bar plot is misaligned
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c41bc13b18606..6e02e771fecc7 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -256,5 +256,9 @@ Bug Fixes - Bug in ``DataFrame.reset_index`` loses ``tz`` (:issue:`3950`) +- Bug in single column bar plot is misaligned (:issue:`7498`). + + + - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index c96fd08233238..a09f6bc4aa9b0 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1255,43 +1255,40 @@ def _check_bar_alignment(self, df, kind='bar', stacked=False, align=align, width=width, position=position, grid=True) - tick_pos = np.arange(len(df)) - axes = self._flatten_visible(axes) for ax in axes: if kind == 'bar': axis = ax.xaxis ax_min, ax_max = ax.get_xlim() + min_edge = min([p.get_x() for p in ax.patches]) + max_edge = max([p.get_x() + p.get_width() for p in ax.patches]) elif kind == 'barh': axis = ax.yaxis ax_min, ax_max = ax.get_ylim() + min_edge = min([p.get_y() for p in ax.patches]) + max_edge = max([p.get_y() + p.get_height() for p in ax.patches]) else: raise ValueError + # GH 7498 + # compare margins between lim and bar edges + self.assertAlmostEqual(ax_min, min_edge - 0.25) + self.assertAlmostEqual(ax_max, max_edge + 0.25) + p = ax.patches[0] if kind == 'bar' and (stacked is True or subplots is True): edge = p.get_x() center = edge + p.get_width() * position - tickoffset = width * position elif kind == 'bar' and stacked is False: center = p.get_x() + p.get_width() * len(df.columns) * position edge = p.get_x() - if align == 'edge': - tickoffset = width * (position - 0.5) + p.get_width() * 1.5 - else: - tickoffset = width * position + p.get_width() elif kind == 'barh' and (stacked is True or subplots is True): center = p.get_y() + p.get_height() * 
position edge = p.get_y() - tickoffset = width * position elif kind == 'barh' and stacked is False: center = p.get_y() + p.get_height() * len(df.columns) * position edge = p.get_y() - if align == 'edge': - tickoffset = width * (position - 0.5) + p.get_height() * 1.5 - else: - tickoffset = width * position + p.get_height() else: raise ValueError @@ -1307,59 +1304,43 @@ def _check_bar_alignment(self, df, kind='bar', stacked=False, else: raise ValueError - # Check starting point and axes limit margin - self.assertEqual(ax_min, tick_pos[0] - tickoffset - 0.25) - self.assertEqual(ax_max, tick_pos[-1] - tickoffset + 1) - # Check tick locations and axes limit margin - t_min = axis.get_ticklocs()[0] - tickoffset - t_max = axis.get_ticklocs()[-1] - tickoffset - self.assertAlmostEqual(ax_min, t_min - 0.25) - self.assertAlmostEqual(ax_max, t_max + 1.0) return axes @slow def test_bar_stacked_center(self): # GH2157 df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - axes = self._check_bar_alignment(df, kind='bar', stacked=True) - # Check the axes has the same drawing range before fixing # GH4525 - self.assertEqual(axes[0].get_xlim(), (-0.5, 4.75)) - + self._check_bar_alignment(df, kind='bar', stacked=True) self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9) - - axes = self._check_bar_alignment(df, kind='barh', stacked=True) - self.assertEqual(axes[0].get_ylim(), (-0.5, 4.75)) - + self._check_bar_alignment(df, kind='barh', stacked=True) self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9) @slow def test_bar_center(self): df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - axes = self._check_bar_alignment(df, kind='bar', stacked=False) - self.assertEqual(axes[0].get_xlim(), (-0.75, 4.5)) - + self._check_bar_alignment(df, kind='bar', stacked=False) self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9) - - axes = self._check_bar_alignment(df, kind='barh', stacked=False) - self.assertEqual(axes[0].get_ylim(), 
(-0.75, 4.5)) - + self._check_bar_alignment(df, kind='barh', stacked=False) self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9) @slow def test_bar_subplots_center(self): df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - axes = self._check_bar_alignment(df, kind='bar', subplots=True) - for ax in axes: - self.assertEqual(ax.get_xlim(), (-0.5, 4.75)) - + self._check_bar_alignment(df, kind='bar', subplots=True) self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9) - - axes = self._check_bar_alignment(df, kind='barh', subplots=True) - for ax in axes: - self.assertEqual(ax.get_ylim(), (-0.5, 4.75)) - + self._check_bar_alignment(df, kind='barh', subplots=True) self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9) + @slow + def test_bar_align_single_column(self): + df = DataFrame(randn(5)) + self._check_bar_alignment(df, kind='bar', stacked=False) + self._check_bar_alignment(df, kind='bar', stacked=True) + self._check_bar_alignment(df, kind='barh', stacked=False) + self._check_bar_alignment(df, kind='barh', stacked=True) + self._check_bar_alignment(df, kind='bar', subplots=True) + self._check_bar_alignment(df, kind='barh', subplots=True) + @slow def test_bar_edge(self): df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 37a982acc0bbd..03cfaa358c864 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1784,9 +1784,10 @@ def __init__(self, data, **kwargs): self.stacked = kwargs.pop('stacked', False) self.bar_width = kwargs.pop('width', 0.5) + pos = kwargs.pop('position', 0.5) - kwargs['align'] = kwargs.pop('align', 'center') + kwargs.setdefault('align', 'center') self.tick_pos = np.arange(len(data)) self.bottom = kwargs.pop('bottom', None) @@ -1797,14 +1798,19 @@ def __init__(self, data, **kwargs): if self.stacked or self.subplots: self.tickoffset = self.bar_width * pos - elif kwargs['align'] == 'edge': - K = 
self.nseries - w = self.bar_width / K - self.tickoffset = self.bar_width * (pos - 0.5) + w * 1.5 + if kwargs['align'] == 'edge': + self.lim_offset = self.bar_width / 2 + else: + self.lim_offset = 0 else: - K = self.nseries - w = self.bar_width / K - self.tickoffset = self.bar_width * pos + w + if kwargs['align'] == 'edge': + w = self.bar_width / self.nseries + self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5 + self.lim_offset = w * 0.5 + else: + self.tickoffset = self.bar_width * pos + self.lim_offset = 0 + self.ax_pos = self.tick_pos - self.tickoffset def _args_adjust(self): @@ -1881,9 +1887,8 @@ def _make_plot(self): neg_prior = neg_prior + np.where(mask, 0, y) else: w = self.bar_width / K - rect = bar_f(ax, self.ax_pos + (i + 1.5) * w, y, w, + rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w, start=start, label=label, **kwds) - self._add_legend_handle(rect, label, index=i) def _post_plot_logic(self): @@ -1894,8 +1899,12 @@ def _post_plot_logic(self): str_index = [com.pprint_thing(key) for key in range(self.data.shape[0])] name = self._get_index_name() + + s_edge = self.ax_pos[0] - 0.25 + self.lim_offset + e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset + if self.kind == 'bar': - ax.set_xlim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1]) + ax.set_xlim((s_edge, e_edge)) ax.set_xticks(self.tick_pos) ax.set_xticklabels(str_index, rotation=self.rot, fontsize=self.fontsize) @@ -1905,7 +1914,7 @@ def _post_plot_logic(self): ax.set_xlabel(name) elif self.kind == 'barh': # horizontal bars - ax.set_ylim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1]) + ax.set_ylim((s_edge, e_edge)) ax.set_yticks(self.tick_pos) ax.set_yticklabels(str_index, rotation=self.rot, fontsize=self.fontsize) @@ -1915,9 +1924,6 @@ def _post_plot_logic(self): else: raise NotImplementedError(self.kind) - # if self.subplots and self.legend: - # self.axes[0].legend(loc='best') - class PiePlot(MPLPlot):
Closes #7498.
https://api.github.com/repos/pandas-dev/pandas/pulls/7532
2014-06-21T07:04:50Z
2014-07-01T15:29:53Z
2014-07-01T15:29:53Z
2014-07-02T16:46:04Z
WIP/ENH: allow read_excel to accept URLs (GH6809)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 158fa1561eb30..97b7555d833f8 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -108,6 +108,9 @@ Enhancements - ``read_html`` now sports an ``encoding`` argument that is passed to the underlying parser library. You can use this to read non-ascii encoded web pages (:issue:`7323`). +- ``read_excel`` now supports reading from URLs in the same way + that ``read_csv`` does. (:issue:`6809`) + - Support for dateutil timezones, which can now be used in the same way as pytz timezones across pandas. (:issue:`4688`) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 67107ee20b336..f81cf6502a0e6 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -10,6 +10,7 @@ import numpy as np from pandas.io.parsers import TextParser +from pandas.io.common import _is_url, _urlopen from pandas.tseries.period import Period from pandas import json from pandas.compat import map, zip, reduce, range, lrange, u, add_metaclass @@ -56,8 +57,10 @@ def read_excel(io, sheetname=0, **kwds): Parameters ---------- - io : string, file-like object or xlrd workbook - If a string, expected to be a path to xls or xlsx file + io : string, file-like object, or xlrd workbook. + The string could be a URL. Valid URL schemes include http, ftp, s3, + and file. For file URLs, a host is expected. 
For instance, a local + file could be file://localhost/path/to/workbook.xlsx sheetname : string or int, default 0 Name of Excel sheet or the page number of the sheet header : int, default 0 @@ -98,6 +101,7 @@ def read_excel(io, sheetname=0, **kwds): ------- parsed : DataFrame DataFrame from the passed in Excel file + """ if 'kind' in kwds: kwds.pop('kind') @@ -139,11 +143,16 @@ def __init__(self, io, **kwds): raise ValueError("Unknown engine: %s" % engine) if isinstance(io, compat.string_types): - self.book = xlrd.open_workbook(io) - elif engine == "xlrd" and isinstance(io, xlrd.Book): + if _is_url(io): + data = _urlopen(io).read() + self.book = xlrd.open_workbook(file_contents=data) + else: + self.book = xlrd.open_workbook(io) + elif engine == 'xlrd' and isinstance(io, xlrd.Book): self.book = io - elif hasattr(io, "read"): - data = io.read() + elif not isinstance(io, xlrd.Book) and hasattr(io, "read"): + # N.B. xlrd.Book has a read attribute too + data = io.read() self.book = xlrd.open_workbook(file_contents=data) else: raise ValueError('Must explicitly set engine if not passing in' diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index b45897dff9aa2..96db535347921 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -2,6 +2,7 @@ from pandas.compat import u, range, map, openpyxl_compat from datetime import datetime, date, time +import sys import os from distutils.version import LooseVersion @@ -11,6 +12,7 @@ from numpy import nan import numpy as np +from numpy.testing.decorators import slow from pandas import DataFrame, Index, MultiIndex from pandas.io.parsers import read_csv @@ -18,6 +20,7 @@ ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter, register_writer, _XlsxWriter ) +from pandas.io.common import URLError from pandas.util.testing import ensure_clean from pandas.core.config import set_option, get_option import pandas.util.testing as tm @@ -280,6 +283,39 @@ def test_read_xlrd_Book(self): 
result = read_excel(book, sheetname="SheetA", engine="xlrd") tm.assert_frame_equal(df, result) + @tm.network + def test_read_from_http_url(self): + _skip_if_no_xlrd() + + url = ('https://raw.github.com/pydata/pandas/master/' + 'pandas/io/tests/data/test.xlsx') + url_table = read_excel(url) + dirpath = tm.get_data_path() + localtable = os.path.join(dirpath, 'test.xlsx') + local_table = read_excel(localtable) + tm.assert_frame_equal(url_table, local_table) + + @slow + def test_read_from_file_url(self): + _skip_if_no_xlrd() + + # FILE + if sys.version_info[:2] < (2, 6): + raise nose.SkipTest("file:// not supported with Python < 2.6") + dirpath = tm.get_data_path() + localtable = os.path.join(dirpath, 'test.xlsx') + local_table = read_excel(localtable) + + try: + url_table = read_excel('file://localhost/' + localtable) + except URLError: + # fails on some systems + raise nose.SkipTest("failing on %s" % + ' '.join(platform.uname()).strip()) + + tm.assert_frame_equal(url_table, local_table) + + def test_xlsx_table(self): _skip_if_no_xlrd() _skip_if_no_openpyxl()
closes #6809 This borrows the mechanisms already used in read_csv to enable reading of Excel objects via URLs. Not sure what tests are appropriate, and I'm not certain what the various decorators do, so I tried to mirror the read_csv tests as best I could.
https://api.github.com/repos/pandas-dev/pandas/pulls/7531
2014-06-21T04:31:06Z
2014-06-23T14:03:11Z
2014-06-23T14:03:11Z
2014-07-19T14:15:57Z
TST: io.data.Options changes to avoid test failures on bad data.
diff --git a/pandas/io/data.py b/pandas/io/data.py index fe87c0d9fb5e7..dab9862bae2e2 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -683,7 +683,10 @@ def _get_option_tables(self, month, year, expiry): table_name = '_tables' + m1 + str(year)[-2:] setattr(self, table_name, tables) - self.underlying_price, self.quote_time = self._get_underlying_price(root) + try: + self.underlying_price, self.quote_time = self._get_underlying_price(root) + except IndexError: + self.underlying_price, self.quote_time = np.nan, np.nan return tables @@ -723,7 +726,7 @@ def _get_option_data(self, month, year, expiry, name): if ntables == 0: raise RemoteDataError("No tables found at {0!r}".format(url)) elif table_loc - 1 > ntables: - raise IndexError("Table location {0} invalid, {1} tables" + raise RemoteDataError("Table location {0} invalid, {1} tables" " found".format(table_loc, ntables)) option_data = _parse_options_data(tables[table_loc])
Added try, except to underlying price / quote_time function. Changed to RemoteDataError in the event that some tables are found, but the correct table is not available. Fixes #7524.
https://api.github.com/repos/pandas-dev/pandas/pulls/7530
2014-06-20T23:46:44Z
2014-06-21T13:48:20Z
2014-06-21T13:48:20Z
2014-06-21T13:48:24Z
BUG: DatetimeIndex comparison handles NaT incorrectly
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 03caf47dc7127..197bc9bae5c9d 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -237,6 +237,7 @@ Bug Fixes - Bug in when writing Stata files where the encoding was ignored (:issue:`7286`) +- Bug in ``DatetimeIndex`` comparison doesn't handle ``NaT`` properly (:issue:`7529`) - Bug in passing input with ``tzinfo`` to some offsets ``apply``, ``rollforward`` or ``rollback`` resets ``tzinfo`` or raises ``ValueError`` (:issue:`7465`) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 16468f24a0ee1..50296a417479e 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -74,22 +74,35 @@ def wrapper(left, right): return wrapper -def _dt_index_cmp(opname): +def _dt_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert datetime-like to datetime64 """ def wrapper(self, other): func = getattr(super(DatetimeIndex, self), opname) - if isinstance(other, datetime): + if isinstance(other, datetime) or isinstance(other, compat.string_types): other = _to_m8(other, tz=self.tz) - elif isinstance(other, list): - other = DatetimeIndex(other) - elif isinstance(other, compat.string_types): - other = _to_m8(other, tz=self.tz) - elif not isinstance(other, (np.ndarray, ABCSeries)): - other = _ensure_datetime64(other) - result = func(other) + result = func(other) + if com.isnull(other): + result.fill(nat_result) + else: + if isinstance(other, list): + other = DatetimeIndex(other) + elif not isinstance(other, (np.ndarray, ABCSeries)): + other = _ensure_datetime64(other) + result = func(other) + if isinstance(other, Index): + o_mask = other.values.view('i8') == tslib.iNaT + else: + o_mask = other.view('i8') == tslib.iNaT + + if o_mask.any(): + result[o_mask] = nat_result + + mask = self.asi8 == tslib.iNaT + if mask.any(): + result[mask] = nat_result return result.view(np.ndarray) return wrapper @@ -142,7 +155,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, 
Int64Index): _arrmap = None __eq__ = _dt_index_cmp('__eq__') - __ne__ = _dt_index_cmp('__ne__') + __ne__ = _dt_index_cmp('__ne__', nat_result=True) __lt__ = _dt_index_cmp('__lt__') __gt__ = _dt_index_cmp('__gt__') __le__ = _dt_index_cmp('__le__') diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index c44c3c9272f6a..b3a29ab4110d7 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -498,16 +498,11 @@ def dt64arr_to_periodarr(data, freq, tz): # --- Period index sketch -def _period_index_cmp(opname): +def _period_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert datetime-like to datetime64 """ def wrapper(self, other): - if opname == '__ne__': - fill_value = True - else: - fill_value = False - if isinstance(other, Period): func = getattr(self.values, opname) if other.freq != self.freq: @@ -523,7 +518,7 @@ def wrapper(self, other): mask = (com.mask_missing(self.values, tslib.iNaT) | com.mask_missing(other.values, tslib.iNaT)) if mask.any(): - result[mask] = fill_value + result[mask] = nat_result return result else: @@ -532,10 +527,10 @@ def wrapper(self, other): result = func(other.ordinal) if other.ordinal == tslib.iNaT: - result.fill(fill_value) + result.fill(nat_result) mask = self.values == tslib.iNaT if mask.any(): - result[mask] = fill_value + result[mask] = nat_result return result return wrapper @@ -595,7 +590,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index): _allow_period_index_ops = True __eq__ = _period_index_cmp('__eq__') - __ne__ = _period_index_cmp('__ne__') + __ne__ = _period_index_cmp('__ne__', nat_result=True) __lt__ = _period_index_cmp('__lt__') __gt__ = _period_index_cmp('__gt__') __le__ = _period_index_cmp('__le__') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index d32efe0d777f7..d2cfdff2b003d 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2179,6 +2179,93 @@ def 
test_comparisons_coverage(self): exp = rng == rng self.assert_numpy_array_equal(result, exp) + def test_comparisons_nat(self): + fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) + fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) + + didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, + '2014-05-01', '2014-07-01']) + didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT, + '2014-06-01', '2014-07-01']) + darr = np.array([np.datetime64('2014-02-01 00:00Z'), + np.datetime64('2014-03-01 00:00Z'), + np.datetime64('nat'), np.datetime64('nat'), + np.datetime64('2014-06-01 00:00Z'), + np.datetime64('2014-07-01 00:00Z')]) + + if _np_version_under1p7: + # cannot test array because np.datetime('nat') returns today's date + cases = [(fidx1, fidx2), (didx1, didx2)] + else: + cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] + + # Check pd.NaT is handles as the same as np.nan + for idx1, idx2 in cases: + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + self.assert_numpy_array_equal(result, expected) + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + self.assert_numpy_array_equal(result, expected) + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + self.assert_numpy_array_equal(result, expected) + + for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]: + result = idx1 < val + expected = np.array([False, False, False, False, False, False]) + self.assert_numpy_array_equal(result, expected) + result = idx1 > val + 
self.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + self.assert_numpy_array_equal(result, expected) + result = idx1 >= val + self.assert_numpy_array_equal(result, expected) + + result = idx1 == val + self.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, True, True, True, True]) + self.assert_numpy_array_equal(result, expected) + + # Check pd.NaT is handles as the same as np.nan + for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: + result = idx1 < val + expected = np.array([True, False, False, False, False, False]) + self.assert_numpy_array_equal(result, expected) + result = idx1 > val + expected = np.array([False, False, False, False, True, True]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + expected = np.array([True, False, True, False, False, False]) + self.assert_numpy_array_equal(result, expected) + result = idx1 >= val + expected = np.array([False, False, True, False, True, True]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 == val + expected = np.array([False, False, True, False, False, False]) + self.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, False, True, True, True]) + self.assert_numpy_array_equal(result, expected) + def test_map(self): rng = date_range('1/1/2000', periods=10)
Derived from #7485. Comparison with `DatetimeIndex` result incorrectly if `NaT` is included. ``` idx1 = pd.DatetimeIndex(['2011-01-01', pd.NaT], freq='M') idx2 = pd.DatetimeIndex([pd.NaT, '2011-01-01'], freq='M') print(idx1 > idx2) # [True, False] # it must be [False, False] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7529
2014-06-20T20:28:40Z
2014-06-20T21:38:59Z
2014-06-20T21:38:59Z
2014-06-21T02:39:07Z
REGR: Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (GH7523)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index e038124cadc00..03caf47dc7127 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -167,6 +167,7 @@ Bug Fixes ~~~~~~~~~ - Bug in ``DataFrame.where`` with a symmetric shaped frame and a passed other of a DataFrame (:issue:`7506`) - Bug in Panel indexing with a multi-index axis (:issue:`7516`) +- Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`) - Bug in timeops with non-aligned Series (:issue:`7500`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 30002a719a556..030f902eb13ce 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1758,7 +1758,11 @@ def _get_slice(starting_value, offset, search_side, slice_property, except KeyError: if self.is_monotonic: - if not is_unique: + + # we are duplicated but non-unique + # so if we have an indexer then we are done + # else search for it (GH 7523) + if not is_unique and is_integer(search_value): slc = search_value else: slc = self.searchsorted(search_value, diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 01b3f866c6bfc..d32efe0d777f7 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2966,6 +2966,42 @@ def test_slice_locs_indexerror(self): s = Series(lrange(100000), times) s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)] + def test_slicing_datetimes(self): + + # GH 7523 + + # unique + df = DataFrame(np.arange(4.,dtype='float64'), + index=[datetime(2001, 1, i, 10, 00) for i in [1,2,3,4]]) + result = df.ix[datetime(2001,1,1,10):] + assert_frame_equal(result,df) + result = df.ix[:datetime(2001,1,4,10)] + assert_frame_equal(result,df) + result = df.ix[datetime(2001,1,1,10):datetime(2001,1,4,10)] + assert_frame_equal(result,df) + + result = df.ix[datetime(2001,1,1,11):] + expected = df.iloc[1:] + assert_frame_equal(result,expected) + result = df.ix['20010101 11':] + 
assert_frame_equal(result,expected) + + # duplicates + df = pd.DataFrame(np.arange(5.,dtype='float64'), + index=[datetime(2001, 1, i, 10, 00) for i in [1,2,2,3,4]]) + + result = df.ix[datetime(2001,1,1,10):] + assert_frame_equal(result,df) + result = df.ix[:datetime(2001,1,4,10)] + assert_frame_equal(result,df) + result = df.ix[datetime(2001,1,1,10):datetime(2001,1,4,10)] + assert_frame_equal(result,df) + + result = df.ix[datetime(2001,1,1,11):] + expected = df.iloc[1:] + assert_frame_equal(result,expected) + result = df.ix['20010101 11':] + assert_frame_equal(result,expected) class TestSeriesDatetime64(tm.TestCase): @@ -3054,7 +3090,7 @@ def test_intersection(self): for tz in [None, 'Asia/Tokyo']: rng = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') - # if target has the same name, it is preserved + # if target has the same name, it is preserved rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') expected2 = date_range('6/1/2000', '6/20/2000', freq='D', name='idx')
closes #7523
https://api.github.com/repos/pandas-dev/pandas/pulls/7525
2014-06-20T13:05:25Z
2014-06-20T13:53:41Z
2014-06-20T13:53:41Z
2014-06-20T13:53:41Z
DOC: fix startrow typo in docstrings
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1653eedd26bc3..9645d09a5fd0d 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1177,7 +1177,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - startow : + startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 757bef0d1526d..982deae1190f1 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -417,7 +417,7 @@ def to_excel(self, path, na_rep='', engine=None, **kwargs): Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - startow : upper left cell row to dump data frame + startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame Notes
Minor, but worth fixing.
https://api.github.com/repos/pandas-dev/pandas/pulls/7521
2014-06-20T02:59:59Z
2014-06-20T07:54:37Z
2014-06-20T07:54:37Z
2014-06-20T07:54:37Z
BUG: Bug in Panel indexing with a multi-index axis (GH 7516)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c6144619da963..e038124cadc00 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -166,7 +166,7 @@ Experimental Bug Fixes ~~~~~~~~~ - Bug in ``DataFrame.where`` with a symmetric shaped frame and a passed other of a DataFrame (:issue:`7506`) - +- Bug in Panel indexing with a multi-index axis (:issue:`7516`) - Bug in timeops with non-aligned Series (:issue:`7500`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cf9ff8abff3ef..214994a6fc185 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1632,11 +1632,11 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy): continue # convert to an index if we are not a multi-selection + ax = self._get_axis(a) if level is None: labels = _ensure_index(labels) axis = self._get_axis_number(a) - ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, method=method) @@ -1929,11 +1929,11 @@ def _get_bool_data(self): def as_matrix(self, columns=None): """ - Convert the frame to its Numpy-array representation. - + Convert the frame to its Numpy-array representation. + Parameters ---------- - columns: list, optional, default:None + columns: list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns @@ -1942,23 +1942,23 @@ def as_matrix(self, columns=None): If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. - + Notes - ----- + ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. - + The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. - e.g. If the dtypes are float16 and float32, dtype will be upcast to - float32. If dtypes are int32 and uint8, dtype will be upcase to + e.g. 
If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcase to int32. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. - + See Also -------- pandas.DataFrame.values @@ -1971,7 +1971,7 @@ def as_matrix(self, columns=None): @property def values(self): """Numpy representation of NDFrame - + Notes ----- The dtype will be a lower-common-denominator dtype (implicit @@ -1979,8 +1979,8 @@ def values(self): are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. - e.g. If the dtypes are float16 and float32, dtype will be upcast to - float32. If dtypes are int32 and uint8, dtype will be upcase to + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcase to int32. """ return self.as_matrix() diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index bfff85ac4712c..0c695a0c2d632 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -816,7 +816,7 @@ def _getitem_nested_tuple(self, tup): # this is iterative obj = self.obj axis = 0 - for key in tup: + for i, key in enumerate(tup): if _is_null_slice(key): axis += 1 @@ -833,6 +833,13 @@ def _getitem_nested_tuple(self, tup): # has the dim of the obj changed? 
# GH 7199 if obj.ndim < current_ndim: + + # GH 7516 + # if had a 3 dim and are going to a 2d + # axes are reversed on a DataFrame + if i >= 1 and current_ndim == 3 and obj.ndim == 2: + obj = obj.T + axis -= 1 return obj diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 757bef0d1526d..23115999d074f 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -797,12 +797,15 @@ def _ixs(self, i, axis=0): axis : int """ - key = self._get_axis(axis)[i] + ax = self._get_axis(axis) + key = ax[i] # xs cannot handle a non-scalar key, so just reindex here - if _is_list_like(key): - indexer = {self._get_axis_name(axis): key} - return self.reindex(**indexer) + # if we have a multi-index and a single tuple, then its a reduction (GH 7516) + if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)): + if _is_list_like(key): + indexer = {self._get_axis_name(axis): key} + return self.reindex(**indexer) # a reduction if axis == 0: diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index cd569deac2ceb..4ee6bd1d949a5 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1179,6 +1179,16 @@ def f(): result2 = wd2.iloc[0,[0],[0,1,2]] assert_frame_equal(result2,expected2) + # GH 7516 + mi = MultiIndex.from_tuples([(0,'x'), (1,'y'), (2,'z')]) + p = Panel(np.arange(3*3*3,dtype='int64').reshape(3,3,3), items=['a','b','c'], major_axis=mi, minor_axis=['u','v','w']) + result = p.iloc[:, 1, 0] + expected = Series([3,12,21],index=['a','b','c'], name='u') + assert_series_equal(result,expected) + + result = p.loc[:, (1,'y'), 'u'] + assert_series_equal(result,expected) + def test_iloc_getitem_doc_issue(self): # multi axis slicing issue with single block
closes #7516
https://api.github.com/repos/pandas-dev/pandas/pulls/7519
2014-06-20T00:51:47Z
2014-06-20T12:04:29Z
2014-06-20T12:04:29Z
2014-06-20T12:04:29Z
BUG: area plot raises ValueError with tz-aware data
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 8ede5f32dded6..9e992573f568d 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -262,6 +262,8 @@ Bug Fixes +- Bug in area plot with tz-aware time series raises ``ValueError`` (:issue:`7471`) + - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index d19d071833ea7..dfdd37c468a85 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -460,9 +460,33 @@ def test_plot_figsize_and_title(self): def test_ts_area_lim(self): ax = self.ts.plot(kind='area', stacked=False) xmin, xmax = ax.get_xlim() - lines = ax.get_lines() - self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) - self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) + line = ax.get_lines()[0].get_data(orig=False)[0] + self.assertEqual(xmin, line[0]) + self.assertEqual(xmax, line[-1]) + tm.close() + + # GH 7471 + ax = self.ts.plot(kind='area', stacked=False, x_compat=True) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + self.assertEqual(xmin, line[0]) + self.assertEqual(xmax, line[-1]) + tm.close() + + tz_ts = self.ts.copy() + tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET') + ax = tz_ts.plot(kind='area', stacked=False, x_compat=True) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + self.assertEqual(xmin, line[0]) + self.assertEqual(xmax, line[-1]) + tm.close() + + ax = tz_ts.plot(kind='area', stacked=False, secondary_y=True) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + self.assertEqual(xmin, line[0]) + self.assertEqual(xmax, line[-1]) def test_line_area_nan_series(self): values = [1, 2, np.nan, 3] diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 
2b02523c143b4..7f2f583c5e20e 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1767,7 +1767,9 @@ def _post_plot_logic(self): else: if self.xlim is None: for ax in self.axes: - ax.set_xlim(0, len(self.data)-1) + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) if self.ylim is None: if (self.data >= 0).all().all(): @@ -3083,21 +3085,12 @@ def _get_all_lines(ax): def _get_xlim(lines): left, right = np.inf, -np.inf for l in lines: - x = l.get_xdata() - left = min(_maybe_convert_date(x[0]), left) - right = max(_maybe_convert_date(x[-1]), right) + x = l.get_xdata(orig=False) + left = min(x[0], left) + right = max(x[-1], right) return left, right -def _maybe_convert_date(x): - if not com.is_integer(x): - conv_func = conv._dt_to_float_ordinal - if isinstance(x, datetime.time): - conv_func = conv._to_ordinalf - x = conv_func(x) - return x - - if __name__ == '__main__': # import pandas.rpy.common as com # sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
Closes #7471. Must be revisit after #7322.
https://api.github.com/repos/pandas-dev/pandas/pulls/7515
2014-06-19T19:41:30Z
2014-07-05T01:31:30Z
2014-07-05T01:31:30Z
2014-07-05T04:46:54Z
BUG: Bug in DataFrame.where with a symmetric shaped frame and a passed other of a DataFrame
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c1e5877d09004..af859b7be2558 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -163,12 +163,12 @@ Experimental Bug Fixes ~~~~~~~~~ +- Bug in ``DataFrame.where`` with a symmetric shaped frame and a passed other of a DataFrame (:issue:`7506`) - -- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) +- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 75ec53c95869a..6b2d6bcfe3c80 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -921,9 +921,13 @@ def where(self, other, cond, align=True, raise_on_error=True, if hasattr(other, 'ndim') and hasattr(values, 'ndim'): if values.ndim != other.ndim or values.shape == other.shape[::-1]: + # if its symmetric are ok, no reshaping needed (GH 7506) + if (values.shape[0] == np.array(values.shape)).all(): + pass + # pseodo broadcast (its a 2d vs 1d say and where needs it in a # specific direction) - if (other.ndim >= 1 and values.ndim - 1 == other.ndim and + elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and values.shape[0] != other.shape[0]): other = _block_shape(other).T else: @@ -941,9 +945,11 @@ def where(self, other, cond, align=True, raise_on_error=True, # may need to undo transpose of values if hasattr(values, 'ndim'): if values.ndim != cond.ndim or values.shape == cond.shape[::-1]: + values = values.T is_transposed = not is_transposed + # our where function def func(c, v, o): if c.ravel().all(): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 8ed1d2d2d4f95..6848b130dee3a 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5564,27 +5564,27 @@ def test_to_csv_from_csv(self): with ensure_clean(pname) as path: self.frame['A'][:5] = nan - + self.frame.to_csv(path) self.frame.to_csv(path, columns=['A', 'B']) 
self.frame.to_csv(path, header=False) self.frame.to_csv(path, index=False) - + # test roundtrip self.tsframe.to_csv(path) recons = DataFrame.from_csv(path) - + assert_frame_equal(self.tsframe, recons) - + self.tsframe.to_csv(path, index_label='index') recons = DataFrame.from_csv(path, index_col=None) assert(len(recons.columns) == len(self.tsframe.columns) + 1) - + # no index self.tsframe.to_csv(path, index=False) recons = DataFrame.from_csv(path, index_col=None) assert_almost_equal(self.tsframe.values, recons.values) - + # corner case dm = DataFrame({'s1': Series(lrange(3), lrange(3)), 's2': Series(lrange(2), lrange(2))}) @@ -5600,7 +5600,7 @@ def test_to_csv_from_csv(self): df.to_csv(path) result = DataFrame.from_csv(path) assert_frame_equal(result, df) - + midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)]) df = DataFrame(np.random.randn(3, 3), index=midx, columns=['x', 'y', 'z']) @@ -5608,16 +5608,16 @@ def test_to_csv_from_csv(self): result = DataFrame.from_csv(path, index_col=[0, 1, 2], parse_dates=False) assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ? 
- + # column aliases col_aliases = Index(['AA', 'X', 'Y', 'Z']) self.frame2.to_csv(path, header=col_aliases) rs = DataFrame.from_csv(path) xp = self.frame2.copy() xp.columns = col_aliases - + assert_frame_equal(xp, rs) - + self.assertRaises(ValueError, self.frame2.to_csv, path, header=['AA', 'X']) @@ -5881,7 +5881,7 @@ def test_to_csv_from_csv_w_some_infs(self): with ensure_clean() as path: self.frame.to_csv(path) recons = DataFrame.from_csv(path) - + assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False) @@ -5940,11 +5940,11 @@ def test_to_csv_multiindex(self): frame.to_csv(path, header=False) frame.to_csv(path, columns=['A', 'B']) - + # round trip frame.to_csv(path) df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False) - + assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name self.assertEqual(frame.index.names, df.index.names) self.frame.index = old_index # needed if setUP becomes a classmethod @@ -9155,6 +9155,28 @@ def test_where_bug(self): result.where(result > 2, np.nan, inplace=True) assert_frame_equal(result, expected) + # transpositional issue + # GH7506 + a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]}) + b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]}) + do_not_replace = b.isnull() | (a > b) + + expected = a.copy() + expected[~do_not_replace] = b + + result = a.where(do_not_replace,b) + assert_frame_equal(result,expected) + + a = DataFrame({ 0 : [4,6], 1 : [1,0]}) + b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]}) + do_not_replace = b.isnull() | (a > b) + + expected = a.copy() + expected[~do_not_replace] = b + + result = a.where(do_not_replace,b) + assert_frame_equal(result,expected) + def test_where_datetime(self): # GH 3311
from SO: http://stackoverflow.com/questions/24296480/pandas-dataframe-where-misbehaving
https://api.github.com/repos/pandas-dev/pandas/pulls/7506
2014-06-19T00:23:26Z
2014-06-19T09:53:31Z
2014-06-19T09:53:31Z
2014-06-19T09:53:31Z
BUG, TST: Fix pandas.core.strings.str_contains when handling regex=False and case=False
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c1e5877d09004..0d1606f701a8f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -249,3 +249,4 @@ Bug Fixes - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) +- Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ad64d2bf6bdd9..3e730942ffc0e 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -189,7 +189,12 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): f = lambda x: bool(regex.search(x)) else: - f = lambda x: pat in x + if case: + f = lambda x: pat in x + else: + upper_pat = pat.upper() + f = lambda x: upper_pat in x + return _na_map(f, str_upper(arr), na, dtype=bool) return _na_map(f, arr, na, dtype=bool) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 55ab906544fc4..971d7acf73027 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -189,6 +189,17 @@ def test_contains(self): self.assertEqual(result.dtype, np.bool_) tm.assert_almost_equal(result, expected) + # case insensitive using regex + values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_'] + result = strings.str_contains(values, 'FOO|mmm', case=False) + expected = [True, False, True, True] + tm.assert_almost_equal(result, expected) + + # case insensitive without regex + result = strings.str_contains(values, 'foo', regex=False, case=False) + expected = [True, False, True, False] + tm.assert_almost_equal(result, expected) + # mixed mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.] rs = strings.str_contains(mixed, 'o')
`pandas.core.strings.str_contains` does not match in a case insensitive fashion _at all_ when given `regex=False` and `case=False`. This PR should fix the situation. Additionally, there is test coverage for `pandas.core.strings.str_contains` case insensitive matching both with and without regular expressions enabled.
https://api.github.com/repos/pandas-dev/pandas/pulls/7505
2014-06-18T22:40:48Z
2014-06-30T19:38:04Z
2014-06-30T19:38:04Z
2014-06-30T19:38:12Z
BUG: Bug in timeops with non-aligned Series (GH7500)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c1e5877d09004..930ba13bef122 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -165,10 +165,10 @@ Bug Fixes ~~~~~~~~~ +- Bug in timeops with non-aligned Series (:issue:`7500`) - -- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) +- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 0f19634cb5a38..780edec6ea25b 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -245,6 +245,12 @@ class _TimeOp(object): def __init__(self, left, right, name): self.name = name + # need to make sure that we are aligning the data + if isinstance(left, pd.Series) and isinstance(right, pd.Series): + left, right = left.align(right) + + self.left = left + self.right = right lvalues = self._convert_to_array(left, name=name) rvalues = self._convert_to_array(right, name=name, other=lvalues) @@ -426,6 +432,7 @@ def maybe_convert_for_time_op(cls, left, right, name): is_datetime_lhs = com.is_datetime64_dtype(left) if not (is_datetime_lhs or is_timedelta_lhs): return None + # rops are allowed. No need for special checks, just strip off # r part. 
if name.startswith('__r'): @@ -463,6 +470,7 @@ def wrapper(left, right, name=name): if isinstance(right, pd.DataFrame): return NotImplemented + time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name) if time_converted is None: @@ -472,8 +480,8 @@ def wrapper(left, right, name=name): elif time_converted == NotImplemented: return NotImplemented else: - lvalues = time_converted.lvalues - rvalues = time_converted.rvalues + left, right = time_converted.left, time_converted.right + lvalues, rvalues = time_converted.lvalues, time_converted.rvalues dtype = time_converted.dtype wrap_results = time_converted.wrap_results diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 2e3a9d922bb47..eb2b53dff3879 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2838,6 +2838,24 @@ def run_ops(ops, get_ser, test_ser): td1 + dt1 dt1 + td1 + def test_ops_datetimelike_align(self): + if _np_version_under1p7: + raise nose.SkipTest("timedelta broken in np < 1.7") + + # GH 7500 + # datetimelike ops need to align + dt = Series(date_range('2012-1-1', periods=3, freq='D')) + dt.iloc[2] = np.nan + dt2 = dt[::-1] + + expected = Series([timedelta(0),timedelta(0),pd.NaT]) + + result = dt2-dt + assert_series_equal(result,expected) + + result = (dt2.to_frame()-dt.to_frame())[0] + assert_series_equal(result,expected) + def test_timedelta64_functions(self): from datetime import timedelta
closes #7500
https://api.github.com/repos/pandas-dev/pandas/pulls/7503
2014-06-18T21:37:53Z
2014-06-19T00:01:34Z
2014-06-19T00:01:34Z
2014-06-19T00:01:34Z
BUG: offsets.apply may return datetime
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index e8c7a6f9ab462..a6ca9e1dfd033 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -244,12 +244,15 @@ Bug Fixes - Bug in ``DatetimeIndex.to_period``, ``PeriodIndex.asobject``, ``PeriodIndex.to_timestamp`` doesn't preserve ``name`` (:issue:`7485`) - Bug in ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestanp`` handle ``NaT`` incorrectly (:issue:`7228`) +- BUG in ``offsets.apply``, ''rollforward`` and ``rollback`` may return normal ``datetime`` (:issue:`7502`) - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) - Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`) + + - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index ff4d6a54d51d4..bcb68ded6fda7 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -45,7 +45,7 @@ def wrapper(self, other): return tslib.NaT if type(other) == date: other = datetime(other.year, other.month, other.day) - elif isinstance(other, np.datetime64): + if isinstance(other, (np.datetime64, datetime)): other = as_timestamp(other) tz = getattr(other, 'tzinfo', None) @@ -57,11 +57,8 @@ def wrapper(self, other): if isinstance(other, Timestamp) and not isinstance(result, Timestamp): result = as_timestamp(result) - if tz is not None: - if isinstance(result, Timestamp) and result.tzinfo is None: - result = result.tz_localize(tz) - elif isinstance(result, datetime) and result.tzinfo is None: - result = tz.localize(result) + if tz is not None and result.tzinfo is None: + result = result.tz_localize(tz) return result return wrapper diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index fddfb3e3b4b56..8c84598e35e1e 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -209,7 +209,7 @@ def 
_check_offsetfunc_works(self, offset, funcname, dt, expected, func = getattr(offset_s, funcname) result = func(dt) - self.assert_(isinstance(result, datetime)) + self.assert_(isinstance(result, Timestamp)) self.assertEqual(result, expected) result = func(Timestamp(dt)) @@ -227,11 +227,11 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, dt_tz = pytz.timezone(tz).localize(dt) result = func(dt_tz) - self.assert_(isinstance(result, datetime)) + self.assert_(isinstance(result, Timestamp)) self.assertEqual(result, expected_localize) result = func(Timestamp(dt, tz=tz)) - self.assert_(isinstance(result, datetime)) + self.assert_(isinstance(result, Timestamp)) self.assertEqual(result, expected_localize) def _check_nanofunc_works(self, offset, funcname, dt, expected):
Currently, `offsets.appy`, `rollforward` and `rollback` returns `Timestamp` if argument is `Timestamp` or `np.datetime64`. If input is `datetime`, these functions return `datetime` or `Timestamp` inconsistently depending on internal process. It may better to always return `Timestanp`? ### Affected Offsets - 'pandas.tseries.offsets.Day' - 'pandas.tseries.offsets.MonthBegin' - 'pandas.tseries.offsets.FY5253Quarter' - 'pandas.tseries.offsets.FY5253' - 'pandas.tseries.offsets.Week' - 'pandas.tseries.offsets.WeekOfMonth' - 'pandas.tseries.offsets.Easter' - 'pandas.tseries.offsets.Hour' - 'pandas.tseries.offsets.Minute' - 'pandas.tseries.offsets.Second' - 'pandas.tseries.offsets.Milli' - 'pandas.tseries.offsets.Micro'
https://api.github.com/repos/pandas-dev/pandas/pulls/7502
2014-06-18T20:50:11Z
2014-06-21T20:13:29Z
2014-06-21T20:13:29Z
2014-06-21T22:30:16Z
Deprecate detection of IPython frontends
diff --git a/pandas/core/common.py b/pandas/core/common.py index 92d60ae8d8847..9fb79464d061b 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2599,6 +2599,8 @@ def check_main(): def in_qtconsole(): """ check if we're inside an IPython qtconsole + + DEPRECATED: This is no longer needed, or working, in IPython 3 and above. """ try: ip = get_ipython() @@ -2616,6 +2618,9 @@ def in_qtconsole(): def in_ipnb(): """ check if we're inside an IPython Notebook + + DEPRECATED: This is no longer used in pandas, and won't work in IPython 3 + and above. """ try: ip = get_ipython() diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f702714fdc37e..b4e69e2056507 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -470,14 +470,12 @@ def _repr_html_(self): Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ - # ipnb in html repr mode allows scrolling - # users strongly prefer to h-scroll a wide HTML table in the browser - # then to get a summary view. GH3541, GH3573 - ipnbh = com.in_ipnb() and get_option('display.notebook_repr_html') - # qtconsole doesn't report it's line width, and also # behaves badly when outputting an HTML table # that doesn't fit the window, so disable it. + # XXX: In IPython 3.x and above, the Qt console will not attempt to + # display HTML, so this check can be removed when support for IPython 2.x + # is no longer needed. if com.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None
The check for the Qt console won't be necessary in IPython 3 and above: we decided that the Qt console's display of HTML reprs was so bad that it shouldn't attempt to show them. We also want to remove the `parent_appname` config value (ipython/ipython#4980), which will break both of these functions (making them always return False). The assignment to `ipnbh` was redundant - nothing used that variable. I guess it was used previously and didn't get cleaned up during some changes (quite possibly my own omission).
https://api.github.com/repos/pandas-dev/pandas/pulls/7499
2014-06-18T18:58:40Z
2014-06-24T22:01:49Z
2014-06-24T22:01:49Z
2014-06-24T22:01:53Z
BUG: Bug in .loc performing fallback integer indexing with object dtype indices (GH7496)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 930ba13bef122..01cfa73b8b9f4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -62,6 +62,8 @@ API changes when comparing a ``Period`` with another object using ``==`` if the other object isn't a ``Period`` ``False`` is returned. +- Bug in ``.loc`` performing fallback integer indexing with ``object`` dtype indices (:issue:`7496`) + .. _whatsnew_0141.prior_deprecations: Prior Version Deprecations/Changes diff --git a/pandas/core/index.py b/pandas/core/index.py index 5956cd7eea9c0..30002a719a556 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -609,7 +609,7 @@ def _convert_list_indexer_for_mixed(self, keyarr, typ=None): and we have a mixed index (e.g. number/labels). figure out the indexer. return None if we can't help """ - if com.is_integer_dtype(keyarr) and not self.is_floating(): + if (typ is None or typ in ['iloc','ix']) and (com.is_integer_dtype(keyarr) and not self.is_floating()): if self.inferred_type != 'integer': keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index c074c4333a774..cd569deac2ceb 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -806,6 +806,38 @@ def test_loc_to_fail(self): # raise a KeyError? 
self.assertRaises(KeyError, df.loc.__getitem__, tuple([[1, 2], [1, 2]])) + # GH 7496 + # loc should not fallback + + s = Series() + s.loc[1] = 1 + s.loc['a'] = 2 + + self.assertRaises(KeyError, lambda : s.loc[-1]) + + result = s.loc[[-1, -2]] + expected = Series(np.nan,index=[-1,-2]) + assert_series_equal(result, expected) + + result = s.loc[['4']] + expected = Series(np.nan,index=['4']) + assert_series_equal(result, expected) + + s.loc[-1] = 3 + result = s.loc[[-1,-2]] + expected = Series([3,np.nan],index=[-1,-2]) + assert_series_equal(result, expected) + + s['a'] = 2 + result = s.loc[[-2]] + expected = Series([np.nan],index=[-2]) + assert_series_equal(result, expected) + + del s['a'] + def f(): + s.loc[[-2]] = 0 + self.assertRaises(KeyError, f) + def test_loc_getitem_label_slice(self): # label slices (with ints)
closes #7496 ``` In [4]: s = Series() In [5]: s.loc[1] = 1 In [6]: s.loc['a'] = 2 In [7]: s.loc[-1] KeyError: 'the label [-1] is not in the [index]' In [8]: s.loc[[-1, -2]] Out[8]: -1 NaN -2 NaN dtype: float64 In [9]: s.loc[['4']] Out[9]: 4 NaN dtype: float64 In [10]: s.loc[-1] = 3 In [11]: s.loc[[-1,-2]] Out[11]: -1 3 -2 NaN dtype: float64 In [12]: s['a'] = 2 In [13]: s.loc[[-2]] Out[13]: -2 NaN dtype: float64 In [14]: del s['a'] In [15]: s.loc[[-2]] = 0 KeyError: '[-2] not in index' ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7497
2014-06-18T15:29:24Z
2014-06-19T09:53:10Z
2014-06-19T09:53:10Z
2014-06-19T09:53:10Z
DOC: Fix index error for remote data docs.
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index 98d14b23e28bf..3b12023c47f1e 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -69,13 +69,13 @@ to the specific option you want. from pandas.io.data import Options aapl = Options('aapl', 'yahoo') data = aapl.get_all_data() - data.iloc[0:5:, 0:5] + data.iloc[0:5, 0:5] #Show the $100 strike puts at all expiry dates: - data.loc[(100, slice(None), 'put'),:].iloc[0:5:, 0:5] + data.loc[(100, slice(None), 'put'),:].iloc[0:5, 0:5] #Show the volume traded of $100 strike puts at all expiry dates: - data.loc[(100, slice(None), 'put'),'Vol'].iloc[0:5:, 0:5] + data.loc[(100, slice(None), 'put'),'Vol'].head() If you don't want to download all the data, more specific requests can be made. diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2bd92953e2db7..c1e5877d09004 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -148,7 +148,7 @@ Performance Experimental ~~~~~~~~~~~~ -``pandas.io.data.Options`` has a get_all_data method and now consistently returns a multi-indexed ``DataFrame`` (PR `#5602`) +``pandas.io.data.Options`` has a get_all_data method and now consistently returns a multi-indexed ``DataFrame`` (:issue:`5602`) See :ref:`the docs<remote_data.yahoo_Options>` ***Experimental*** .. ipython:: python @@ -156,15 +156,7 @@ Experimental from pandas.io.data import Options aapl = Options('aapl', 'yahoo') data = aapl.get_all_data() - data.iloc[0:5:, 0:5] - - - .. ipython:: python - - from pandas.io.data import Options - aapl = Options('aapl', 'yahoo') - data = aapl.get_all_data() - data.iloc[0:5:, 0:5] + data.iloc[0:5, 0:5] .. _whatsnew_0141.bug_fixes:
Fixes the index error caused by #7484. Also removes duplicate example in v0.14.1.txt
https://api.github.com/repos/pandas-dev/pandas/pulls/7495
2014-06-18T14:40:36Z
2014-06-18T16:23:09Z
2014-06-18T16:23:09Z
2014-06-18T16:23:14Z
[doc fix] minor docstring update to fix df.to_latex() usepackage syntax
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9c1d593187b2c..1653eedd26bc3 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1358,7 +1358,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, bold_rows=True, longtable=False, escape=True): """ Render a DataFrame to a tabular environment table. You can splice - this into a LaTeX document. Requires \\usepackage(booktabs}. + this into a LaTeX document. Requires \\usepackage{booktabs}. `to_latex`-specific options:
Minor docstring fix for option `DF.to_latex()` for `\\usepackage{booktabs}`
https://api.github.com/repos/pandas-dev/pandas/pulls/7489
2014-06-18T01:40:13Z
2014-06-18T06:49:39Z
2014-06-18T06:49:39Z
2014-06-18T06:49:43Z
Switch if to elif in test_frame.test_to_csv_moar._do_test
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index ea3dafa07715b..8ed1d2d2d4f95 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5748,7 +5748,7 @@ def _to_uni(x): recons.index = np.array(lmap(_to_uni,recons.index), dtype=r_dtype) df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype) - if r_dtype == 'dt': # unicode + elif r_dtype == 'dt': # unicode r_dtype='O' recons.index = np.array(lmap(Timestamp,recons.index), dtype=r_dtype)
An if-elif-else chain had an "if" where there should have been an elif.
https://api.github.com/repos/pandas-dev/pandas/pulls/7488
2014-06-18T00:31:58Z
2014-06-18T12:19:26Z
2014-06-18T12:19:26Z
2014-06-18T12:19:29Z
ENH/BUG: Period/PeriodIndex supports NaT
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 18ae412c9711d..c6144619da963 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -130,7 +130,7 @@ Enhancements - All offsets ``apply``, ``rollforward`` and ``rollback`` can now handle ``np.datetime64``, previously results in ``ApplyTypeError`` (:issue:`7452`) - +- ``Period`` and ``PeriodIndex`` can contain ``NaT`` in its values (:issue:`7485`) .. _whatsnew_0141.performance: @@ -239,6 +239,9 @@ Bug Fixes - Bug in passing input with ``tzinfo`` to some offsets ``apply``, ``rollforward`` or ``rollback`` resets ``tzinfo`` or raises ``ValueError`` (:issue:`7465`) +- Bug in ``DatetimeIndex.to_period``, ``PeriodIndex.asobject``, ``PeriodIndex.to_timestamp`` doesn't preserve ``name`` (:issue:`7485`) +- Bug in ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestanp`` handle ``NaT`` incorrectly (:issue:`7228`) + - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 87c1742c54b01..16468f24a0ee1 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -809,7 +809,7 @@ def to_period(self, freq=None): if freq is None: freq = get_period_alias(self.freqstr) - return PeriodIndex(self.values, freq=freq, tz=self.tz) + return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz) def order(self, return_indexer=False, ascending=True): """ diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 31785bb7a6753..c44c3c9272f6a 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -102,6 +102,12 @@ def __init__(self, value=None, freq=None, ordinal=None, converted = other.asfreq(freq) self.ordinal = converted.ordinal + elif com._is_null_datelike_scalar(value) or value in tslib._nat_strings: + self.ordinal = tslib.iNaT + if freq is None: + raise ValueError("If value is NaT, freq cannot be None " + "because it cannot be inferred") + elif isinstance(value, 
compat.string_types) or com.is_integer(value): if com.is_integer(value): value = str(value) @@ -136,6 +142,8 @@ def __eq__(self, other): if isinstance(other, Period): if other.freq != self.freq: raise ValueError("Cannot compare non-conforming periods") + if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT: + return False return (self.ordinal == other.ordinal and _gfc(self.freq) == _gfc(other.freq)) return NotImplemented @@ -148,26 +156,38 @@ def __hash__(self): def __add__(self, other): if com.is_integer(other): - return Period(ordinal=self.ordinal + other, freq=self.freq) + if self.ordinal == tslib.iNaT: + ordinal = self.ordinal + else: + ordinal = self.ordinal + other + return Period(ordinal=ordinal, freq=self.freq) else: # pragma: no cover - raise TypeError(other) + return NotImplemented def __sub__(self, other): if com.is_integer(other): - return Period(ordinal=self.ordinal - other, freq=self.freq) + if self.ordinal == tslib.iNaT: + ordinal = self.ordinal + else: + ordinal = self.ordinal - other + return Period(ordinal=ordinal, freq=self.freq) if isinstance(other, Period): if other.freq != self.freq: raise ValueError("Cannot do arithmetic with " "non-conforming periods") + if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT: + return Period(ordinal=tslib.iNaT, freq=self.freq) return self.ordinal - other.ordinal else: # pragma: no cover - raise TypeError(other) + return NotImplemented def _comp_method(func, name): def f(self, other): if isinstance(other, Period): if other.freq != self.freq: raise ValueError("Cannot compare non-conforming periods") + if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT: + return False return func(self.ordinal, other.ordinal) else: raise TypeError(other) @@ -213,7 +233,10 @@ def start_time(self): @property def end_time(self): - ordinal = (self + 1).start_time.value - 1 + if self.ordinal == tslib.iNaT: + ordinal = self.ordinal + else: + ordinal = (self + 1).start_time.value - 1 return Timestamp(ordinal) 
def to_timestamp(self, freq=None, how='start', tz=None): @@ -480,6 +503,11 @@ def _period_index_cmp(opname): Wrap comparison operations to convert datetime-like to datetime64 """ def wrapper(self, other): + if opname == '__ne__': + fill_value = True + else: + fill_value = False + if isinstance(other, Period): func = getattr(self.values, opname) if other.freq != self.freq: @@ -489,12 +517,26 @@ def wrapper(self, other): elif isinstance(other, PeriodIndex): if other.freq != self.freq: raise AssertionError("Frequencies must be equal") - return getattr(self.values, opname)(other.values) + + result = getattr(self.values, opname)(other.values) + + mask = (com.mask_missing(self.values, tslib.iNaT) | + com.mask_missing(other.values, tslib.iNaT)) + if mask.any(): + result[mask] = fill_value + + return result else: other = Period(other, freq=self.freq) func = getattr(self.values, opname) result = func(other.ordinal) + if other.ordinal == tslib.iNaT: + result.fill(fill_value) + mask = self.values == tslib.iNaT + if mask.any(): + result[mask] = fill_value + return result return wrapper @@ -712,7 +754,7 @@ def asof_locs(self, where, mask): @property def asobject(self): - return Index(self._box_values(self.values), dtype=object) + return Index(self._box_values(self.values), name=self.name, dtype=object) def _array_values(self): return self.asobject @@ -768,11 +810,7 @@ def asfreq(self, freq=None, how='E'): end = how == 'E' new_data = tslib.period_asfreq_arr(self.values, base1, base2, end) - - result = new_data.view(PeriodIndex) - result.name = self.name - result.freq = freq - return result + return self._simple_new(new_data, self.name, freq=freq) def to_datetime(self, dayfirst=False): return self.to_timestamp() @@ -868,16 +906,23 @@ def shift(self, n): ------- shifted : PeriodIndex """ - if n == 0: - return self - - return PeriodIndex(data=self.values + n, freq=self.freq) + mask = self.values == tslib.iNaT + values = self.values + n + values[mask] = tslib.iNaT + return 
PeriodIndex(data=values, name=self.name, freq=self.freq) def __add__(self, other): - return PeriodIndex(ordinal=self.values + other, freq=self.freq) + try: + return self.shift(other) + except TypeError: + # self.values + other raises TypeError for invalid input + return NotImplemented def __sub__(self, other): - return PeriodIndex(ordinal=self.values - other, freq=self.freq) + try: + return self.shift(-other) + except TypeError: + return NotImplemented @property def inferred_type(self): @@ -1207,8 +1252,11 @@ def _get_ordinal_range(start, end, periods, freq): is_start_per = isinstance(start, Period) is_end_per = isinstance(end, Period) - if is_start_per and is_end_per and (start.freq != end.freq): + if is_start_per and is_end_per and start.freq != end.freq: raise ValueError('Start and end must have same freq') + if ((is_start_per and start.ordinal == tslib.iNaT) or + (is_end_per and end.ordinal == tslib.iNaT)): + raise ValueError('Start and end must not be NaT') if freq is None: if is_start_per: diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index d58621b320a84..af39bba8e43af 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -24,6 +24,7 @@ from pandas.compat import range, lrange, lmap, zip from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9 +from pandas import tslib from pandas.util.testing import(assert_series_equal, assert_almost_equal, assertRaisesRegexp) import pandas.util.testing as tm @@ -77,6 +78,21 @@ def test_period_cons_weekly(self): expected = Period(daystr, freq='D').asfreq(freq) self.assertEqual(result, expected) + def test_period_cons_nat(self): + p = Period('NaT', freq='M') + self.assertEqual(p.ordinal, tslib.iNaT) + self.assertEqual(p.freq, 'M') + + p = Period('nat', freq='W-SUN') + self.assertEqual(p.ordinal, tslib.iNaT) + self.assertEqual(p.freq, 'W-SUN') + + p = Period(tslib.iNaT, freq='D') + self.assertEqual(p.ordinal, tslib.iNaT) + 
self.assertEqual(p.freq, 'D') + + self.assertRaises(ValueError, Period, 'NaT') + def test_timestamp_tz_arg(self): import pytz p = Period('1/1/2005', freq='M').to_timestamp(tz='Europe/Brussels') @@ -94,6 +110,13 @@ def test_timestamp_tz_arg_dateutil_from_string(self): p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') self.assertEqual(p.tz, dateutil.tz.gettz('Europe/Brussels')) + def test_timestamp_nat_tz(self): + t = Period('NaT', freq='M').to_timestamp() + self.assertTrue(t is tslib.NaT) + + t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo') + self.assertTrue(t is tslib.NaT) + def test_period_constructor(self): i1 = Period('1/1/2005', freq='M') i2 = Period('Jan 2005') @@ -219,6 +242,10 @@ def test_repr(self): p = Period('2000-12-15') self.assertIn('2000-12-15', repr(p)) + def test_repr_nat(self): + p = Period('nat', freq='M') + self.assertIn(repr(tslib.NaT), repr(p)) + def test_millisecond_repr(self): p = Period('2000-01-01 12:15:02.123') @@ -296,6 +323,9 @@ def _ex(p): assertRaisesRegexp(ValueError, 'Only mult == 1', p.to_timestamp, '5t') + p = Period('NaT', freq='W') + self.assertTrue(p.to_timestamp() is tslib.NaT) + def test_start_time(self): freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S'] xp = datetime(2012, 1, 1) @@ -307,6 +337,9 @@ def test_start_time(self): self.assertEqual(Period('2012', freq='W').start_time, datetime(2011, 12, 26)) + p = Period('NaT', freq='W') + self.assertTrue(p.start_time is tslib.NaT) + def test_end_time(self): p = Period('2012', freq='A') @@ -338,6 +371,9 @@ def _ex(*args): xp = _ex(2012, 1, 2) self.assertEqual(Period('2012', freq='W').end_time, xp) + p = Period('NaT', freq='W') + self.assertTrue(p.end_time is tslib.NaT) + def test_anchor_week_end_time(self): def _ex(*args): return Timestamp(Timestamp(datetime(*args)).value - 1) @@ -449,6 +485,18 @@ def test_properties_secondly(self): assert_equal(s_date.minute, 0) assert_equal(s_date.second, 0) + def test_properties_nat(self): + p_nat = Period('NaT', 
freq='M') + t_nat = pd.Timestamp('NaT') + # confirm Period('NaT') work identical with Timestamp('NaT') + for f in ['year', 'month', 'day', 'hour', 'minute', 'second', + 'week', 'dayofyear', 'quarter']: + self.assertEqual(getattr(p_nat, f), -1) + self.assertEqual(getattr(t_nat, f), -1) + + for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']: + self.assertEqual(getattr(p_nat, f), -1) + def test_pnow(self): dt = datetime.now() @@ -1084,6 +1132,12 @@ def test_conv_secondly(self): assert_equal(ival_S.asfreq('S'), ival_S) + def test_asfreq_nat(self): + p = Period('NaT', freq='A') + result = p.asfreq('M') + self.assertEqual(result.ordinal, tslib.iNaT) + self.assertEqual(result.freq, 'M') + class TestPeriodIndex(tm.TestCase): @@ -1213,6 +1267,12 @@ def test_constructor_simple_new(self): result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq) self.assertTrue(result.equals(idx)) + def test_constructor_nat(self): + self.assertRaises( + ValueError, period_range, start='NaT', end='2011-01-01', freq='M') + self.assertRaises( + ValueError, period_range, start='2011-01-01', end='NaT', freq='M') + def test_is_(self): create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') @@ -1374,6 +1434,32 @@ def test_to_timestamp_repr_is_code(self): for z in zs: self.assertEqual( eval(repr(z)), z) + def test_to_timestamp_period_nat(self): + # GH 7228 + index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx') + + result = index.to_timestamp('D') + expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1), + datetime(2011, 2, 1)], name='idx') + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, 'idx') + + result2 = result.to_period(freq='M') + self.assertTrue(result2.equals(index)) + self.assertEqual(result2.name, 'idx') + + def test_asobject_period_nat(self): + index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx') + + result = index.asobject + self.assertTrue(isinstance(result, Index)) + 
self.assertEqual(result.dtype, object) + self.assertTrue(isinstance(result[0], Period)) + self.assertEqual(result[0].ordinal, tslib.iNaT) + self.assertEqual(result[1], Period('2011-01', freq='M')) + self.assertEqual(result[2], Period('2011-02', freq='M')) + self.assertEqual(result.name, 'idx') + def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) @@ -1649,6 +1735,13 @@ def test_shift(self): assert_equal(len(pi1), len(pi2)) assert_equal(pi1.shift(-1).values, pi2.values) + def test_shift_nat(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx') + result = idx.shift(1) + expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx') + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + def test_asfreq(self): pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001') pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001') @@ -1711,6 +1804,12 @@ def test_asfreq(self): self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo') self.assertRaises(ValueError, pi1.asfreq, '5t') + def test_asfreq_nat(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M') + result = idx.asfreq(freq='Q') + expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q') + self.assertTrue(result.equals(expected)) + def test_ts_repr(self): index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010') ts = Series(np.random.randn(len(index)), index=index) @@ -1987,6 +2086,11 @@ def test_range_slice_outofbounds(self): tm.assert_frame_equal(df['2013-06':'2013-09'], empty) tm.assert_frame_equal(df['2013-11':'2013-12'], empty) + def test_pindex_fieldaccessor_nat(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D') + self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012])) + self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4])) + 
def test_pindex_qaccess(self): pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q') s = Series(np.random.rand(len(pi)), index=pi).cumsum() @@ -2382,8 +2486,33 @@ def test_add(self): dt2 = Period(freq='D', year=2008, month=1, day=2) assert_equal(dt1 + 1, dt2) # - self.assertRaises(TypeError, dt1.__add__, "str") - self.assertRaises(TypeError, dt1.__add__, dt2) + # GH 4731 + msg = "unsupported operand type\(s\)" + with tm.assertRaisesRegexp(TypeError, msg): + dt1 + "str" + + with tm.assertRaisesRegexp(TypeError, msg): + dt1 + dt2 + + def test_nat_ops(self): + p = Period('NaT', freq='M') + self.assertEqual((p + 1).ordinal, tslib.iNaT) + self.assertEqual((p - 1).ordinal, tslib.iNaT) + self.assertEqual((p - Period('2011-01', freq='M')).ordinal, tslib.iNaT) + self.assertEqual((Period('2011-01', freq='M') - p).ordinal, tslib.iNaT) + + def test_pi_ops_nat(self): + idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx') + result = idx + 2 + expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx') + self.assertTrue(result.equals(expected)) + + result2 = result - 2 + self.assertTrue(result2.equals(idx)) + + msg = "unsupported operand type\(s\)" + with tm.assertRaisesRegexp(TypeError, msg): + idx + "str" class TestPeriodRepresentation(tm.TestCase): @@ -2459,7 +2588,8 @@ def test_equal(self): self.assertEqual(self.january1, self.january2) def test_equal_Raises_Value(self): - self.assertRaises(ValueError, self.january1.__eq__, self.day) + with tm.assertRaises(ValueError): + self.january1 == self.day def test_notEqual(self): self.assertNotEqual(self.january1, 1) @@ -2469,41 +2599,87 @@ def test_greater(self): self.assertTrue(self.february > self.january1) def test_greater_Raises_Value(self): - self.assertRaises(ValueError, self.january1.__gt__, self.day) + with tm.assertRaises(ValueError): + self.january1 > self.day def test_greater_Raises_Type(self): - self.assertRaises(TypeError, self.january1.__gt__, 1) + 
with tm.assertRaises(TypeError): + self.january1 > 1 def test_greaterEqual(self): self.assertTrue(self.january1 >= self.january2) def test_greaterEqual_Raises_Value(self): - self.assertRaises(ValueError, self.january1.__ge__, self.day) - self.assertRaises(TypeError, self.january1.__ge__, 1) + with tm.assertRaises(ValueError): + self.january1 >= self.day + with tm.assertRaises(TypeError): + print(self.january1 >= 1) def test_smallerEqual(self): self.assertTrue(self.january1 <= self.january2) def test_smallerEqual_Raises_Value(self): - self.assertRaises(ValueError, self.january1.__le__, self.day) + with tm.assertRaises(ValueError): + self.january1 <= self.day def test_smallerEqual_Raises_Type(self): - self.assertRaises(TypeError, self.january1.__le__, 1) + with tm.assertRaises(TypeError): + self.january1 <= 1 def test_smaller(self): self.assertTrue(self.january1 < self.february) def test_smaller_Raises_Value(self): - self.assertRaises(ValueError, self.january1.__lt__, self.day) + with tm.assertRaises(ValueError): + self.january1 < self.day def test_smaller_Raises_Type(self): - self.assertRaises(TypeError, self.january1.__lt__, 1) + with tm.assertRaises(TypeError): + self.january1 < 1 def test_sort(self): periods = [self.march, self.january1, self.february] correctPeriods = [self.january1, self.february, self.march] self.assertEqual(sorted(periods), correctPeriods) + def test_period_nat_comp(self): + p_nat = Period('NaT', freq='D') + p = Period('2011-01-01', freq='D') + + nat = pd.Timestamp('NaT') + t = pd.Timestamp('2011-01-01') + # confirm Period('NaT') work identical with Timestamp('NaT') + for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), + (nat, t), (t, nat), (nat, nat)]: + self.assertEqual(left < right, False) + self.assertEqual(left > right, False) + self.assertEqual(left == right, False) + self.assertEqual(left != right, True) + self.assertEqual(left <= right, False) + self.assertEqual(left >= right, False) + + def test_pi_nat_comp(self): + idx1 = 
PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq='M') + + result = idx1 > Period('2011-02', freq='M') + self.assert_numpy_array_equal(result, np.array([False, False, False, True])) + + result = idx1 == Period('NaT', freq='M') + self.assert_numpy_array_equal(result, np.array([False, False, False, False])) + + result = idx1 != Period('NaT', freq='M') + self.assert_numpy_array_equal(result, np.array([True, True, True, True])) + + idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='M') + result = idx1 < idx2 + self.assert_numpy_array_equal(result, np.array([True, False, False, False])) + + result = idx1 == idx1 + self.assert_numpy_array_equal(result, np.array([True, True, False, True])) + + result = idx1 != idx1 + self.assert_numpy_array_equal(result, np.array([False, False, True, False])) + if __name__ == '__main__': import nose diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 2e0e8af3f5119..24b1215b949a3 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -3028,6 +3028,9 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None): if tz is None: for i in range(l): + if dtarr[i] == iNaT: + out[i] = iNaT + continue pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts) out[i] = get_period_ordinal(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq) @@ -3049,6 +3052,9 @@ def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq): out = np.empty(l, dtype='i8') for i in range(l): + if periodarr[i] == iNaT: + out[i] = iNaT + continue out[i] = period_ordinal_to_dt64(periodarr[i], freq) return out @@ -3065,6 +3071,9 @@ cpdef int64_t period_asfreq(int64_t period_ordinal, int freq1, int freq2, cdef: int64_t retval + if period_ordinal == iNaT: + return iNaT + if end: retval = asfreq(period_ordinal, freq1, freq2, END) else: @@ -3100,6 +3109,9 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): relation = START for i in range(n): + if arr[i] == iNaT: + result[i] = 
iNaT + continue val = func(arr[i], relation, &finfo) if val == INT32_MIN: raise ValueError("Unable to convert to desired frequency.") @@ -3120,6 +3132,9 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq): date_info dinfo float subsecond_fraction + if ordinal == iNaT: + return NPY_NAT + get_date_info(ordinal, freq, &dinfo) dts.year = dinfo.year @@ -3138,6 +3153,9 @@ def period_format(int64_t value, int freq, object fmt=None): cdef: int freq_group + if value == iNaT: + return repr(NaT) + if fmt is None: freq_group = (freq // 1000) * 1000 if freq_group == 1000: # FR_ANN @@ -3241,6 +3259,8 @@ def get_period_field(int code, int64_t value, int freq): cdef accessor f = _get_accessor_func(code) if f is NULL: raise ValueError('Unrecognized period code: %d' % code) + if value == iNaT: + return -1 return f(value, freq) def get_period_field_arr(int code, ndarray[int64_t] arr, int freq): @@ -3257,6 +3277,9 @@ def get_period_field_arr(int code, ndarray[int64_t] arr, int freq): out = np.empty(sz, dtype=np.int64) for i in range(sz): + if arr[i] == iNaT: + out[i] = -1 + continue out[i] = f(arr[i], freq) return out
Closes #7228. Closes #4731. `Period` and `PeriodIndex` now can contain `NaT` using `iNaT` as its internal value.
https://api.github.com/repos/pandas-dev/pandas/pulls/7485
2014-06-17T16:04:40Z
2014-06-19T19:27:10Z
2014-06-19T19:27:10Z
2014-06-20T14:45:55Z
DOC: Clean up docs for io.data.Options.
diff --git a/doc/source/release.rst b/doc/source/release.rst index da6c46ce37a94..fb2c24acff30d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -55,10 +55,6 @@ performance improvements along with a large number of bug fixes. Highlights include: -Experimental Features -~~~~~~~~~~~~~~~~~~~~~ -- ``pandas.io.data.Options`` has a get_all_data method and now consistently returns a multi-indexed ''DataFrame'' (:issue:`5602`) - See the :ref:`v0.14.1 Whatsnew <whatsnew_0141>` overview or the issue tracker on GitHub for an extensive list of all API changes, enhancements and bugs that have been fixed in 0.14.1. diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index aae36ee1d54b3..98d14b23e28bf 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -60,8 +60,8 @@ Yahoo! Finance Options The Options class allows the download of options data from Yahoo! Finance. -The ''get_all_data'' method downloads and caches option data for all expiry months -and provides a formatted ''DataFrame'' with a hierarchical index, so its easy to get +The ``get_all_data`` method downloads and caches option data for all expiry months +and provides a formatted ``DataFrame`` with a hierarchical index, so its easy to get to the specific option you want. .. ipython:: python @@ -69,13 +69,13 @@ to the specific option you want. 
from pandas.io.data import Options aapl = Options('aapl', 'yahoo') data = aapl.get_all_data() - data.head() + data.iloc[0:5:, 0:5] - #Show the $600 strike puts at all expiry dates: - data.loc[(600, slice(None), 'put'),:].head() + #Show the $100 strike puts at all expiry dates: + data.loc[(100, slice(None), 'put'),:].iloc[0:5:, 0:5] - #Show the volume traded of $600 strike puts at all expiry dates: - data.loc[(600, slice(None), 'put'),'Vol'].head() + #Show the volume traded of $100 strike puts at all expiry dates: + data.loc[(100, slice(None), 'put'),'Vol'].iloc[0:5:, 0:5] If you don't want to download all the data, more specific requests can be made. @@ -84,9 +84,9 @@ If you don't want to download all the data, more specific requests can be made. import datetime expiry = datetime.date(2016, 1, 1) data = aapl.get_call_data(expiry=expiry) - data.head() + data.iloc[0:5:, 0:5] -Note that if you call ''get_all_data'' first, this second call will happen much faster, as the data is cached. +Note that if you call ``get_all_data`` first, this second call will happen much faster, as the data is cached. .. _remote_data.google: diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c0696ca45167a..2bd92953e2db7 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -148,7 +148,7 @@ Performance Experimental ~~~~~~~~~~~~ -``pandas.io.data.Options`` has a get_all_data method and now consistently returns a multi-indexed ''DataFrame'' (PR `#5602`) +``pandas.io.data.Options`` has a get_all_data method and now consistently returns a multi-indexed ``DataFrame`` (PR `#5602`) See :ref:`the docs<remote_data.yahoo_Options>` ***Experimental*** .. ipython:: python @@ -156,14 +156,15 @@ Experimental from pandas.io.data import Options aapl = Options('aapl', 'yahoo') data = aapl.get_all_data() - data.head() + data.iloc[0:5:, 0:5] + .. 
ipython:: python from pandas.io.data import Options aapl = Options('aapl', 'yahoo') data = aapl.get_all_data() - data.head() + data.iloc[0:5:, 0:5] .. _whatsnew_0141.bug_fixes:
Per conversation on #5602, adjusted some of the docs. Removed from release.rst, changed example strike price for AAPL, fixed backticks.
https://api.github.com/repos/pandas-dev/pandas/pulls/7484
2014-06-17T16:04:01Z
2014-06-17T18:52:02Z
2014-06-17T18:52:02Z
2014-06-18T09:35:51Z
DOC: fix docstring of value_counts/nunique dropna argument after GH7424
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 1aec8561807c9..c45256c482e8f 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -170,7 +170,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ - Compute a histogram of the counts of non-null values + Compute a histogram of the counts of non-null values. Parameters ---------- @@ -184,7 +184,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data - dropna : boolean, default False + dropna : boolean, default True Don't include counts of NaN Returns diff --git a/pandas/core/base.py b/pandas/core/base.py index b43883885e962..d430b709e403e 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -247,9 +247,11 @@ def min(self): def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): """ - Returns object containing counts of unique values. The resulting object - will be in descending order so that the first element is the most - frequently-occurring element. Excludes NA values. + Returns object containing counts of unique values. + + The resulting object will be in descending order so that the + first element is the most frequently-occurring element. + Excludes NA values by default. Parameters ---------- @@ -263,8 +265,8 @@ def value_counts(self, normalize=False, sort=True, ascending=False, bins : integer, optional Rather than count values, group them into half-open bins, a convenience for pd.cut, only works with numeric data - dropna : boolean, default False - Don't include counts of NaN + dropna : boolean, default True + Don't include counts of NaN. 
Returns ------- @@ -288,7 +290,14 @@ def unique(self): def nunique(self, dropna=True): """ - Return count of unique elements in the object. Excludes NA values. + Return number of unique elements in the object. + + Excludes NA values by default. + + Parameters + ---------- + dropna : boolean, default True + Don't include NaN in the count. Returns -------
See #7424, fix docstring.
https://api.github.com/repos/pandas-dev/pandas/pulls/7483
2014-06-17T13:38:20Z
2014-07-07T19:11:41Z
2014-07-07T19:11:41Z
2014-07-07T19:11:41Z
BUG: Make copies of certain interpolate arguments (GH7295)
diff --git a/pandas/core/common.py b/pandas/core/common.py index 8a44723a83c4e..92d60ae8d8847 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1538,6 +1538,14 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, terp = interpolate.UnivariateSpline(x, y, k=order) new_y = terp(new_x) else: + # GH 7295: need to be able to write for some reason + # in some circumstances: check all three + if not x.flags.writeable: + x = x.copy() + if not y.flags.writeable: + y = y.copy() + if not new_x.flags.writeable: + new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x) return new_y
closes #7295 On the alt_methods branch in interpolate_1d, make copies if x, y, and new_x don't have their `writeable` attribute set. It seems a little weird that we need to do this, but it'll work around the issue so that the test can pass, anyway.
https://api.github.com/repos/pandas-dev/pandas/pulls/7479
2014-06-17T01:32:57Z
2014-06-17T11:22:59Z
2014-06-17T11:22:59Z
2014-06-17T11:23:03Z
Fix cache key collision and add test for cache key distinctness.
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index e6da490e1f722..b1d8bdd9f81ce 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -805,6 +805,21 @@ def test_utc_with_system_utc(self): self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc())) +class TestTimeZoneCacheKey(tm.TestCase): + def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self): + tzs = pytz.common_timezones + for tz_name in tzs: + if tz_name == 'UTC': + # skip utc as it's a special case in dateutil + continue + tz_p = tslib.maybe_get_tz(tz_name) + tz_d = tslib.maybe_get_tz('dateutil/' + tz_name) + if tz_d is None: + # skip timezones that dateutil doesn't know about. + continue + self.assertNotEqual(tslib._p_tz_cache_key(tz_p), tslib._p_tz_cache_key(tz_d)) + + class TestTimeZones(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c36d34b2199d8..2e0e8af3f5119 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1962,6 +1962,11 @@ cdef inline bint _treat_tz_as_dateutil(object tz): return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') +def _p_tz_cache_key(tz): + ''' Python interface for cache function to facilitate testing.''' + return _tz_cache_key(tz) + + cdef inline object _tz_cache_key(object tz): """ Return the key in the cache for the timezone info object or None if unknown. @@ -1982,7 +1987,7 @@ cdef inline object _tz_cache_key(object tz): raise ValueError('Bad tz filename. Dateutil on python 3 on windows has a bug which causes tzfile._filename to be the same for all ' 'timezone files. Please construct dateutil timezones implicitly by passing a string like "dateutil/Europe/London" ' 'when you construct your pandas objects instead of passing a timezone object. See https://github.com/pydata/pandas/pull/7362') - return tz._filename + return 'dateutil' + tz._filename else: return None
Fixes #7420.
https://api.github.com/repos/pandas-dev/pandas/pulls/7478
2014-06-16T20:04:54Z
2014-06-17T00:18:00Z
2014-06-17T00:18:00Z
2014-06-17T09:56:00Z
BUG: Bug in Panel.apply with a multi-index as an axis (GH7469)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index f65a96f1c38a6..6b2ae95510fa1 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -163,7 +163,7 @@ Bug Fixes - +- Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`) - Bug in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 34ab401eac283..d0baa4b1ecad3 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1180,6 +1180,16 @@ def test_apply_slabs(self): expected = Panel(dict([ (ax,f(self.panel.loc[:,ax])) for ax in self.panel.major_axis ])) assert_panel_equal(result,expected) + # with multi-indexes + # GH7469 + index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a'), ('two', 'b')]) + dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(4,3), columns=list("ABC"), index=index) + dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(4,3), columns=list("ABC"), index=index) + p = Panel({'f':dfa, 'g':dfb}) + result = p.apply(lambda x: x.sum(), axis=0) + expected = p.sum(0) + assert_frame_equal(result,expected) + def test_reindex(self): ref = self.panel['ItemB'] diff --git a/pandas/tools/util.py b/pandas/tools/util.py index 1d6ed3e11c81e..215a76b84452a 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -3,6 +3,7 @@ from pandas.core.index import Index import numpy as np from pandas import algos +from pandas.core import common as com def match(needles, haystack): @@ -32,7 +33,7 @@ def cartesian_product(X): b = cumprodX[-1] / cumprodX - return [np.tile(np.repeat(np.asarray(x), b[i]), + return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)]
closes #7469
https://api.github.com/repos/pandas-dev/pandas/pulls/7474
2014-06-16T13:26:53Z
2014-06-16T14:20:06Z
2014-06-16T14:20:06Z
2014-06-16T14:20:06Z
ENH: Cast ndarray-like datetime64 arrays to Index properly
diff --git a/pandas/core/index.py b/pandas/core/index.py index 2252ba666ca59..6cbffd5b4dbba 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -148,6 +148,9 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, if copy: subarr = subarr.copy() + elif hasattr(data, '__array__'): + return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, + **kwargs) elif np.isscalar(data): cls._scalar_data_error(data) else: diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index f2372f98b330b..203d3f26d0812 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -177,12 +177,17 @@ def test_constructor_ndarray_like(self): # it should be possible to convert any object that satisfies the numpy # ndarray interface directly into an Index class ArrayLike(object): + def __init__(self, array): + self.array = array def __array__(self, dtype=None): - return np.arange(5) + return self.array - expected = pd.Index(np.arange(5)) - result = pd.Index(ArrayLike()) - self.assertTrue(result.equals(expected)) + for array in [np.arange(5), + np.array(['a', 'b', 'c']), + pd.date_range('2000-01-01', periods=3).values]: + expected = pd.Index(array) + result = pd.Index(ArrayLike(array)) + self.assertTrue(result.equals(expected)) def test_index_ctor_infer_periodindex(self): from pandas import period_range, PeriodIndex @@ -447,7 +452,7 @@ def test_intersection(self): assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5) idx1 = Index([1, 2, 3, 4, 5], name='idx') - # if target has the same name, it is preserved + # if target has the same name, it is preserved idx2 = Index([3, 4, 5, 6, 7], name='idx') expected2 = Index([3, 4, 5], name='idx') result2 = idx1.intersection(idx2)
It turns out that the ndarray-like arrays of dtype `datetime64` were not being properly cast to an `Index`, because -- due to a bug with `np.datetime64` -- calling `np.asarray(x, dtype=object)` if x is an ndarray of type `datetime64` results in an _integer_ array. This PR adds tests and a work around to `pd.Index.__new__`. Related #5460
https://api.github.com/repos/pandas-dev/pandas/pulls/7468
2014-06-15T22:56:55Z
2014-06-16T12:50:48Z
2014-06-16T12:50:48Z
2014-06-16T16:51:53Z
DOC: Add missing column header in 'baseball.csv'.
diff --git a/doc/data/baseball.csv b/doc/data/baseball.csv index 546c3ad62637b..aadbaced193a5 100644 --- a/doc/data/baseball.csv +++ b/doc/data/baseball.csv @@ -1,4 +1,4 @@ -id,year,stint,team,lg,g,ab,r,h,X2b,X3b,hr,rbi,sb,cs,bb,so,ibb,hbp,sh,sf,gidp +id,player,year,stint,team,lg,g,ab,r,h,X2b,X3b,hr,rbi,sb,cs,bb,so,ibb,hbp,sh,sf,gidp 88641,womacto01,2006,2,CHN,NL,19,50,6,14,1,0,1,2.0,1.0,1.0,4,4.0,0.0,0.0,3.0,0.0,0.0 88643,schilcu01,2006,1,BOS,AL,31,2,0,1,0,0,0,0.0,0.0,0.0,0,1.0,0.0,0.0,0.0,0.0,0.0 88645,myersmi01,2006,1,NYA,AL,62,0,0,0,0,0,0,0.0,0.0,0.0,0,0.0,0.0,0.0,0.0,0.0,0.0
Add missing column header in 'baseball.csv' in the docs.
https://api.github.com/repos/pandas-dev/pandas/pulls/7467
2014-06-15T17:42:00Z
2014-06-15T18:10:23Z
2014-06-15T18:10:23Z
2014-06-15T18:10:30Z
BUG: Some offsets.apply cannot handle tz properly
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index be0b3bc543c39..ac60e3df88715 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -224,6 +224,8 @@ Bug Fixes +- Bug in passing input with ``tzinfo`` to some offsets ``apply``, ``rollforward`` or ``rollback`` resets ``tzinfo`` or raises ``ValueError`` (:issue:`7465`) + - BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 91ae91e92f3c3..ff4d6a54d51d4 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -48,6 +48,7 @@ def wrapper(self, other): elif isinstance(other, np.datetime64): other = as_timestamp(other) + tz = getattr(other, 'tzinfo', None) result = func(self, other) if self.normalize: @@ -55,6 +56,12 @@ def wrapper(self, other): if isinstance(other, Timestamp) and not isinstance(result, Timestamp): result = as_timestamp(result) + + if tz is not None: + if isinstance(result, Timestamp) and result.tzinfo is None: + result = result.tz_localize(tz) + elif isinstance(result, datetime) and result.tzinfo is None: + result = tz.localize(result) return result return wrapper @@ -570,6 +577,11 @@ def _to_dt64(dt, dtype='datetime64'): # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]') # numpy.datetime64('2013-05-01T02:00:00.000000+0200') # Thus astype is needed to cast datetime to datetime64[D] + + if getattr(dt, 'tzinfo', None) is not None: + i8 = tslib.pydt_to_i8(dt) + dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo) + dt = Timestamp(dt) dt = np.datetime64(dt) if dt.dtype.name != dtype: dt = dt.astype(dtype) @@ -966,13 +978,18 @@ def apply(self, other): months = self.n + 1 other = self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1)) - other = datetime(other.year, other.month, other.day, - base.hour, base.minute, base.second, base.microsecond) + other = datetime(other.year, other.month, other.day, base.hour, + base.minute, 
base.second, base.microsecond) + if getattr(other, 'tzinfo', None) is not None: + other = other.tzinfo.localize(other) return other def getOffsetOfMonth(self, dt): w = Week(weekday=self.weekday) + d = datetime(dt.year, dt.month, 1) + if getattr(dt, 'tzinfo', None) is not None: + d = dt.tzinfo.localize(d) d = w.rollforward(d) @@ -985,6 +1002,8 @@ def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False d = datetime(dt.year, dt.month, dt.day) + if getattr(dt, 'tzinfo', None) is not None: + d = dt.tzinfo.localize(d) return d == self.getOffsetOfMonth(dt) @property @@ -1056,6 +1075,8 @@ def apply(self, other): def getOffsetOfMonth(self, dt): m = MonthEnd() d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute, dt.second, dt.microsecond) + if getattr(dt, 'tzinfo', None) is not None: + d = dt.tzinfo.localize(d) eom = m.rollforward(d) @@ -1134,6 +1155,10 @@ class BQuarterEnd(QuarterOffset): @apply_wraps def apply(self, other): n = self.n + base = other + other = datetime(other.year, other.month, other.day, + other.hour, other.minute, other.second, + other.microsecond) wkday, days_in_month = tslib.monthrange(other.year, other.month) lastBDay = days_in_month - max(((wkday + days_in_month - 1) @@ -1149,7 +1174,8 @@ def apply(self, other): n = n + 1 other = as_datetime(other) + relativedelta(months=monthsToGo + 3 * n, day=31) - + if getattr(base, 'tzinfo', None) is not None: + other = base.tzinfo.localize(other) if other.weekday() > 4: other = other - BDay() @@ -1216,6 +1242,8 @@ def apply(self, other): result = datetime(other.year, other.month, first, other.hour, other.minute, other.second, other.microsecond) + if getattr(other, 'tzinfo', None) is not None: + result = other.tzinfo.localize(result) return as_timestamp(result) @@ -1242,6 +1270,10 @@ def isAnchored(self): @apply_wraps def apply(self, other): n = self.n + base = other + other = datetime(other.year, other.month, other.day, + other.hour, other.minute, other.second, + other.microsecond) 
other = as_datetime(other) wkday, days_in_month = tslib.monthrange(other.year, other.month) @@ -1254,7 +1286,8 @@ def apply(self, other): n = n - 1 other = other + relativedelta(months=monthsToGo + 3 * n, day=31) - + if getattr(base, 'tzinfo', None) is not None: + other = base.tzinfo.localize(other) return as_timestamp(other) def onOffset(self, dt): @@ -1589,6 +1622,10 @@ def apply(self, other): datetime(other.year, self.startingMonth, 1)) next_year = self.get_year_end( datetime(other.year + 1, self.startingMonth, 1)) + if getattr(other, 'tzinfo', None) is not None: + prev_year = other.tzinfo.localize(prev_year) + cur_year = other.tzinfo.localize(cur_year) + next_year = other.tzinfo.localize(next_year) if n > 0: if other == prev_year: @@ -1647,7 +1684,9 @@ def get_year_end(self, dt): return self._get_year_end_last(dt) def get_target_month_end(self, dt): - target_month = datetime(year=dt.year, month=self.startingMonth, day=1) + target_month = datetime(dt.year, self.startingMonth, 1) + if getattr(dt, 'tzinfo', None) is not None: + target_month = dt.tzinfo.localize(target_month) next_month_first_of = target_month + relativedelta(months=+1) return next_month_first_of + relativedelta(days=-1) @@ -1665,7 +1704,9 @@ def _get_year_end_nearest(self, dt): return backward def _get_year_end_last(self, dt): - current_year = datetime(year=dt.year, month=self.startingMonth, day=1) + current_year = datetime(dt.year, self.startingMonth, 1) + if getattr(dt, 'tzinfo', None) is not None: + current_year = dt.tzinfo.localize(current_year) return current_year + self._offset_lwom @property @@ -1878,13 +1919,14 @@ class Easter(DateOffset): ''' def __init__(self, n=1, **kwds): super(Easter, self).__init__(n, **kwds) - + @apply_wraps def apply(self, other): - currentEaster = easter(other.year) currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day) - + if getattr(other, 'tzinfo', None) is not None: + currentEaster = other.tzinfo.localize(currentEaster) + # NOTE: 
easter returns a datetime.date so we have to convert to type of other if self.n >= 0: if other >= currentEaster: @@ -1905,6 +1947,7 @@ def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False return date(dt.year, dt.month, dt.day) == easter(dt.year) + #---------------------------------------------------------------------- # Ticks diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index ac7a8ae410429..fddfb3e3b4b56 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -185,6 +185,8 @@ def setUp(self): 'Milli': Timestamp('2011-01-01 09:00:00.001000'), 'Micro': Timestamp('2011-01-01 09:00:00.000001'), 'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))} + + self.timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern'] def test_return_type(self): for offset in self.offset_types: @@ -214,6 +216,24 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, self.assert_(isinstance(result, Timestamp)) self.assertEqual(result, expected) + if isinstance(dt, np.datetime64): + # test tz when input is datetime or Timestamp + return + + tm._skip_if_no_pytz() + import pytz + for tz in self.timezones: + expected_localize = expected.tz_localize(tz) + + dt_tz = pytz.timezone(tz).localize(dt) + result = func(dt_tz) + self.assert_(isinstance(result, datetime)) + self.assertEqual(result, expected_localize) + + result = func(Timestamp(dt, tz=tz)) + self.assert_(isinstance(result, datetime)) + self.assertEqual(result, expected_localize) + def _check_nanofunc_works(self, offset, funcname, dt, expected): offset = self._get_offset(offset) func = getattr(offset, funcname) @@ -334,9 +354,7 @@ def test_rollback(self): dt, expected, normalize=True) def test_onOffset(self): - for offset in self.offset_types: - dt = self.expecteds[offset.__name__] offset_s = self._get_offset(offset) self.assert_(offset_s.onOffset(dt)) diff --git a/pandas/tseries/tests/test_timeseries.py 
b/pandas/tseries/tests/test_timeseries.py index b04747665480e..76f6850507d4d 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -309,13 +309,6 @@ def test_recreate_from_data(self): idx = DatetimeIndex(org, freq=f) self.assertTrue(idx.equals(org)) - # unbale to create tz-aware 'A' and 'C' freq - if _np_version_under1p7: - freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H'] - else: - freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N'] - - for f in freqs: org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1) idx = DatetimeIndex(org, freq=f, tz='US/Pacific') self.assertTrue(idx.equals(org)) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 380116fc5aab5..338fa381a11f5 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -209,6 +209,12 @@ def setUpClass(cls): cls.setUpClass = setUpClass return cls +def _skip_if_no_pytz(): + try: + import pytz + except ImportError: + import nose + raise nose.SkipTest("pytz not installed") #------------------------------------------------------------------------------ # locale utilities
There are some offsets which cannot handle input with `tz` properly ``` pd.offsets.Day().apply(pd.Timestamp('2010-01-01 9:00', tz='US/Eastern')) #2010-01-02 09:00:00-05:00 (Expected) pd.offsets.CustomBusinessDay().apply(pd.Timestamp('2010-01-01 9:00', tz='US/Eastern')) #2010-01-04 09:00:00 (tzinfo lost) pd.offsets.CustomBusinessMonthEnd().apply(pd.Timestamp('2010-01-01 9:00', tz='US/Eastern')) # ValueError: Cannot compare tz-naive and tz-aware timestamps ``` ### Affected Offsets - pandas.tseries.offsets.CustomBusinessDay' - 'pandas.tseries.offsets.CustomBusinessMonthEnd' - 'pandas.tseries.offsets.CustomBusinessMonthBegin' - 'pandas.tseries.offsets.BusinessMonthBegin' - 'pandas.tseries.offsets.YearBegin' - 'pandas.tseries.offsets.BYearBegin' - 'pandas.tseries.offsets.YearEnd' - 'pandas.tseries.offsets.BYearEnd' - 'pandas.tseries.offsets.BQuarterBegin' - 'pandas.tseries.offsets.LastWeekOfMonth' - 'pandas.tseries.offsets.FY5253Quarter' - 'pandas.tseries.offsets.FY5253' - 'pandas.tseries.offsets.Week' - 'pandas.tseries.offsets.WeekOfMonth' - 'pandas.tseries.offsets.Easter'
https://api.github.com/repos/pandas-dev/pandas/pulls/7465
2014-06-14T21:42:43Z
2014-06-17T11:57:36Z
2014-06-17T11:57:36Z
2014-06-17T14:38:59Z
BUG: astype(float) in Index does the wrong thing
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index be0b3bc543c39..61ffd5c89b568 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -229,7 +229,8 @@ Bug Fixes - +- Bug in ``Index.astype(float)`` where it would return an ``object`` dtype + ``Index`` (:issue:`7464`). diff --git a/pandas/core/index.py b/pandas/core/index.py index 2252ba666ca59..4f1c1d4f65a4c 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -140,6 +140,8 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, if issubclass(data.dtype.type, np.integer): return Int64Index(data, copy=copy, dtype=dtype, name=name) + if issubclass(data.dtype.type, np.floating): + return Float64Index(data, copy=copy, dtype=dtype, name=name) subarr = com._asarray_tuplesafe(data, dtype=object) @@ -1986,7 +1988,8 @@ def inferred_type(self): def astype(self, dtype): if np.dtype(dtype) not in (np.object, np.float64): raise TypeError('Setting %s dtype to anything other than ' - 'float64 or object is not supported' % self.__class__) + 'float64 or object is not supported' % + self.__class__) return Index(self.values, name=self.name, dtype=dtype) def _convert_scalar_indexer(self, key, typ=None): @@ -2020,7 +2023,7 @@ def get_value(self, series, key): k = _values_from_object(key) loc = self.get_loc(k) new_values = series.values[loc] - if np.isscalar(new_values): + if np.isscalar(new_values) or new_values is None: return new_values new_index = self[loc] diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index f2372f98b330b..0bef5678b6825 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -905,6 +905,7 @@ def test_nan_first_take_datetime(self): exp = Index([idx[-1], idx[0], idx[1]]) tm.assert_index_equal(res, exp) + class TestFloat64Index(tm.TestCase): _multiprocess_can_split_ = True @@ -1041,6 +1042,13 @@ def test_nan_multiple_containment(self): np.testing.assert_array_equal(i.isin([np.nan]), np.array([False, False])) + def 
test_astype_from_object(self): + index = Index([1.0, np.nan, 0.2], dtype='object') + result = index.astype(float) + expected = Float64Index([1.0, np.nan, 0.2]) + tm.assert_equal(result.dtype, expected.dtype) + tm.assert_index_equal(result, expected) + class TestInt64Index(tm.TestCase): _multiprocess_can_split_ = True
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7464
2014-06-14T20:15:22Z
2014-06-15T01:33:35Z
2014-06-15T01:33:35Z
2014-06-15T01:33:36Z
PERF: performance gains in DataFrame groupby.transform for ufuncs (GH7383)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 99fec7be42baa..271b4eb75a7b6 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -137,6 +137,7 @@ Performance ~~~~~~~~~~~ - Improvements in dtype inference for numeric operations involving yielding performance gains for dtypes: ``int64``, ``timedelta64``, ``datetime64`` (:issue:`7223`) - Improvements in Series.transform for signifcant performance gains (:issue`6496`) +- Improvements in DataFrame.transform with ufuncs and built-in grouper functions for signifcant performance gains (:issue`7383`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index a90f00fd11e36..dc8b7f3bccc2a 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2701,27 +2701,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - def transform(self, func, *args, **kwargs): - """ - Call function producing a like-indexed DataFrame on each group and - return a DataFrame having the same indexes as the original object - filled with the transformed values - - Parameters - ---------- - f : function - Function to apply to each subframe - - Notes - ----- - Each subframe is endowed the attribute 'name' in case you need to know - which group you are working on. 
- - Examples - -------- - >>> grouped = df.groupby(lambda x: mapping[x]) - >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) - """ + def _transform_general(self, func, *args, **kwargs): from pandas.tools.merge import concat applied = [] @@ -2763,6 +2743,66 @@ def transform(self, func, *args, **kwargs): concatenated.sort_index(inplace=True) return concatenated + def transform(self, func, *args, **kwargs): + """ + Call function producing a like-indexed DataFrame on each group and + return a DataFrame having the same indexes as the original object + filled with the transformed values + + Parameters + ---------- + f : function + Function to apply to each subframe + + Notes + ----- + Each subframe is endowed the attribute 'name' in case you need to know + which group you are working on. + + Examples + -------- + >>> grouped = df.groupby(lambda x: mapping[x]) + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + """ + + # try to do a fast transform via merge if possible + try: + obj = self._obj_with_exclusions + if isinstance(func, compat.string_types): + result = getattr(self, func)(*args, **kwargs) + else: + cyfunc = _intercept_cython(func) + if cyfunc and not args and not kwargs: + result = getattr(self, cyfunc)() + else: + return self._transform_general(func, *args, **kwargs) + except: + return self._transform_general(func, *args, **kwargs) + + # a reduction transform + if not isinstance(result, DataFrame): + return self._transform_general(func, *args, **kwargs) + + # nuiscance columns + if not result.columns.equals(obj.columns): + return self._transform_general(func, *args, **kwargs) + + # a grouped that doesn't preserve the index, remap index based on the grouper + # and broadcast it + if not isinstance(obj.index,MultiIndex) and type(result.index) != type(obj.index): + results = obj.values.copy() + for (name, group), (i, row) in zip(self, result.iterrows()): + indexer = self._get_index(name) + results[indexer] = 
np.tile(row.values,len(indexer)).reshape(len(indexer),-1) + return DataFrame(results,columns=result.columns,index=obj.index).convert_objects() + + # we can merge the result in + # GH 7383 + names = result.columns + result = obj.merge(result, how='outer', left_index=True, right_index=True).ix[:,-result.shape[1]:] + result.columns = names + return result + def _define_paths(self, func, *args, **kwargs): if isinstance(func, compat.string_types): fast_path = lambda group: getattr(group, func)(*args, **kwargs) diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index f61c60d939907..eac313481aca7 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -376,6 +376,7 @@ def f(g): """ groupby_transform = Benchmark("data.groupby(level='security_id').transform(f_fillna)", setup) +groupby_transform_ufunc = Benchmark("data.groupby(level='date').transform(np.max)", setup) setup = common_setup + """ np.random.seed(0) @@ -393,4 +394,4 @@ def f(g): df = DataFrame({ 'signal' : np.random.rand(N)}) """ -groupby_transform2 = Benchmark("df['signal'].groupby(g).transform(np.mean)", setup) +groupby_transform_series = Benchmark("df['signal'].groupby(g).transform(np.mean)", setup)
accelerates non-modifying transformations, e.g. closes #7383 `DataFrame.groupby(...).transform(np.max)` ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_transform_ufunc | 6.1977 | 215.6494 | 0.0287 | groupby_transform2 | 155.9653 | 155.1824 | 1.0050 | groupby_transform | 167.5134 | 165.7823 | 1.0104 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [3d3715b] : WPI: fast tranform on DataFrame Base [eb1ae6b] : Merge pull request #7458 from sinhrks/intersection ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7463
2014-06-14T20:12:21Z
2014-06-16T12:52:18Z
2014-06-16T12:52:18Z
2014-06-16T12:52:18Z
nanops pep8 fixes
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index c3e1da61330fa..431cb1ac451c0 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1,26 +1,21 @@ -from pandas import compat import sys import itertools import functools import numpy as np -from pandas.core.common import isnull, notnull, _values_from_object, is_float -import pandas.core.common as com -import pandas.lib as lib -import pandas.algos as algos -import pandas.hashtable as _hash -import pandas.tslib as tslib - -from pandas.compat import builtins - - try: import bottleneck as bn _USE_BOTTLENECK = True except ImportError: # pragma: no cover _USE_BOTTLENECK = False +import pandas.core.common as com +import pandas.hashtable as _hash +from pandas import compat, lib, algos, tslib +from pandas.compat import builtins +from pandas.core.common import isnull, notnull, _values_from_object, is_float + class disallow(object): @@ -75,7 +70,8 @@ def f(values, axis=None, skipna=True, **kwds): result.fill(0) return result - if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): + if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, + bn_name): result = bn_func(values, axis=axis, **kwds) # prefer to treat inf/-inf as NA, but must compute the func @@ -94,7 +90,8 @@ def f(values, axis=None, skipna=True, **kwds): def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 - if dt != np.object_ and not issubclass(dt.type, (np.datetime64, np.timedelta64)): + if dt != np.object_ and not issubclass(dt.type, (np.datetime64, + np.timedelta64)): # bottleneck does not properly upcast during the sum # so can overflow @@ -179,8 +176,9 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, # return a platform independent precision dtype dtype_max = dtype - if dtype.kind == 'i' and not issubclass( - dtype.type, (np.bool, np.datetime64, np.timedelta64)): + if dtype.kind == 'i' and not issubclass(dtype.type, (np.bool, + np.datetime64, + np.timedelta64)): dtype_max = 
np.int64 elif dtype.kind in ['b'] or issubclass(dtype.type, np.bool): dtype_max = np.int64 @@ -251,7 +249,7 @@ def nanall(values, axis=None, skipna=True): @bottleneck_switch(zero_value=0) def nansum(values, axis=None, skipna=True): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) - the_sum = values.sum(axis,dtype=dtype_max) + the_sum = values.sum(axis, dtype=dtype_max) the_sum = _maybe_null_out(the_sum, axis, mask) return _wrap_results(the_sum, dtype) @@ -365,7 +363,8 @@ def nansem(values, axis=None, skipna=True, ddof=1): @bottleneck_switch() def nanmin(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='+inf') + values, mask, dtype, dtype_max = _get_values(values, skipna, + fill_value_typ='+inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -381,7 +380,7 @@ def nanmin(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis,dtype=dtype_max)) + result = com.ensure_float(values.sum(axis, dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -394,7 +393,8 @@ def nanmin(values, axis=None, skipna=True): @bottleneck_switch() def nanmax(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='-inf') + values, mask, dtype, dtype_max = _get_values(values, skipna, + fill_value_typ='-inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -427,7 +427,7 @@ def nanargmax(values, axis=None, skipna=True): Returns -1 in the NA case """ values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf', - isfinite=True) + isfinite=True) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result @@ -438,7 +438,7 @@ def nanargmin(values, axis=None, skipna=True): Returns -1 in the NA case """ values, mask, dtype, _ = 
_get_values(values, skipna, fill_value_typ='+inf', - isfinite=True) + isfinite=True) result = values.argmin(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result
Very minor pep8 fixes for `nanops`. This brings `nanops` into pep8 compliance.
https://api.github.com/repos/pandas-dev/pandas/pulls/7461
2014-06-14T14:23:51Z
2014-06-17T12:36:03Z
2014-06-17T12:36:03Z
2014-06-26T10:29:12Z